Columns:
  query            string (33 to 521 characters)
  document         string (8 to 49.6k characters)
  metadata         dict
  negatives        sequence (5 to 101 items)
  negative_scores  sequence (5 to 101 items)
  document_score   string (3 to 10 characters)
  document_rank    string (102 distinct values)
Compute the matrix-vector product y = Cu, where C is a circulant matrix. All matrices are real.
def circulant_multiplication(u, a): return real(ifft(fft(a)*fft(u)))
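The retrieved document above relies on the FFT diagonalization of circulant matrices: the eigenvalues of a circulant matrix C with first column a are fft(a), so Cu reduces to a pointwise product in the frequency domain. A minimal self-contained sketch follows; the NumPy/SciPy imports and the verification against scipy.linalg.circulant are assumptions added here, since the stored snippet omits its imports.

    import numpy as np
    from scipy.fft import fft, ifft
    from scipy.linalg import circulant

    def circulant_multiplication(u, a):
        # y = C @ u, where C is the circulant matrix whose first column is a.
        # fft(a) holds the eigenvalues of C, so the product is a pointwise
        # multiplication in the frequency domain followed by an inverse FFT.
        return np.real(ifft(fft(a) * fft(u)))

    # quick check against an explicitly built circulant matrix
    a = np.array([1.0, 2.0, 3.0, 4.0])
    u = np.array([0.5, -1.0, 2.0, 0.0])
    assert np.allclose(circulant_multiplication(u, a), circulant(a) @ u)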
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def covar(fx,cx):\n \n fx = np.array(fx)\n cx = np.array(cx)\n \n shape_fx = fx.shape\n shape_cx = cx.shape\n \n \n if shape_fx[1] != shape_cx[0]:\n print('-----------------------------------------')\n print(\"Shapes of fx and cx cannot be multiplied:\")\n print(shape_fx,\"x\",shape_cx)\n print('-----------------------------------------')\n raise ValueError('Input matrices are not compliant')\n \n cy = np.dot(np.dot(fx,cx),fx.T)\n \n print(\"Size of Cy matrix: \",np.shape(cy))\n \n return cy", "def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)", "def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product", "def __matmul__(self, csys):\n self._transform(csys)\n return self", "def factor_circulant_multiplication(u, x, k=1):\n n = len(u) \n D_k = (k**(1/n))**np.arange(0,n)\n Lambda = fft(D_k*x)\n return (1/D_k)*real(ifft(Lambda*fft(D_k*u))) # y", "def updateC(A, U, B):\n \n m_dim = A.shape[1] \n q_dim = B.shape[0]\n \n C_tensor = np.zeros((m_dim, m_dim, q_dim), dtype=np.complex)\n \n for k in range(q_dim):\n A_k = A[:, :, k]\n b_k = B[k]\n \n x_hat = U @ b_k\n y_hat = A_k.conj().T @ x_hat\n \n phase_y = np.exp(1j*np.angle(y_hat))\n #phase_y = np.sign(y_hat)\n C_k = np.diag(phase_y)\n C_tensor[:, :, k] = C_k\n \n \n return C_tensor", "def c_matrix(x1,x2,x3):\n\tC = np.array([\t[\t2*(x2-x1), \t\t(x2-x1), \t\t\t0\t\t\t], \\\n\t\t\t\t\t[\t(x2-x1), \t\t2*(x3-x1), \t\t(x3-x2)\t\t], \\\n\t\t\t\t\t[\t0,\t\t\t\t(x3-x2),\t\t2*(x3-x2)\t] \t], \\\n\t\t\t\t\tfloat)\n\treturn(C)", "def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u1 = zeros((2*n))\n u1[0:n] = u\n \n c = np.concatenate((c, [0], r[-1:0:-1])) \n \n y1 = circulant_multiplication(u1, c)\n \n return y1[0:n]", "def matmul(x, y):\n return np.matmul(x, y)", "def CoTang(M):\n x = [sy.Dummy() for _ in range(nargs(M))]\n fx = [sy.Dummy() for _ in range(len(x))]\n\n y = list(M(*x))\n J = Jac(M)(*x)\n J = sy.Matrix(J).reshape(len(y), len(x))\n\n fy = list(J.T.inv() @ sy.Matrix(fx))\n return sy.lambdify(\n x + fx,\n y + fy,\n 'sympy',\n )", "def _C(self):\n\n # Find the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C", "def simple_doct_product(u, v):\n v = [i / (sum(v)) for i in v]\n\n return np.dot(u, v)", "def matvec(self, x):\n return self * x", "def 
p_ym_c(pm,px,py,pyx_c,pmx_c):\n pym_c = np.zeros((py.size,pm.size))\n for yi in range(py.size):\n for mi in range(pm.size):\n for xi in range(px.size):\n pym_c[yi,mi] += (1./pm[mi])*pyx_c[yi,xi]*pmx_c[mi,xi]*px[xi]\n return pym_c", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def matmul(x, y):\n if len(list(y.size())) == 2:\n # if one of them is a vector (i.e. wanting to do MV mult)\n z = torch.zeros(2, x.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.mv(x[0], y[0]) - torch.mv(x[1], y[1])\n z[1] = torch.mv(x[0], y[1]) + torch.mv(x[1], y[0])\n\n if len(list(y.size())) == 3:\n z = torch.zeros(\n 2, x.size()[1], y.size()[2], dtype=torch.double, device=x.device\n )\n z[0] = torch.matmul(x[0], y[0]) - torch.matmul(x[1], y[1])\n z[1] = torch.matmul(x[0], y[1]) + torch.matmul(x[1], y[0])\n\n return z", "def cofactor_matrix(self):\n resp = []\n len_b = len(self.take_vec())\n for i in range(self.order):\n _matrix = aux.cofactor(self.take_matrix(),\n (i, self.order-1)\n )\n _resp = math.pow(-1, len_b-1)\n _resp = _resp * np.linalg.det(_matrix)\n _resp = _resp * math.pow(-1, i * (self.order-1))\n resp.append(int(round(_resp)))\n\n return resp", "def method3(self):\n cres=0.\n Ux_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.ALDM[ix ,iy, : , : ]\n mat2=self.ALDM[(ix%self.kS.Nx)+1, iy, : , : ]\n mat3=self.ALDM[ix ,(iy%self.kS.Ny)+1, : , : ]\n \n Ux_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[self.NL-1:,self.NL-1:])\n Uy_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[self.NL-1:,self.NL-1:])\n\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_aloc[ix,iy]*Uy_aloc[ix+1,iy]/Ux_aloc[ix,iy+1]/Uy_aloc[ix,iy])\n cres+=ftemp/2./pi/1j\n \n return cres.real\n #End of method3", "def naive_matrix_vector_dot(x, y):\n assert len(x.shape) == 2\n assert len(y.shape) == 1\n assert x.shape[1] == y.shape[0]\n\n z = np.zeros(x.shape[0])\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n z[i] += x[i, j] * y[j]\n return z", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def m_c(self) -> np.ndarray:\n assert self._k is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k)", "def _mps_CA(self, C, A):\n return np.tensordot(C, A, axes=(1, 0))", "def matmul():\n\n if RESULT_IN_NVRAM:\n matrix_c = ResultMatrixInDaos()\n else:\n matrix_c = ResultMatrixInMemory()\n\n # This could be trivially optimized by reordering indexes\n # and caching either a_block or b_block (assuming C in-memory).\n # *However* it would result in unfair comparisons with the \n # previous implementation used elsewhere.\n # Using the naive algorithm makes sense for a raw comparison.\n for i in range(MATRIXSIZE):\n for j in range(MATRIXSIZE):\n partial_result_block = np.zeros((BLOCKSIZE, BLOCKSIZE))\n\n for k in range(MATRIXSIZE):\n a_block = np.fromstring(\n DAOS_KV[\"A%02d%02d\" % (i, k)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n b_block = np.fromstring(\n DAOS_KV[\"B%02d%02d\" % (k, j)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n partial_result_block += a_block @ b_block\n \n matrix_c[i,j] = partial_result_block\n\n return matrix_c", "def _rmatvec(self, u: 
np.ndarray) -> np.ndarray:\n return convolve(self.x.conj()[::-1], u, mode='valid', method=self.method)", "def complex_multiplication(c1,c2,cr):\n cr[0] = c1[0]*c2[0] - c1[1]*c2[1]\n cr[1] = c1[0]*c2[1] + c1[1]*c2[0]\n return cr", "def get_C(n_c,CV_matrix):\n C = np.zeros((n_c, n_c), dtype=np.float32)\n for i in range(3):\n C += np.asfortranarray(CV_matrix[:, :, i]) @ np.asfortranarray(CV_matrix[:, :, np.mod(i + 2, 3)].T)\n C = (C != 0).astype(np.int32)\n return C", "def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho", "def rhs(t, conc, S_matrix, educt_matrix, kin_par):\n fluxes = calculate_fluxes(conc, S_matrix, educt_matrix, kin_par)\n return np.dot(S_matrix, fluxes)", "def cofiCostFunc(self,params, *args):\n\t\tY, R, num_users, num_products, num_features,l = args[0], args[1],args[2], args[3],args[4],args[5]\n\n\t\taux = params.reshape((num_products + num_users, num_features))\n\n\t\tX = aux[0:num_products , :]\n\n\t\tTheta = aux[num_products:, :] \n\n\t\ttest = np.dot(X,Theta.transpose())\n\t\ttest = test - Y\n\t\ttest = np.multiply(test , R)\n\t\ttest = np.power(test,2)\n\t\ttest = test.sum()\n\t\ttest = 0.5 * test\n\n\t\tJ = 0;\n\t\tregularization = (l * 0.5) * np.power(X,2).sum() + np.power(Theta,2).sum()\n\n\t\tJ = test# + regularization\n\n\t\treturn J", "def c(self, z, y, r, t):\n \n u = np.zeros( self.m ) \n \n return u", "def _set_u_matirx(self):\n c_matrix = self.get_c_matrix()\n u_matrix, d_matrix, _ = np.linalg.svd(c_matrix)\n self.u_matrix = np.matrix(u_matrix)", "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2))\n return self.C_reduced", "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2).T)\n return self.C_reduced", "def scalar_multiply(c, v):\n\treturn [c * v_i for v_i in v]", "def vectordot(a, b):\n return np.sum(a * b, 1)", "def _mps_AC(self, A, C):\n return np.tensordot(A, C, axes=(2, 0))", "def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M", "def vect_contract(m, c, n):\n a = np.tensordot(m, c, (0, 0))\n mn = np.tensordot(a, n, (2, 0))\n return mn", "def naive_vector_dot(x, y):\n assert len(x.shape) == 1\n assert len(y.shape) == 1\n assert x.shape[0] == y.shape[0]\n\n z = 0\n for i in range(x.shape[0]):\n z += x[i] * y[i]\n return z", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res", "def _c_correlation(cls, X, y):\n su = np.zeros(X.shape[1])\n for i in np.arange(X.shape[1]):\n su[i] = cls._symmetrical_uncertainty(X[:, i], y)\n return su", "def complex_inverse(c1,cr):", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def _build_c_phi_matrices(self, t: tf.Tensor) -> tf.Tensor:\n c_phi_matrices = self.kernel.compute_c_phi(t, 
t)\\\n + tf.expand_dims(tf.eye(self.n_points_int, dtype=tf.float64), 0)\\\n * self.likelihood_variances\n return c_phi_matrices", "def dot_prod(u,v):\n each_product = []\n for i in range(len(u)):\n each_product.append(u[i] * v[i])\n return sum(each_product)", "def matrix_deflation(X_curr, Y_curr, X_orig, Y_orig, u, v):\n Xp = X_curr\n Yp = Y_curr\n #u = u / (np.sqrt(np.sum(u**2)+1e-7))\n #v = v / (np.sqrt(np.sum(v**2)+1e-7))\n\n qx = np.dot(Xp.T,np.dot(X_orig,u))\n qx = qx / (np.sqrt(np.sum(qx**2)+1e-7))\n #qx = qx.astype('float16')\n Xp = Xp - np.dot(Xp,qx).dot(qx.T)\n X = Xp.reshape(1,Xp.shape[0],Xp.shape[1])\n\n qy = np.dot(Yp.T,np.dot(Y_orig,v))\n qy = qy / (np.sqrt(np.sum(qy**2)+1e-7))\n #qy = qy.astype('float16')\n Yp = Yp - np.dot(Yp,qy).dot(qy.T)\n Y = Yp.reshape(1,Yp.shape[0],Yp.shape[1])\n\n return X, Y", "def scalar_multiply(c, v):\n return [c * v_i for v_i in v]", "def scalar_multiply(c, v):\n return [c * v_i for v_i in v]", "def mcc(self):\n tp = self.tp\n tn = self.tn\n fp = self.fp\n fn = self.fn\n return tp * tn / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))", "def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z", "def Q2C(self, q):\n\n #q = q.squeeze();\n C = np.empty((3,3));\n\tC[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);\n\tC[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));\n\tC[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));\n\n\tC[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));\n\tC[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);\n\tC[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));\n\n\tC[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));\n\tC[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));\n\tC[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);\n\n return C", "def scalar_mult(v, u):\n return [v[i] * u[i] for i in range(len(v))]", "def f(self, x: np.array) -> np.array:\n return self.m * x + self.c", "def p_mx_c(pm,px,py,pyx_c,pym_c,beta):\n \n pmx_c = np.zeros((pm.size,px.size)) # P(M|X) matrix to be returned\n for mi in range(pm.size):\n for xi in range(px.size):\n pmx_c[mi,xi] = pm[mi] * np.exp(-beta * entropy(pyx_c[:,xi], pym_c[:,mi], base=2))\n z = pmx_c.sum(axis=0)\n pmx_c /= z #Normalize\n \n \t\n return pmx_c, z", "def ccc_v(y_true, y_pred):\n x = y_true[:, 0]\n y = y_pred[:, 0]\n mx = K.mean(x, axis=0)\n my = K.mean(y, axis=0)\n xm, ym = x - mx, y - my\n rho = K.sum(xm * ym) / (K.sqrt(K.sum(xm ** 2)) * K.sqrt(K.sum(ym ** 2)))\n x_s = K.std(x)\n y_s = K.std(y)\n ccc = 2 * rho * x_s * y_s / (x_s ** 2 + y_s ** 2 + (mx - my) ** 2)\n return ccc", "def factor_circulant_matrix(x, k):\n n=len(x)\n return circulant(x) * (tri(n,n, 0) + k*np.transpose(tri(n,n, -1)))", "def p_m(pmx_c,px):\n pm = np.zeros(pmx_c.shape[0])\n for mi in range(pm.size):\n for xi in range(px.size):\n pm[mi] += pmx_c[mi,xi]*px[xi]\n return pm", "def alternate_ls (u_num, Y, P, C, reg):\n\n\t# get # of items/users and # of latent factors\n\t[i_num, f_num] = 
Y.shape\n\n\t# output buffer\n\tX = np.zeros((u_num, f_num))\n\n\t# precalculate YtY to improve the performance\n\tYtY = Y.T * Y\n\n\t# iterate over each user/item\n\tfor u in range(u_num):\n\n\t\t# store the diagonal elements of the matrix Cu discussed in the paper in a vector\n\t\tCu = C[u,:]\n\n\t\t# store the coresponding row/column of the preference matrix\n\t\tPu = P[u,:]\n\n\t\t# compute Cu-I\n\t\tCu_I = Cu - 1\n\n\t\t# calculate Yt(Cu-I)Y\n\t\tYtCu_IY = np.zeros((f_num, f_num))\n\t\tCuIY = np.multiply(Y, Cu_I.T) # weight each row of Y with Cu-I\n\t\tfor row in range(f_num):\n\t\t\tfor col in range(f_num):\n\t\t\t\tYtCu_IY[row,col] = Y[:,row].T * CuIY[:,col]\n\t\t\n\t\t# left term : ((YtCuY + regI)^(-1)) = (YtY + Yt(Cu-I)Y + regI)^(-1)\n\t\tleft_inv = YtY + YtCu_IY + reg*np.eye(f_num)\n\t\tleft = np.linalg.inv(left_inv)\n\n\t\t# right term : YtCuPu\n\t\tright = Y.T * np.multiply(Cu.T, Pu.T)\n\n\t\t# compute the latent factor of the user/item\n\t\tx = left * right\n\t\t\n\t\t# store it in a matrix\n\t\tX[u,:] = x.T\n\n\t# return an MxF or NxF matrix\n\treturn np.matrix(X)", "def matrix_multiply(x, y):\r\n\r\n # handle the base case of receiving\r\n # two empty matrices\r\n if x == [] and y == []:\r\n return []\r\n\r\n # determine the number of rows and columns in the result matrix\r\n num_rows = len(x)\r\n num_cols = len(y[0])\r\n\r\n num_cross = len(x[0])\r\n\r\n # initialize the result matrix\r\n result_matrix = [[0] * num_cols for _ in xrange(num_rows)]\r\n\r\n # compute the values for each cell of the result\r\n # matrix\r\n for row_index in xrange(num_rows):\r\n for col_index in xrange(num_cols):\r\n\r\n # sum up the corresponding values from\r\n # x and y\r\n for multiplication_index in xrange(num_cross):\r\n\r\n x_value = x[row_index][multiplication_index]\r\n y_value = y[multiplication_index][col_index]\r\n\r\n result_matrix[row_index][col_index] += x_value * y_value\r\n\r\n return result_matrix", "def matrix_dot(*args):\r\n rval = args[0]\r\n for a in args[1:]:\r\n rval = theano.tensor.dot(rval, a)\r\n return rval", "def product_moment(*args, **kwargs):\n return ConfusionMatrix2.from_ccw(*args, **kwargs).matthews_corr()", "def _calc_C(self, lambdify=True):\n\n C = None\n C_func = None\n # check to see if we have our term saved in file\n C, C_func = self._load_from_file('C', lambdify)\n\n if C is None and C_func is None:\n # if no saved file was loaded, generate function\n print('Generating centrifugal and Coriolis compensation function')\n\n # first get the inertia matrix\n M = self._calc_M(lambdify=False)\n\n # C_{kj} = sum_i c_{ijk}(q) \\dot{q}_i\n # c_{ijk} = 1/2 * sum_i (\\frac{\\partial M_{kj}}{\\partial q_j} +\n # \\frac{\\partial M_{ki}}{\\partial q_j} - \\frac{\\partial M_{ij}}\n # {\\partial q_k})\n C = sp.zeros(self.N_JOINTS, self.N_JOINTS)\n for kk in range(self.N_JOINTS):\n for jj in range(self.N_JOINTS):\n for ii in range(self.N_JOINTS):\n dMkjdqi = M[kk, jj].diff(self.q[ii])\n dMkidqj = M[kk, ii].diff(self.q[jj])\n dMijdqk = M[ii, jj].diff(self.q[kk])\n C[kk, jj] += .5 * (dMkjdqi + dMkidqj - dMijdqk) * self.dq[ii]\n C[kk, jj] = C[kk, jj]\n C = sp.Matrix(C)\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n '%s/C' % self.config_folder)\n cloudpickle.dump(C, open(\n '%s/C/C' % self.config_folder, 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return C\n\n if C_func is None:\n C_func = self._generate_and_save_function(\n filename='C', expression=C,\n parameters=self.q+self.dq)\n return C_func", "def __mul__(self, other):\n 
if isinstance(other, Vector):\n # Matrix vector product\n v = Vector(list())\n for n in range(len(other.vectors)):\n v += scale(other.vectors[n][n], self.vectors[n])\n return v\n elif isinstance(other, Matrix):\n # Matrix matrix product\n if self.n != other.m:\n raise ValueError(\"Wrong fucking sizes, nøøb\")\n\n selfVectors = self.vectors\n selfColVectors = self.transpose()\n otherVectors = other.vectors\n otherColVectors = other.transpose()\n vectors = list()\n for col in range(other.n):\n cordinator = []\n\n for row in range(self.m):\n coord = 0\n\n for k in range(other.m):\n coord += (\n selfVectors[row].coords[k]\n * otherColVectors.vectors[col].coords[k]\n )\n\n cordinator.append(coord)\n\n v = Vector(cordinator)\n vectors.append(v)\n matrix = Matrix(vectors)\n matrix = matrix.transpose()\n return matrix\n elif isinstance(other, int) or isinstance(other, float): # Skalering af matrix\n for i in range(len(self.vectors)):\n self.vectors[i] *= other\n else:\n raise ValueError(\n \"Can only multiply Matrix with Matrix, Vector, Integer or Float\"\n )", "def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)", "def c( self , y , r , t = 0 ):\n \n u = np.zeros(self.m) # State derivative vector\n \n raise NotImplementedError\n \n return u", "def my_matmul(x, y):\n ##\n cmd = getattr(th, \"matmul\")\n x1, x2 = my_cut(x)\n y1, y2 = my_cut(y)\n x2y1 = cmd(x2, y1)\n x1y2 = cmd(x1, y2)\n x2y2 = cmd(x2, y2)\n ret = (x2y1 + x1y2) % int24field * int24field + x2y2\n ret = int48module(ret)\n return ret", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def matmul(xs: List[List[float]],\n ys: List[List[float]]) -> List[List[float]]:\n product = []\n for x_row in range(len(xs)):\n row = []\n for y_col in range(len(ys[0])):\n col = [ys[y_row][y_col] for y_row in range(len(ys))]\n row.append(Math.dot(xs[x_row], col))\n product.append(row)\n return product", "def compute_factor(X, v, c1, c2):\n\n assert np.shape(v)[1] == 1,\"v is not a column vector\"\n\n v = normalize_l2(v)\n\n sz_u = np.shape(X)[0]\n sz_v = np.shape(X)[1]\n\n assert sz_v == np.size(v)\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = 1000\n delta_v = 1000\n\n while delta_u > 1e-5 or delta_v > 1e-5:\n oldU = u\n oldV = v\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = npla.norm(u - oldU) / sz_u\n delta_v = npla.norm(v - oldV) / sz_v\n\n d = u.T @ X @ v\n\n return (d,u,v)", "def matrix_multiplication_loop(x_matrix, y_matrix):\n result = []\n for i, row in enumerate(x_matrix):\n row_vector = []\n for j in range(len(y_matrix[0])):\n product = 0\n for k in range(len(row)):\n product += x_matrix[i][k] * y_matrix[k][j]\n row_vector.append(product)\n result.append(row_vector)\n return result", "def mul(Z,X,Y):", "def Cvec(self):\n return vec(self.xc, self.yc)", "def test03(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n if self.rootdir:\n dirc, dird = self.rootdir+'.c', self.rootdir+'.d'\n else:\n dirc, dird = None, None\n c = bcolz.carray(a, rootdir=dirc)\n d = bcolz.carray(b, rootdir=dird)\n cr = bcolz.eval(\"a * d\")\n nr = a * b\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def alg(c):\n return c[0]*G[0] + c[1]*G[1] + c[2]*G[2]", "def 
calculate_xi(self, postJ):\n # get output of rec model\n self.batch_mu = self.mu_net(postJ)\n self.batch_u = self.u_net(postJ)\n self.batch_unc_d = self.unc_d_net(postJ)\n\n # add extra dim to batch_u, so it gets treated as column vectors when\n # iterated over\n\n self.batch_u = tf.expand_dims(self.batch_u, -1)\n\n def get_cov(acc, inputs):\n # convert output of rec model to rank-1 covariance matrix\n\n # use softplus to get positive constrained d, minimum of -15\n # since softplus will turn low numbers into 0, which become NaNs\n # when inverted\n u, unc_d = inputs\n d = tf.nn.softplus(tf.maximum(unc_d, -15.0))\n D_inv = tf.diag(1.0 / d)\n eta = 1.0 / (tf.matmul(tf.matmul(tf.transpose(u), D_inv), u) + 1.0)\n C = D_inv - eta*tf.matmul(tf.matmul(tf.matmul(D_inv, u),\n tf.transpose(u)), D_inv)\n Tr_C = tf.trace(C)\n ld_C = tf.log(eta) - tf.reduce_sum(tf.log(d)) # eq 20 in DLGM\n # coeff = ((1 - T.sqrt(eta)) / (u.T.dot(D_inv).dot(u)))\n # simplified coefficient below is more stable as u -> 0\n # original coefficient from paper is above\n coeff = eta / (1.0 + tf.sqrt(eta))\n R = (tf.sqrt(D_inv) - coeff * tf.matmul\n (tf.matmul(tf.matmul(D_inv, u), tf.transpose(u)),\n tf.sqrt(D_inv)))\n return Tr_C, ld_C, R\n\n (self.batch_Tr_C, self.batch_ld_C, self.batch_R) = tf.scan(\n get_cov, [self.batch_u, self.batch_unc_d],\n initializer=(0.0, tf.zeros([1, 1]), tf.diag(self.batch_unc_d[0])))\n\n self.batch_xi = (self.batch_mu +\n (tf.squeeze(tf.matmul(self.batch_R,\n (tf.expand_dims(tf.random_normal(\n [tf.shape(self.batch_R)[0],\n self.num_units]), -1))))))", "def circumcenter(C):\n ri, rj, rk = C.transpose(1,2,0)\n ax, ay = ri\n bx, by = rj\n cx, cy = rk\n d = 2 * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))\n ux = ((ax * ax + ay * ay) * (by - cy) + (bx * bx + by * by) * (cy - ay) + (cx * cx + cy * cy) * (\n ay - by)) / d\n uy = ((ax * ax + ay * ay) * (cx - bx) + (bx * bx + by * by) * (ax - cx) + (cx * cx + cy * cy) * (\n bx - ax)) / d\n vs = np.empty((ax.size,2),dtype=np.float64)\n vs[:,0],vs[:,1] = ux,uy\n return vs", "def cc_coefficient(x, y):\n cor = np.sum( (x-np.mean(x)) * (y-np.mean(y)) )\n norm = sqrt( np.sum((x-np.mean(x))**2) * np.sum((x-np.mean(x))**2) )\n r = cor/norm\n return r", "def Cijkl(C):\n c = np.zeros(shape=(3, 3, 3, 3))\n CC = np.zeros(shape=(9, 9))\n CC[0:6, 0:6] = C[0:6, 0:6]\n CC[6:9, 6:9] = C[3:6, 3:6]\n CC[0:6, 6:9] = C[0:6, 3:6]\n CC[6:9, 0:6] = C[3:6, 0:6]\n\n c[0, 0, 0, 0] = CC[0, 0]\n c[0, 0, 1, 1] = CC[0, 1]\n c[0, 0, 2, 2] = CC[0, 2]\n c[0, 0, 1, 2] = CC[0, 3]\n c[0, 0, 2, 0] = CC[0, 4]\n c[0, 0, 0, 1] = CC[0, 5]\n c[0, 0, 2, 1] = CC[0, 6]\n c[0, 0, 0, 2] = CC[0, 7]\n c[0, 0, 1, 0] = CC[0, 8]\n\n c[1, 1, 0, 0] = CC[1, 0]\n c[1, 1, 1, 1] = CC[1, 1]\n c[1, 1, 2, 2] = CC[1, 2]\n c[1, 1, 1, 2] = CC[1, 3]\n c[1, 1, 2, 0] = CC[1, 4]\n c[1, 1, 0, 1] = CC[1, 5]\n c[1, 1, 2, 1] = CC[1, 6]\n c[1, 1, 0, 2] = CC[1, 7]\n c[1, 1, 1, 0] = CC[1, 8]\n\n c[2, 2, 0, 0] = CC[2, 0]\n c[2, 2, 1, 1] = CC[2, 1]\n c[2, 2, 2, 2] = CC[2, 2]\n c[2, 2, 1, 2] = CC[2, 3]\n c[2, 2, 2, 0] = CC[2, 4]\n c[2, 2, 0, 1] = CC[2, 5]\n c[2, 2, 2, 1] = CC[2, 6]\n c[2, 2, 0, 2] = CC[2, 7]\n c[2, 2, 1, 0] = CC[2, 8]\n\n c[1, 2, 0, 0] = CC[3, 0]\n c[1, 2, 1, 1] = CC[3, 1]\n c[1, 2, 2, 2] = CC[3, 2]\n c[1, 2, 1, 2] = CC[3, 3]\n c[1, 2, 2, 0] = CC[3, 4]\n c[1, 2, 0, 1] = CC[3, 5]\n c[1, 2, 2, 1] = CC[3, 6]\n c[1, 2, 0, 2] = CC[3, 7]\n c[1, 2, 1, 0] = CC[3, 8]\n\n c[2, 0, 0, 0] = CC[4, 0]\n c[2, 0, 1, 1] = CC[4, 1]\n c[2, 0, 2, 2] = CC[4, 2]\n c[2, 0, 1, 2] = CC[4, 3]\n c[2, 0, 2, 0] = CC[4, 4]\n c[2, 0, 0, 1] 
= CC[4, 5]\n c[2, 0, 2, 1] = CC[4, 6]\n c[2, 0, 0, 2] = CC[4, 7]\n c[2, 0, 1, 0] = CC[4, 8]\n\n c[0, 1, 0, 0] = CC[5, 0]\n c[0, 1, 1, 1] = CC[5, 1]\n c[0, 1, 2, 2] = CC[5, 2]\n c[0, 1, 1, 2] = CC[5, 3]\n c[0, 1, 2, 0] = CC[5, 4]\n c[0, 1, 0, 1] = CC[5, 5]\n c[0, 1, 2, 1] = CC[5, 6]\n c[0, 1, 0, 2] = CC[5, 7]\n c[0, 1, 1, 0] = CC[5, 8]\n\n c[2, 1, 0, 0] = CC[6, 0]\n c[2, 1, 1, 1] = CC[6, 1]\n c[2, 1, 2, 2] = CC[6, 2]\n c[2, 1, 1, 2] = CC[6, 3]\n c[2, 1, 2, 0] = CC[6, 4]\n c[2, 1, 0, 1] = CC[6, 5]\n c[2, 1, 2, 1] = CC[6, 6]\n c[2, 1, 0, 2] = CC[6, 7]\n c[2, 1, 1, 0] = CC[6, 8]\n\n c[0, 2, 0, 0] = CC[7, 0]\n c[0, 2, 1, 1] = CC[7, 1]\n c[0, 2, 2, 2] = CC[7, 2]\n c[0, 2, 1, 2] = CC[7, 3]\n c[0, 2, 2, 0] = CC[7, 4]\n c[0, 2, 0, 1] = CC[7, 5]\n c[0, 2, 2, 1] = CC[7, 6]\n c[0, 2, 0, 2] = CC[7, 7]\n c[0, 2, 1, 0] = CC[7, 8]\n\n c[1, 0, 0, 0] = CC[8, 0]\n c[1, 0, 1, 1] = CC[8, 1]\n c[1, 0, 2, 2] = CC[8, 2]\n c[1, 0, 1, 2] = CC[8, 3]\n c[1, 0, 2, 0] = CC[8, 4]\n c[1, 0, 0, 1] = CC[8, 5]\n c[1, 0, 2, 1] = CC[8, 6]\n c[1, 0, 0, 2] = CC[8, 7]\n c[1, 0, 1, 0] = CC[8, 8]\n return c", "def mvector(B, c):\n # for Sun Mg Potential: c=1.6281689374348\n A = np.zeros(shape=4)\n A[0] = (2 / 3) * B[0]\n A[1] = 0.5 * ((2 / sqrt(3)) * B[1] - A[0])\n A[2] = -A[0] - A[1]\n A[3] = B[2] / c\n return A", "def complex_mul(x1, x2):\n assert x1.size(-1) == 2 and x2.size(-1) == 2\n\n res = torch.stack(\n (x1[..., 0]*x2[..., 0]-x1[..., 1]*x2[..., 1],\n x1[..., 0]*x2[..., 1] + x1[..., 1]*x2[..., 0]), -1)\n\n return res", "def evaluate_c(self, x, out=None, **kwargs):\n return np.zeros(0)", "def build_cooc_matrix(users):\n nprods = constants.N_PRODUCTS\n M = scipy.sparse.dok_matrix((nprods, nprods), dtype=np.int32)\n i = 0\n for user in users:\n order = user.orders[-1]\n for pid in user.sorted_pids:\n focal_ix = pid-1\n prevs = paired_pids(user, pid)\n for prev in prevs:\n key = (focal_ix, prev-1)\n #n = M.get(key, 0)\n # further centi-optimization\n n = dict.get(M, key, 0)\n M.update({key:n+1})\n # Above is like 5x faster than below (and this inner loop is current bottleneck)\n #M[focal_ix, prev-1] += 1\n i += 1\n if i % 10000 == 0:\n logging.info('Processed {} users'.format(i))\n\n return M", "def matTimesVec(M, x):\n return [dot(m, x) for m in M]", "def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def product1(a, b, c) :\n return a * b * c", "def classical(m1,m2):\n \n n = m1.shape\n result = np.zeros(n, dtype = int)\n\n for i in range(n[0]):\n for j in range(n[0]):\n for k in range(n[0]):\n result[i][j] += m1[i][k] * m2[k][j]\n return result", "def abc_matrix(a, b, c):\n ax = np.linalg.norm(a)\n a_hat = a/ax\n bx = np.dot(b, a_hat)\n by = np.linalg.norm(np.cross(a_hat, b))\n cx = np.dot(c, a_hat)\n axb = np.cross(a,b)\n axb_hat = axb / np.linalg.norm(axb)\n cy = np.dot(c, np.cross(axb_hat, a_hat))\n cz = np.dot(c, axb_hat)\n return np.array([[ax, bx, cx],[0, by, cy],[0 , 0, cz]])", "def coproduct(self, element):\n from sage.categories.tensor import tensor\n base = element.lift().parent()\n return self.tensor_square().sum(coeff * tensor([self(base[x]), self(base[y])])\n for ((x,y), coeff) in element.lift().coproduct())", "def mbvector(A, c=sqrt(8 / 3)):\n la = len(A)\n sa = A.size\n if la == sa:\n B = np.array([0.0, 0.0, 0.0])\n a1 = A[0] * np.array([1.0, 0.0])\n a2 = A[1] * np.array([-0.5, 0.5 * sqrt(3)])\n a3 = A[2] * np.array([-0.5, -0.5 * sqrt(3)])\n B[0] = a1[0] + a2[0] 
+ a3[0]\n B[1] = a1[1] + a2[1] + a3[1]\n B[2] = c * A[3]\n else:\n sa = A.shape\n B = np.zeros(shape=(sa[0], 3))\n for i in range(sa[0]):\n B[i, 0] = a1[0] + a2[0] + a3[0]\n B[i, 1] = a1[1] + a2[1] + a3[1]\n B[i, 2] = c * A[i, 3]\n return B", "def compound_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False, bsum=None):\n\n # checking type and shape\n assert A.dtype == B.dtype == C.dtype\n assert A.shape[0] == C.shape[0]\n assert B.shape[1] == C.shape[1]\n assert A.shape[1] == B.shape[0]\n\n # cleaner implementation, shall be equivalent to the one below\n # if relu:\n # C[:] = self.log(1. + self.exp(alpha * self.dot(A, B))) + beta * C\n # else:\n # C[:] = alpha * self.dot(A, B) + beta * C\n\n if beta == 0:\n if C._tensor.flags['C_CONTIGUOUS'] is not True:\n tmp = np.empty(C.shape, dtype=C.dtype)\n math_cpu.blas_dot(A._tensor, B._tensor, tmp)\n C._tensor[:] = tmp.copy()\n else:\n math_cpu.blas_dot(A._tensor, B._tensor, C._tensor)\n if relu:\n self.Relu(C._tensor, C._tensor)\n else:\n # mfma: change np.multiply to mul\n if beta != 1:\n np.multiply(C._tensor, beta, C._tensor)\n tmp = np.empty(C.shape, dtype=C.dtype)\n np.dot(A._tensor, B._tensor, tmp)\n # mfma: change np.multiply to mul\n if alpha != 1:\n np.multiply(tmp, alpha, tmp)\n if relu:\n self.Relu(tmp, tmp)\n np.add(C._tensor, tmp, C._tensor)\n if bsum is not None:\n bsum[:] = self.sum(C, 1)\n\n return C", "def __matmul__(self, tensor):\n return self.matmul(tensor)", "def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def least_squares(Cui, X, Y, regularization, num_threads):\n users, factors = X.shape\n YtY = Y.T.dot(Y)\n\n for u in range(users):\n # accumulate YtCuY + regularization*I in A\n A = YtY + regularization * np.eye(factors)\n\n # accumulate YtCuPu in b\n b = np.zeros(factors)\n\n for i, confidence in nonzeros(Cui, u):\n factor = Y[i]\n A += (confidence - 1) * np.outer(factor, factor)\n b += confidence * factor\n\n # Xu = (YtCuY + regularization * I)^-1 (YtCuPu)\n X[u] = np.linalg.solve(A, b)", "def compCoeff_CGP(i, A, c, N):\n Ap = np.copy(A)\n out = c[i, 0] * np.eye(N)\n j = 1\n while j <= i:\n # compute A to the power p\n if j > 1:\n Ap = Ap.dot(A)\n\n # add to the polynome\n out += c[i, j] * Ap\n j += 1\n\n return out", "def compute_coriolis(self):\r\n # compute the Coriolis force\r\n self.coriolis.assign(\r\n project(-2*self.rho*cross(self.omega, self.u), self.V))", "def _generate_mult_process(X, mat, inits):\n M = np.empty_like(X, dtype=float)\n M[..., 0] = inits[X[..., 0]]\n M[..., 1:] = mat[X[..., :-1], X[..., 1:]]\n np.cumprod(M, axis=-1, out=M)\n return M", "def compute_operator(self, Xc, Yc):\n\n U, s, V = self._compute_svd(Xc)\n\n self._Atilde = (np.linalg.multi_dot([U.T.conj(), (Yc), (V)])\n * np.reciprocal(s))\n\n self._compute_eigenquantities()\n self._compute_modes(Yc, U, s, V)\n\n self._slow_modes = (np.abs(old_div(np.log(self.eigenvalues),\n self._eigs_divider))) <= self._rho", "def u(self,c,x):\r\n alpha = self.alpha ; sigma = self.sigma\r\n \r\n ctilde = c - alpha*x\r\n u = ctilde**(1-sigma) / (1-sigma)\r\n \r\n return u" ]
[ "0.650418", "0.650212", "0.6441079", "0.6313763", "0.6310517", "0.62949276", "0.62782884", "0.62631303", "0.61975265", "0.6096459", "0.608041", "0.606508", "0.6038961", "0.6011421", "0.60068315", "0.59920776", "0.59303707", "0.58836865", "0.5879482", "0.58772385", "0.58575416", "0.5838892", "0.58091784", "0.5796622", "0.57843477", "0.57586485", "0.57561576", "0.57366264", "0.5728224", "0.57246524", "0.572282", "0.57148993", "0.57086194", "0.5698373", "0.5695539", "0.5695106", "0.569498", "0.5687259", "0.56838983", "0.567735", "0.566609", "0.5664836", "0.5649978", "0.5649978", "0.5646444", "0.56459975", "0.5616804", "0.5613991", "0.5613991", "0.56069696", "0.5602995", "0.56016886", "0.5601508", "0.5595393", "0.5594796", "0.55833477", "0.5577543", "0.55557126", "0.55539906", "0.5553841", "0.5552608", "0.55387753", "0.55368805", "0.55332106", "0.5529269", "0.5527718", "0.5523153", "0.55210274", "0.5515821", "0.55033433", "0.55023336", "0.5484248", "0.54813796", "0.5480753", "0.5479537", "0.5474091", "0.546962", "0.54678774", "0.54670525", "0.5465292", "0.5459378", "0.5458223", "0.5457664", "0.54558104", "0.54443145", "0.54395616", "0.5437733", "0.543512", "0.54349375", "0.543476", "0.54323757", "0.5431858", "0.5431346", "0.54254824", "0.5424391", "0.54220825", "0.54190916", "0.5415009", "0.5414828", "0.5412295" ]
0.6389226
3
Compute the matrix-vector product y = Tu, where T is a Toeplitz matrix. All matrices are real.
def toeplitz_multiplication(u, c, r=None):
    n = len(u)
    if r is None:
        r = c
    u1 = zeros((2*n))
    u1[0:n] = u
    c = np.concatenate((c, [0], r[-1:0:-1]))
    y1 = circulant_multiplication(u1, c)
    return y1[0:n]
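This document embeds the n-by-n Toeplitz matrix T (first column c, first row r) into a 2n-by-2n circulant matrix and reuses the FFT-based circulant product, keeping the cost at O(n log n). A self-contained sketch with assumed NumPy/SciPy imports, plus a check against scipy.linalg.toeplitz, is given below; zeros and circulant_multiplication are resolved explicitly here since the stored snippet leaves them implicit.

    import numpy as np
    from scipy.fft import fft, ifft
    from scipy.linalg import toeplitz

    def circulant_multiplication(u, a):
        # y = C @ u for the circulant matrix C with first column a
        return np.real(ifft(fft(a) * fft(u)))

    def toeplitz_multiplication(u, c, r=None):
        # y = T @ u, where T has first column c and first row r
        # (r defaults to c, i.e. a symmetric Toeplitz matrix).
        n = len(u)
        if r is None:
            r = c
        u1 = np.zeros(2 * n)
        u1[:n] = u
        # first column of the 2n x 2n circulant embedding of T
        c1 = np.concatenate((c, [0], r[-1:0:-1]))
        return circulant_multiplication(u1, c1)[:n]

    # quick check against an explicitly built Toeplitz matrix
    c = np.array([4.0, 1.0, 0.5, 0.25])
    r = np.array([4.0, 2.0, 1.0, 0.5])
    u = np.array([1.0, -2.0, 3.0, 0.5])
    assert np.allclose(toeplitz_multiplication(u, c, r), toeplitz(c, r) @ u)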
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product", "def matmul(x, y):\n if len(list(y.size())) == 2:\n # if one of them is a vector (i.e. wanting to do MV mult)\n z = torch.zeros(2, x.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.mv(x[0], y[0]) - torch.mv(x[1], y[1])\n z[1] = torch.mv(x[0], y[1]) + torch.mv(x[1], y[0])\n\n if len(list(y.size())) == 3:\n z = torch.zeros(\n 2, x.size()[1], y.size()[2], dtype=torch.double, device=x.device\n )\n z[0] = torch.matmul(x[0], y[0]) - torch.matmul(x[1], y[1])\n z[1] = torch.matmul(x[0], y[1]) + torch.matmul(x[1], y[0])\n\n return z", "def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)", "def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):\n\n y = fft(D_phi*u)\n a = Lambda_1*fft(D_psi*(1/D_phi)*ifft(Lambda_2*y))\n b = Lambda_3*fft(D_psi*(1/D_phi)*ifft(Lambda_4*y))\n y = (1/D_psi)*real(ifft(a-b))/(x_0*(phi-psi))\n \n return y", "def matmul(x, y):\n return np.matmul(x, y)", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def bd_toeplitz_inverse_multiplication(u, *arrs):\n \n y = zeros(shape(u))\n n_start = 0\n n_end = 0\n for t in arrs:\n n_start = n_end\n n_end += len(t[3]) # len(t[3]) is the length of the block\n y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)\n assert len(y) == n_end\n return y", "def test_matrix_product(self, use_cache):\n\n key = jrandom.PRNGKey(0)\n dim = 50\n max_power = 25\n\n matrix = jrandom.normal(key, (dim, dim)) / 10\n vector = jnp.ones((dim,), dtype=jnp.float32)\n\n if use_cache:\n mpstate = model_utils.CachedMatrixPowerState.precompute(matrix, max_power)\n else:\n mpstate = model_utils.LazyMatrixPowerState(matrix)\n\n for t in range(max_power):\n result = mpstate.matrix_power_multiply(vector, t)\n expected = np.linalg.matrix_power(matrix, t) @ vector\n\n np.testing.assert_array_almost_equal(result, expected, decimal=1)", "def mul(Z,X,Y):", "def _dot_product_attention_inner_relative(x, y, z, transpose):\n batch_size, heads, length, _ = x.size()\n\n # xy_matmul is [batch_size, heads, length, length or depth]\n xy_matmul = torch.matmul(x, y if not transpose else y.transpose(-2, -1))\n # x_t is [length, batch_size, heads, length or depth]\n x_t = x.permute(2, 0, 1, 3)\n # x_t_r is [length, batch_size * heads, length or depth]\n x_t_r = x_t.view(length, batch_size * heads, -1)\n # x_tz_matmul is [length, batch_size * heads, length or depth]\n x_tz_matmul = torch.matmul(x_t_r, z if not transpose else z.transpose(-2, -1))\n # x_tz_matmul_r is [length, batch_size, heads, length or depth]\n x_tz_matmul_r = x_tz_matmul.view(length, batch_size, heads, -1)\n # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]\n x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)\n\n return xy_matmul + x_tz_matmul_r_t", "def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def matvec(self, x):\n return self * x", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 
2)\n return prod", "def naive_matrix_vector_dot(x, y):\n assert len(x.shape) == 2\n assert len(y.shape) == 1\n assert x.shape[1] == y.shape[0]\n\n z = np.zeros(x.shape[0])\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n z[i] += x[i, j] * y[j]\n return z", "def dot_prod(u,v):\n each_product = []\n for i in range(len(u)):\n each_product.append(u[i] * v[i])\n return sum(each_product)", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def matmul(xs: List[List[float]],\n ys: List[List[float]]) -> List[List[float]]:\n product = []\n for x_row in range(len(xs)):\n row = []\n for y_col in range(len(ys[0])):\n col = [ys[y_row][y_col] for y_row in range(len(ys))]\n row.append(Math.dot(xs[x_row], col))\n product.append(row)\n return product", "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)", "def apply(self,v):\n return np.tensordot(self._transform, v, axes=([1],[0])) \\\n + self._translation", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res", "def vectordot(a, b):\n return np.sum(a * b, 1)", "def __matmul__(self, tensor):\n return self.matmul(tensor)", "def matmul(x, y, _pub):\n if x.shape[-1] != y.shape[-2]:\n pass # TODO: REPORT ERROR\n res = paillier_gpu.matmul_impl(x.flatten(), y.flatten(order='F'), x.shape, y.shape)\n\n return res", "def mat(self) -> np.ndarray:\n Tp = ToeplitzificationOperator(P=self.P, M=self.M, dtype=self.x.dtype)\n return Tp.matvec(self.x)", "def scalar_mult(v, u):\n return [v[i] * u[i] for i in range(len(v))]", "def simple_doct_product(u, v):\n v = [i / (sum(v)) for i in v]\n\n return np.dot(u, v)", "def vdot(x, v, pub):\n x_flatten = x.flatten()\n v_flatten = v.flatten()\n mul_res = paillier_gpu.mul_impl(v_flatten, x_flatten)\n\n return paillier_gpu.sum_impl(mul_res)", "def matrix_dot(*args):\r\n rval = args[0]\r\n for a in args[1:]:\r\n rval = theano.tensor.dot(rval, a)\r\n return rval", "def naive_vector_dot(x, y):\n assert len(x.shape) == 1\n assert len(y.shape) == 1\n assert x.shape[0] == y.shape[0]\n\n z = 0\n for i in range(x.shape[0]):\n z += x[i] * y[i]\n return z", "def __mul__(left, right):\n \n if isinstance(left, Plucker) and isinstance(right, Plucker):\n # reciprocal product\n return np.dot(left.uw, right.v) + np.dot(right.uw, left.v)\n elif isinstance(left, Plucker) and arg.ismatrix(right, (4,None)):\n return left.skew @ right; # postmultiply by 4xN", "def unwhiten(self, U, A, m):\n X = np.matmul(A, U.T).T\n X += m\n\n return X", "def RHS(y,t):\r\n\r\n return np.multiply(A.dot(y),ones-y)-beta*y", "def _multiply_matrix(self, v):\n\n self.inputs.grad.data.zero_()\n\n with torch.no_grad():\n v_features = self.lpips_model.features(self.inputs.detach() +\n self.h * v)\n D_phi_v = (\n normalize_flatten_features(v_features) -\n self.input_features\n ) / self.h\n\n torch.sum(self.input_features * D_phi_v).backward(retain_graph=True)\n\n return self.inputs.grad.data.clone()", "def 
transforms_multiply(t0s, t1s):\r\n \r\n return ut.matrix_multiply(t0s, t1s)", "def matrix_deflation(X_curr, Y_curr, X_orig, Y_orig, u, v):\n Xp = X_curr\n Yp = Y_curr\n #u = u / (np.sqrt(np.sum(u**2)+1e-7))\n #v = v / (np.sqrt(np.sum(v**2)+1e-7))\n\n qx = np.dot(Xp.T,np.dot(X_orig,u))\n qx = qx / (np.sqrt(np.sum(qx**2)+1e-7))\n #qx = qx.astype('float16')\n Xp = Xp - np.dot(Xp,qx).dot(qx.T)\n X = Xp.reshape(1,Xp.shape[0],Xp.shape[1])\n\n qy = np.dot(Yp.T,np.dot(Y_orig,v))\n qy = qy / (np.sqrt(np.sum(qy**2)+1e-7))\n #qy = qy.astype('float16')\n Yp = Yp - np.dot(Yp,qy).dot(qy.T)\n Y = Yp.reshape(1,Yp.shape[0],Yp.shape[1])\n\n return X, Y", "def u_t(self):\n\t\tdim = self.dim \n\t\ttim_all = self.tim_all\n\t\t#ctrl = self.ctrl\n\t\tH0 = self.H0\n\t\tHctrl = self.Hctrl\n\n\t\tu_all = np.zeros((tim_all+1,dim,dim),dtype = complex)\n\t\tu_all[0,:,:] = np.eye(dim)\n\n\t\tfor tim in xrange(tim_all):\n\t\t\tH = H0 + Hctrl[tim]#np.matrix( ctrl[i,tim] * np.array(Hctrl[i]))\n\t\t\tu_all[tim+1,:,:] = np.dot(self.u_dt(H,tim), u_all[tim,:,:])\n\n\n\t\treturn u_all", "def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z", "def product_on_basis(self, t1, t2):\n return tensor( (module.monomial(x1)*module.monomial(x2) for (module, x1, x2) in zip(self._sets, t1, t2)) ) #.", "def outer_prod(x, y):\n if len(list(x.size())) != 2 or len(list(y.size())) != 2:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(2, x.size()[1], y.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.ger(x[0], y[0]) - torch.ger(x[1], -y[1])\n z[1] = torch.ger(x[0], -y[1]) + torch.ger(x[1], y[0])\n\n return z", "def __mul__(self, other):\n if isinstance(other, Vector):\n # Matrix vector product\n v = Vector(list())\n for n in range(len(other.vectors)):\n v += scale(other.vectors[n][n], self.vectors[n])\n return v\n elif isinstance(other, Matrix):\n # Matrix matrix product\n if self.n != other.m:\n raise ValueError(\"Wrong fucking sizes, nøøb\")\n\n selfVectors = self.vectors\n selfColVectors = self.transpose()\n otherVectors = other.vectors\n otherColVectors = other.transpose()\n vectors = list()\n for col in range(other.n):\n cordinator = []\n\n for row in range(self.m):\n coord = 0\n\n for k in range(other.m):\n coord += (\n selfVectors[row].coords[k]\n * otherColVectors.vectors[col].coords[k]\n )\n\n cordinator.append(coord)\n\n v = Vector(cordinator)\n vectors.append(v)\n matrix = Matrix(vectors)\n matrix = matrix.transpose()\n return matrix\n elif isinstance(other, int) or isinstance(other, float): # Skalering af matrix\n for i in range(len(self.vectors)):\n self.vectors[i] *= other\n else:\n raise ValueError(\n \"Can only multiply Matrix with Matrix, Vector, Integer or Float\"\n )", "def mult(p, q):\n if p.ndim == 1 and q.ndim > 1:\n p = np.tile(p,(q.shape[0],1))\n if q.ndim == 1 and p.ndim > 1:\n q = np.tile(q,(p.shape[0],1))\n if q.ndim == 1 and p.ndim == 1:\n p = 
p.reshape((1,4))\n q = q.reshape((1,4))\n\n ps = p[:,3]\n qs = q[:,3]\n pv = p[:,:3]\n qv = q[:,:3]\n\n pq = np.empty_like(p)\n pq[:,3] = ps * qs \n pq[:,3] -= arraylist_dot(pv, qv).flatten()\n pq[:,:3] = ps[:,np.newaxis] * qv \n pq[:,:3] += pv * qs[:,np.newaxis] \n pq[:,:3] += np.cross(pv , qv)\n\n #opposite sign due to different convention on the basis vectors\n #pq *= -1\n return pq", "def dot_vectors(u, v):\n return u[0] * v[0] + u[1] * v[1] + u[2] * v[2]", "def _eval_transpose(self):\n coeff, matrices = self.as_coeff_matrices()\n return MatMul(\n coeff, *[transpose(arg) for arg in matrices[::-1]]).doit()", "def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def my_matmul(x, y):\n ##\n cmd = getattr(th, \"matmul\")\n x1, x2 = my_cut(x)\n y1, y2 = my_cut(y)\n x2y1 = cmd(x2, y1)\n x1y2 = cmd(x1, y2)\n x2y2 = cmd(x2, y2)\n ret = (x2y1 + x1y2) % int24field * int24field + x2y2\n ret = int48module(ret)\n return ret", "def matrix_dot(*args):\n rval = args[0]\n for a in args[1:]:\n rval = tm.dot(rval, a)\n return rval", "def python_nonsquare_matrix_mult(matrix):\n\n transposed_matrix = np.zeros([matrix.shape[1],matrix.shape[0]])\n start = time.time()\n # for i in range(matrix.shape[0]):\n # for j in range(matrix.shape[1]):\n # transposed_matrix[j,i] = matrix[i,j]\n\n transposed_matrix = np.transpose(matrix)\n product = matrix.dot(transposed_matrix)\n\n # transposed_matrix = np.transpose(matrix)\n end = time.time()-start\n\n # print(\"Python Golden Transpose: %s\" % product)\n # print('python transpose time: %.2E' % end)\n return [product, end]", "def test_gemm_with_vector():\r\n X, Y, Z, a, b = XYZab()\r\n v = T.vector()\r\n\r\n def my_just_gemm(o):\r\n i = [X, Y, Z, a, b, v]\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, )]\r\n rval = just_gemm(i, o, ishapes=ishapes)\r\n\r\n my_just_gemm([v + T.dot(X, Y) * a + Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) + b * Z])\r\n my_just_gemm([v + b * Z + a * T.dot(X, Y)])\r\n my_just_gemm([v + T.dot(X, Y) * a - Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) - b * Z])\r\n my_just_gemm([v + b * Z - a * T.dot(X, Y)])\r\n\r\n #with N multiplications instead of just one\r\n my_just_gemm([v + (b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([v + Z + T.dot(X, Y)])\r\n my_just_gemm([v + Z * b + T.dot(X, Y)])\r\n my_just_gemm([v + Z + a * b * a * T.dot(X, Y)])\r\n my_just_gemm([v + (b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([Z - T.dot(X, Y) + v])\r\n my_just_gemm([Z * b - T.dot(X, Y) + v])\r\n my_just_gemm([Z - a * b * a * T.dot(X, Y) + v])", "def de_mult(self,z):\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return (z+1.)**(3.*(1.+self.w))", "def multiply(traj, result_list):\n z=traj.x*traj.y\n result_list[traj.v_idx] = z", "def multiply(t):\n return mul(*t)", "def rhs(t, conc, S_matrix, educt_matrix, kin_par):\n fluxes = calculate_fluxes(conc, S_matrix, educt_matrix, kin_par)\n return np.dot(S_matrix, fluxes)", "def multiply_vector(self, dv, spm):\n product = []\n for a, b in zip(dv, spm):\n product.append(a * b)\n return product", "def multiply(matrix, vector):\n result = []\n for row in matrix:\n assert len(row) == len(vector)\n result.append(sum([a*b for (a, b) in zip(row, vector)]))\n return Vector3D.from_list(result)", "def mat_vec_product(self, psi, t):\n\tx = zeros(self.vib_basis_size * len(self.my_tasks), dtype = complex)\n\n\t#Matrix 
vector product.\n\tfor i, j in enumerate(self.my_tasks):\n\t slice_x = slice(i * self.vib_basis_size, (i + 1) * self.vib_basis_size)\n\t slice_psi = slice(j * self.vib_basis_size, (j + 1) * self.vib_basis_size)\n\t \n\t x[slice_x] = dot(self.h_0[:,:,i], psi[slice_psi])\n\t\n\ty = dot(self.h_1, psi)\n\n\t#Weigh with field strength, and add components.\n\tpsi_final = x + self.time_function(t) * y\n\t\n\treturn psi_final", "def phi_t(self):\n\t\tdim = self.dim\n\t\ttim_all = self.tim_all \n\t\tphi_all = np.zeros((tim_all+1,dim,1),dtype = complex)\n\t\tphi_all[0,:,:] = self.phi_i[:]\n\t\tu_all = self.u_t()\n\n\t\tfor tim in xrange(tim_all):\n\t\t\tphi_all[tim+1,:,:] = np.dot(u_all[tim+1,:,:], phi_all[0,:,:])\n\t\t\n\t\treturn phi_all", "def dot_product(u, v):\n ret = 0.0\n for i in range(len(u)):\n ret += float(float(u[i]) * float(v[i]))\n return ret", "def matrix_multiplication_loop(x_matrix, y_matrix):\n result = []\n for i, row in enumerate(x_matrix):\n row_vector = []\n for j in range(len(y_matrix[0])):\n product = 0\n for k in range(len(row)):\n product += x_matrix[i][k] * y_matrix[k][j]\n row_vector.append(product)\n result.append(row_vector)\n return result", "def transform(self, v):\n #matrix vector multiply, convert from matrix to array type at the end\n return np.array( v * self.M )", "def f(t, x, n, v):\n total = 0\n for i in range(n+1):\n for j in range(n+1):\n for k in range(v):\n total = t[i][j] * x[i][j][k]", "def dot_product_scores(q_vectors: T, ctx_vectors: T) -> T:\n # q_vector: n1 x D, ctx_vectors: n2 x D, result n1 x n2\n r = torch.matmul(q_vectors, torch.transpose(ctx_vectors, 0, 1))\n return r", "def hadamard(u: np.ndarray, v: np.ndarray) -> np.ndarray:\n\n return u * v", "def _compute_t_matrix(self):\n self.t_matrix = self._kronecker_product(\n tf.diag(tf.reshape(self.likelihood_variances, [-1])),\n tf.eye(self.n_points_int, dtype=tf.float64))\n return", "def test_trotter_hamiltonian_scalar_mul(nqubits=3):\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, trotter=True)\n target_ham = 2 * hamiltonians.TFIM(nqubits, h=1.0, numpy=True)\n local_dense = (2 * local_ham).dense\n np.testing.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, trotter=True)\n local_dense = (local_ham * 2).dense\n np.testing.assert_allclose(local_dense.matrix, target_ham.matrix)", "def scalar_mult(x, y, out=None):\n if out is None:\n out = torch.zeros_like(y)\n else:\n if out is x or out is y:\n raise RuntimeError(\"Can't overwrite an argument!\")\n\n out[0] = (x[0] * y[0]) - (x[1] * y[1])\n out[1] = (x[0] * y[1]) + (x[1] * y[0])\n\n return out", "def monomio(x,datos_x,datos_y):\n matriz=np.zeros([datos_x.shape[0],datos_x.shape[0]])\n for j in range(datos_x.shape[0]): #Se contruye la matriz de vandermonde\n matriz[:,j]= datos_x**(j)\n matriz,datos_y=pivoteo_parcial(matriz,datos_y)\n x1= descompo_LU(matriz,datos_y)# se resulve el sistema de ecuaciones por metodo directo\n\n puntos=[] #se almacenan los valores de y para cada punto de x que se quiera calcular \n\n for p in x: #va a ir tomando los valores de x uno por uno \n prod=np.zeros(x1.shape[0])\n for i in range(x1.shape[0]):\n if i==0:\n prod[i]=1\n else:\n prod[i]=prod[i-1]*p #Se hace el calculo de los polimonios con todos los valores de x \n solucion=x1@prod\n puntos.append(solucion) # se agregan los valores de y a la lista final \n puntos=np.array(puntos)# se convierte la lista en array para mejor manejo\n\n return puntos", "def matrix_mult(m1, m2):\n pass", "def 
calcul_travail_ext(x,modU):\n\tr = np.sqrt(x[:,0]*x[:,0] + x[:,1]*x[:,1])\n\tf = r[:]*modU[:]*modU[:]\n\tW = PointMilieu(r,f)\n\treturn W", "def __matmul__(self, qubit):\n if isinstance(qubit, str):\n qubit = self.get_index(qubit)\n return self.compiled[qubit].y", "def interior_tensor_product(mx, dim_a, dim_b, e=None):\n assert _np.shape(mx) == (dim_a * dim_b, dim_a * dim_b), \"Dimensions do not agree with matrix size\"\n assert _np.shape(e)[0] == _np.shape(e)[1], \"e should be a square matrix\"\n basis_a = matrix_units(dim_a)\n basis_b = matrix_units(dim_b)\n return sum((_np.trace(_np.dot(mx, _np.kron(unit_a, unit_b).T)) * multikron([unit_a, e, unit_b])\n for unit_a in basis_a for unit_b in basis_b))", "def test_mueller_product(self, ):\n mdims = ('mueller_v', 'mueller_h')\n mm_1 = xr.DataArray(np.random.rand(4, 4, ), dims=mdims, )\n mm_2 = xr.DataArray(np.identity(4, ), dims=mdims, )\n sv_1 = xr.DataArray(np.random.rand(4, ), dims=('stokes', ), )\n\n assert_almost_equal(mm_1.values, mueller_product(mm_1, mm_2).values, )\n assert_almost_equal(mm_1.values, mueller_product(mm_2, mm_1).values, )\n assert_almost_equal(sv_1.values, mueller_product(mm_2, sv_1).data, )", "def matrix_mult_vec(matrix_a, x):\n m = len(matrix_a)\n b = [0 for i in xrange(m)]\n for i in xrange(m):\n b[i] = dot_product(matrix_a[i], x)\n return b", "def alternate_ls (u_num, Y, P, C, reg):\n\n\t# get # of items/users and # of latent factors\n\t[i_num, f_num] = Y.shape\n\n\t# output buffer\n\tX = np.zeros((u_num, f_num))\n\n\t# precalculate YtY to improve the performance\n\tYtY = Y.T * Y\n\n\t# iterate over each user/item\n\tfor u in range(u_num):\n\n\t\t# store the diagonal elements of the matrix Cu discussed in the paper in a vector\n\t\tCu = C[u,:]\n\n\t\t# store the coresponding row/column of the preference matrix\n\t\tPu = P[u,:]\n\n\t\t# compute Cu-I\n\t\tCu_I = Cu - 1\n\n\t\t# calculate Yt(Cu-I)Y\n\t\tYtCu_IY = np.zeros((f_num, f_num))\n\t\tCuIY = np.multiply(Y, Cu_I.T) # weight each row of Y with Cu-I\n\t\tfor row in range(f_num):\n\t\t\tfor col in range(f_num):\n\t\t\t\tYtCu_IY[row,col] = Y[:,row].T * CuIY[:,col]\n\t\t\n\t\t# left term : ((YtCuY + regI)^(-1)) = (YtY + Yt(Cu-I)Y + regI)^(-1)\n\t\tleft_inv = YtY + YtCu_IY + reg*np.eye(f_num)\n\t\tleft = np.linalg.inv(left_inv)\n\n\t\t# right term : YtCuPu\n\t\tright = Y.T * np.multiply(Cu.T, Pu.T)\n\n\t\t# compute the latent factor of the user/item\n\t\tx = left * right\n\t\t\n\t\t# store it in a matrix\n\t\tX[u,:] = x.T\n\n\t# return an MxF or NxF matrix\n\treturn np.matrix(X)", "def test_vector_dot_product(self):\n\n # Example 1.2\n vector_p = np.array([0.5, 0.0, 0.5])\n vector_q = np.array([0.5, 0.5, 0.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n magnitude_ref_nm = 5.0/16.0\n\n vector_d = vector_p - vector_q\n magnitude_nm = vector.dot_product(crystal, vector_d, vector_d)\n\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n magnitude_ref_nm = 5.0/4.0\n\n magnitude_nm = vector.dot_product(crystal, vector_p, vector_q)\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n magnitude_nm = vector.dot_product(crystal, vector_q, vector_p)\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n #self.fail(\"Test if the testcase is working.\")", "def _factorsY(self, inputs):\n return tensor.dot(inputs[1], self.wyf)", "def compute_hessian_vector_product(self, function, arguments):", "def 
__mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def Pol_Newton_un_punto(x,datos_x,datos_y):\n n = datos_x.shape[0]\n matriz=np.ones([n,n])\n for j in range(n):\n for i in range(n):\n if j>i:\n matriz[i][j]=0\n else:\n producto=1\n for k in range(j):\n producto=producto*(datos_x[i]-datos_x[k])\n matriz[i][j]=producto\n matriz,datos_y1= pivoteo_parcial(matriz,datos_y)\n x1 = descompo_LU(matriz,datos_y1)\n prod=np.zeros(x1.shape[0])\n for i in range(n):\n if i==0:\n prod[i]=1\n else: \n prod[i]=prod[i-1]*(x-datos_x[i-1])\n solucion=x1@prod\n return solucion", "def predict_mat(self):\n return self.u.dot(self.v.T)", "def learned_RHS(t,y,q,x,desc):\n \n \n Ux_mat = create_Ux_mat(x)\n Uxx_mat = create_Uxx_mat(x)\n\n return (q[desc.index('u_{x}')]*Ux_mat.dot(y) + \n q[desc.index('u_{xx}')]*Uxx_mat.dot(y) +\n q[desc.index('u^2')]*y**2 +\n q[desc.index('u')]*y + \n q[desc.index('u^2u_{x}')]*(y**2)*Ux_mat.dot(y) + \n q[desc.index('uu_{x}')]*y*Ux_mat.dot(y) + \n q[desc.index('u^2u_{xx}')]*(y**2)*Uxx_mat.dot(y) + \n q[desc.index('uu_{xx}')]*y*Uxx_mat.dot(y) + \n q[desc.index('u_{x}^2')]*Ux_mat.dot(y)**2)", "def __mul__(self, tensor):\n return self.mul(tensor)", "def inner_product(alpha, F, beta):\n return np.dot(alpha, np.dot(F, beta))", "def test_matmul_vv(self):\n self.check_dot_vv(matmul_usecase, \"'@'\")", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def matTimesVec(M, x):\n return [dot(m, x) for m in M]", "def tensdot(polyList,order,trunc):\n\n def reshape(poly,expo):\n\n poly.coef = poly[:][:,expo]\n poly.expo = expo\n return poly\n\n dim = len(polyList)\n expo = indextens(order,dim,trunc)\n nbrPoly = expo.shape[1]\n coef = np.eye(nbrPoly)\n\n # Tensor product of the univariate basis\n\n for i in range(dim): polyList[i] = reshape(polyList[i],expo[i])\n for i in range(nbrPoly): coef[i] = np.prod([polyList[j][expo[j,i]] for j in range(dim)],axis=0)\n\n poly = Polynomial(expo,coef,1)\n return poly", "def __mul__(self,v2):\n\t\tif(isinstance(v2,Vect2D)):\n\t\t\treturn np.dot(self._vec,v2._vec)\n\t\telse:\n\t\t\treturn Vect2D(v2*self._vec)", "def T(self) -> BaseMatrix:", "def T(self) -> BaseMatrix:", "def eval_f(self, u, t):\n f = self.f_init\n f[:] = self.A.dot(u.flatten()).reshape(self.nvars)\n return f", "def __matmul__(self, csys):\n self._transform(csys)\n return self", "def vectorMultiply(v, f):\n return [x * f for x in v]", "def outer_product(x):\n return keras.backend.batch_dot(\n x[0]\n , x[1]\n , axes=[1,1]\n ) / x[0].get_shape().as_list()[1]", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n y = x.matmul(self.melmat)\n return y", "def mul_dense(x, y): # pragma: no cover\n return x * y", "def vec_dot(x, y):\r\n return sum(a * b for a, b in zip(x, y))", "def _parameter_dot_product(x: JaxComplexArray, y: JaxComplexArray, n_axes: int) -> JaxRealArray:\n axes = tuple(range(-n_axes, 0))\n return jnp.sum(x * y, axis=axes).real", "def scalar_product(self, u, v):\n sp = 0.0\n n1 = len(u)\n n2 = len(v)\n i = j = 0\n d = self.dictionary_db\n while (i < n1 and j < n2):\n if u[i].word_info(d).index > v[j].word_info(d).index:\n j += 1\n elif v[j].word_info(d).index > u[i].word_info(d).index:\n i += 1\n else:\n sp += self.tf_idf(u[i]) * 
self.tf_idf(v[j])\n i += 1\n j += 1\n\n return sp", "def apply(self, v):\n u = np.zeros(self.Dimension, dtype=complex)\n for me in self.Elements:\n for index in range(v.Elements.size):\n if index == me.j:\n u[me.i] += me.val * v.Elements[index]\n u = Vector(u) \n return u" ]
[ "0.7003199", "0.6513981", "0.64759356", "0.6454179", "0.6377554", "0.6326698", "0.6245358", "0.620894", "0.6208685", "0.61977005", "0.6195611", "0.61694974", "0.6168602", "0.6134469", "0.6106113", "0.60868716", "0.6082444", "0.60823506", "0.6070701", "0.60688484", "0.6063607", "0.60420716", "0.60411894", "0.6021142", "0.6020375", "0.60189974", "0.5976037", "0.5971896", "0.59694517", "0.5931813", "0.5929974", "0.5898846", "0.5893105", "0.5855257", "0.58545524", "0.58442146", "0.5836224", "0.5832956", "0.5814374", "0.58080685", "0.57904613", "0.57875574", "0.57833064", "0.57730234", "0.5772904", "0.57687515", "0.57664347", "0.57564145", "0.57501626", "0.5744411", "0.5741991", "0.57292855", "0.57247925", "0.5714161", "0.5701248", "0.5698843", "0.56850976", "0.56831443", "0.56521136", "0.56448597", "0.5643709", "0.5640813", "0.5639851", "0.56366783", "0.5632932", "0.5632897", "0.5624747", "0.5620976", "0.5618426", "0.5617202", "0.5615725", "0.5612312", "0.5605161", "0.5604705", "0.5597466", "0.5597071", "0.5594331", "0.5587542", "0.5583664", "0.55818576", "0.5576618", "0.55596435", "0.55546635", "0.55527973", "0.55502707", "0.5547592", "0.5543349", "0.55428696", "0.55401945", "0.55401945", "0.5539136", "0.553875", "0.55359316", "0.55332065", "0.5527199", "0.55229384", "0.5518009", "0.55145466", "0.54998636", "0.5499449" ]
0.63380134
5
Read in labels from digitStruct.mat file to create a dict of image file name and corresponding labels
def read_labels(digitstruct_file):\n    labels = dict()\n    for dsObj in tqdm(yieldNextDigitStruct(digitstruct_file), ncols=50):\n        image_labels = []\n        for bbox in dsObj.bboxList:\n            image_labels.append(bbox.label)\n        labels[dsObj.name] = image_labels\n    return labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_imagenet_as_dict(self):\n real_file_path = os.path.realpath(self.map_file)\n if not os.path.exists(real_file_path):\n raise IOError(\"map file {} not exists\".format(self.map_file))\n\n label_dict = {}\n with open(real_file_path) as fp:\n line = fp.readline()\n while line:\n labels = line.split(\" \")\n label_dict[labels[1]] = labels[0]\n line = fp.readline()\n\n # get all the dir which are n02087046, n02094114, n02109525\n dir_paths = {}\n for item in label_dict:\n real_path = os.path.join(self.image_dir, label_dict[item])\n if not os.path.isdir(real_path):\n logger.warning(\"{} dir is not exist\".format(real_path))\n continue\n dir_paths[item] = real_path\n\n if not dir_paths:\n raise PathNotExistsError(\"not valid image dir in {}\".format(self.image_dir))\n\n # get the filename, label and image binary as a dict\n for label in dir_paths:\n for item in os.listdir(dir_paths[label]):\n file_name = os.path.join(dir_paths[label], item)\n if not item.endswith(\"JPEG\") and not item.endswith(\"jpg\"):\n logger.warning(\"{} file is not suffix with JPEG/jpg, skip it.\".format(file_name))\n continue\n data = {}\n data[\"file_name\"] = str(file_name)\n data[\"label\"] = int(label)\n\n # get the image data\n real_file_path = os.path.realpath(file_name)\n image_file = open(real_file_path, \"rb\")\n image_bytes = image_file.read()\n image_file.close()\n if not image_bytes:\n logger.warning(\"The image file: {} is invalid.\".format(file_name))\n continue\n data[\"image\"] = image_bytes\n yield data", "def extract_labels(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def get_images_and_labels(tampered_path, authentic_path):\n tampered_dir = tampered_path\n authentic_dir = authentic_path\n images = {}\n for im in glob.glob(authentic_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 0\n for im in glob.glob(tampered_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 1\n return images", "def load_labels(path, kmer=True, rg=True, clip=True, rna=True, go=True):\n\n labels = dict()\n if go: labels[\"X_GO\"] = gzip.open(os.path.join(path,\n \"matrix_GeneOntology.tab.gz\")).readline().split(\"\\t\")\n if kmer: labels[\"X_KMER\"] = gzip.open(os.path.join(path,\n \"matrix_RNAkmers.tab.gz\")).readline().split(\"\\t\")\n if rg: labels[\"X_RG\"] = gzip.open(os.path.join(path,\n \"matrix_RegionType.tab.gz\")).readline().split(\"\\t\")\n if clip: labels[\"X_CLIP\"] = gzip.open(os.path.join(path,\n \"matrix_Cobinding.tab.gz\")).readline().split(\"\\t\")\n if rna: labels[\"X_RNA\"] = gzip.open(os.path.join(path,\n \"matrix_RNAfold.tab.gz\")).readline().split(\"\\t\")\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels 
= np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... Exiting now\"\n sys.exit()", "def get_pet_labels(images_dir):\r\n \r\n # Creates a list of files in directory from pet images directory\r\n in_files = listdir(images_dir)\r\n \r\n # Process each of the files such that the created dictionary would have\r\n # key = filename and the value = picture label\r\n \r\n # Create an empty dictionary to hold pet labels\r\n petlabels_dic = dict()\r\n \r\n \r\n \r\n for idx in range(0, len(in_files), 1): \r\n if in_files[idx][0] != \".\":\r\n pet_image_name = in_files[idx].split(\"_\")\r\n # Check if the first character is uppercase letter. If it is, then lowercase that first character\r\n if pet_image_name[0].isupper() : \r\n pet_image_name = pet_image_name.lower()\r\n # Create a temporary label variable to hold pet label name\r\n pet_label = \" \"\r\n \r\n # Process each of the character strings(words) split by '_' in \r\n # the list pet_image_name\r\n for word in pet_image_name: \r\n if word.isalpha():\r\n pet_label += word + \" \"\r\n pet_label = pet_label.strip()\r\n if in_files[idx] not in petlabels_dic:\r\n petlabels_dic[in_files[idx]] = [pet_label]\r\n else: \r\n print(\" Warning: Duplicate files exist in dictionary\", in_files[idx])\r\n \r\n \r\n # Return dictionary of pet lables\r\n return(petlabels_dic)", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def get_image_labels_mapping(images_fp, labels_fp):\n name_map = {}\n\n for f in images_fp():\n image_name = f[0]['file']\n vars = {k.upper():v for k,v in f[0].items() if k!='file' }\n label_name = labels_fp.get_matching(**vars)[0]['file']\n name_map[image_name] = label_name\n return name_map", "def retrieve_labels(file, label_indices):\n\n\t# Initialize numpy matrix to store the images\n\tlabels = np.zeros((len(label_indices), 10))\n\n\twith open(file, \"rb\") as f:\n\t\t# Intialize counters\n\t\ti = 0\n\t\tlabel_number = 0\n\n\t\t# Read first byte\n\t\tbyte = f.read(1)\n\n\t\t# Find each image in the data file\n\t\tfor label_index in label_indices:\n\t\t\t# Read in bytes until you arrive at the label\n\t\t\twhile byte and (i < (label_index + 8)):\n\t\t\t\tbyte = f.read(1)\n\t\t\t\ti += 1\n\n\t\t\t# Store label value in numpy array\n\t\t\tvalue = int.from_bytes(byte, \"big\")\n\t\t\tlabels[label_number] = np.zeros(10)\n\t\t\tlabels[label_number, value] = 1\n\n\t\t\t# Increment to next label\n\t\t\tlabel_number += 1\n\n\treturn labels", "def 
get_images_and_labels_nc():\n refs = get_ref_df()\n images = {}\n for _, data in refs.iterrows():\n if data['ProbeFileName'] in images:\n continue\n im = data['ProbeFileName']\n images[im] = 1 if data['IsTarget'] == 'Y' else 0\n return images", "def make_label_map(path, label_list):\r\n \r\n img = []\r\n for name in path:\r\n now = np.zeros((224,224))\r\n im = cv2.resize(cv2.imread(name), (224,224)).tolist()\r\n for y, i in enumerate(im):\r\n for x, j in enumerate(i):\r\n try:\r\n now[y, x] = label_list.index(j)\r\n\r\n except ValueError:\r\n now[y, x] = 0\r\n\r\n img.append(now)\r\n return img", "def parse_labelfile(path):\n with open(path, \"r\") as FILE:\n lines = FILE.readlines()\n\n\n labels = {x.split(\":\")[0]: x.split(\":\")[1] for x in lines[1:]}\n\n for key in labels:\n labels[key] = np.array(labels[key].split(\",\")).astype(\"uint8\")\n\n return labels", "def extract_labels(pdbbind_label_file):\n assert os.path.isfile(pdbbind_label_file)\n labels = {}\n with open(pdbbind_label_file) as f:\n content = f.readlines()\n for line in content:\n if line[0] == \"#\":\n continue\n line = line.split()\n # lines in the label file have format\n # PDB-code Resolution Release-Year -logKd Kd reference ligand-name\n #print line[0], line[3]\n labels[line[0]] = line[3]\n return labels", "def create_labelmapDict_patch(list_all_images, path_dataset):\n list_all_classes = []\n for idx, name_image_ in enumerate(list_all_images):\n _, tail = os.path.split(name_image_)\n temp_obj = []\n name_file_xml_all = os.path.join(path_dataset, 'LABELS', tail[0:-3] + 'xml')\n if os.path.exists(name_file_xml_all):\n with tf.gfile.GFile(name_file_xml_all, 'rb') as fid:\n xml_str = fid.read()\n xml = etree.fromstring(xml_str)\n data = tfrecord_util.recursive_parse_xml_to_dict(xml)['annotation']\n if 'object' in data:\n for obj in data['object']:\n name_in_obj_ = obj['name'].replace(' ', '').strip()\n if name_in_obj_ != 'INCOMPLETAS':\n list_all_classes.append(name_in_obj_)\n temp_obj.append(obj)\n # list_all_classes = unique_list(list_all_classes)\n list_all_classes = list(set(list_all_classes))\n list_all_classes.sort()\n list_all_classes.insert(0, 'background')\n labelmap_ = {el: k for k, el in enumerate(list_all_classes)}\n return labelmap_", "def read_idx_2_label():\n with open('../Data/imagenet_class_index.json') as f:\n dictionary = json.load(f)\n return dictionary", "def extract_labels(filename, num_images):\n gt_imgs = []\n for i in range(1, num_images+1):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n gt_patches = [img_crop(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE, 0, False) for i in range(num_images)]\n data = numpy.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = numpy.asarray([value_to_class(numpy.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(numpy.float32)", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n 
training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def load_label(self, file, variable_name=\"group\"):\n data = scipy.io.loadmat(file)\n self.logger.info(\"loading mat file %s\", file)\n label = data[variable_name].todense().astype(np.int)\n label = np.array(label)\n print(label.shape, type(label), label.min(), label.max())\n return label", "def load_labels(path):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r\"[:\\s]+\", content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n # print(labels)\n return labels", "def create_readable_names_for_imagenet_labels():\n\n base_url = 'http://cnbj1-fds.api.xiaomi.net/ml-datasets/imagenet/' # noqa\n synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url)\n synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url)\n\n filename, _ = urllib.urlretrieve(synset_url)\n synset_list = [s.strip() for s in open(filename).readlines()]\n num_synsets_in_ilsvrc = len(synset_list)\n assert num_synsets_in_ilsvrc == 1000\n\n filename, _ = urllib.urlretrieve(synset_to_human_url)\n synset_to_human_list = open(filename).readlines()\n num_synsets_in_all_imagenet = len(synset_to_human_list)\n assert num_synsets_in_all_imagenet == 21842\n\n synset_to_human = {}\n for s in synset_to_human_list:\n parts = s.strip().split('\\t')\n assert len(parts) == 2\n synset = parts[0]\n human = parts[1]\n synset_to_human[synset] = human\n\n label_index = 1\n labels_to_names = {0: 'background'}\n for synset in synset_list:\n name = synset_to_human[synset]\n labels_to_names[label_index] = name\n label_index += 1\n\n return labels_to_names", "def labels_for_training_data():\n current_id = 0\n label_ids = dict()\n faces, faces_ids = list(), list()\n\n # Go through directories and find label and path to image\n for root, dirs, files in walk('data/'):\n for file in files:\n if file.endswith('.jpg') or file.endswith('.png'):\n img_path = path.join(root, file)\n label = path.basename(root).replace(' ', '-').lower()\n if label not in label_ids:\n label_ids[label] = current_id\n current_id += 1\n id_ = label_ids[label]\n\n test_img = cv2.imread(img_path)\n test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\n if test_img is None:\n print('Image not loaded properly')\n continue\n\n faces.append(test_img)\n faces_ids.append(id_)\n\n # Make directory with labels doesn't exist make directory and file with labels\n if not path.exists('labels/'):\n makedirs('labels/')\n with open('labels/face-labels.pickle', 'wb') as file:\n pickle.dump(label_ids, file)\n\n return faces, faces_ids", "def load_label(self, idx):\n im = open('{}/GTTXT/{}.txt'.format(root_dir, idx))\n\t#print(type(im.readlines()[0].rstrip(\"\\n\")))\n rgb_label = [i.rstrip(\"\\n\").split(\" \") for i in im.readlines()]\n\tlabel=[]\t\n\tfor i in rgb_label:\n\t\tlabel+=[int(j) for j in i]\n\tlabel=np.array(label).reshape(720,960)\n\tlabel[label==-1]=12\n\t#print(np.unique(label))\n #label = label[np.newaxis, ...]\n return label", "def ExtractLabel(ImgName):\n # Each img has name notation \"*****a0X*\" where X is PlasticType\n PlasticType = ImgName[7] \n return {\n '1': 0, # PET\n '2': 1, # HDPE\n '4': 2, # LDPE\n '5': 3, # PP\n '6': 4, # PS\n '7': 5, # Other\n }[PlasticType]", "def read_image_with_label(dir, file):\n assert type(file) == str, 
\"File name is not string.\"\n f = os.path.join(dir, file)\n info = file.split(\"_\")\n try:\n label = [int(info[x]) for x in range(1, 3)]\n except:\n print(\"The format of file name is not correct.\")\n else:\n return Image.open(f), label", "def get_pet_labels(image_dir):\n # Create dictionary\n petlabels_dic = {}\n\n # Retrieve the filenames from folder pet_images/\n # Try to catch exceptions (folder does not exists, etc..)\n try:\n filename_list = listdir(image_dir)\n except:\n print('** Error: unable to list files in \"{}\" folder.'.format(image_dir))\n exit()\n else:\n for idx in range(0,len(filename_list)):\n #if filename_list[idx] not in petlabels_dic: # required? probably not\n # Remove extension from filename\n filename = filename_list[idx].split('.')[0]\n # Create a list of words from filename, removing digits\n filename_labels = list(filter(lambda label: label.isalpha(), filename.split('_')))\n # Create key->value item in dictonary\n petlabels_dic[filename_list[idx]] = [\" \".join(filename_labels).lower()]\n\n # Return dictionary\n return petlabels_dic", "def _read_color_labels(filename):\n line_parser = lambda line: (int(line.split(',')[0]), line.split(',')[-1])\n with open(filename, 'r') as labels:\n label_map = dict([line_parser(line.strip()) for line in labels])\n return label_map", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(10000)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def read_images(self, img_name, label_name):\n image_string = tf.read_file(img_name)\n image_decoded = tf.image.decode_jpeg(image_string, channels=3)\n label_string = tf.read_file(label_name)\n label_decoded = tf.image.decode_jpeg(label_string, channels=1)\n return image_decoded, label_decoded", "def load_labels(filename):\n\n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read()\n\n magic, n_labels = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(2))\n\n assert magic[0] == 2049, \"bad magic number, what do?\"\n\n label_stream = array.array('B', b[8:])\n \n assert len(label_stream) == n_labels[0], \"mismatch in label length\"\n \n # label_stream is actually type array.array, which is iterable surely.\n # i'll convert it anyway...\n return tuple(label_stream)", "def _extract_labels(self, filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = self._read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return 
self._dense_to_one_hot(labels)\n return labels", "def unpack_labels(self, labels,\n is_box = False):\n unpacked_labels = {}\n count = 0\n for level in range(self.min_level, self.max_level + 1):\n feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)\n feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)\n steps = feat_size_y * feat_size_x * self.anchors_per_location\n if is_box:\n unpacked_labels[level] = tf.reshape(labels[count:count + steps],\n [-1, 4])\n else:\n unpacked_labels[level] = tf.reshape(labels[count:count + steps],\n [feat_size_y, feat_size_x, -1])\n count += steps\n return unpacked_labels", "def read_label_from_xml(label_path):\n labels = parseXML(label_path)\n label_dic = {}\n for label in labels:\n first_frame = label.firstFrame\n nframes = label.nFrames\n size = label.size\n obj_type = label.objectType\n for index, place, rotate in zip(range(first_frame, first_frame+nframes), label.trans, label.rots):\n if index in label_dic.keys():\n label_dic[index][\"place\"] = np.vstack((label_dic[index][\"place\"], place))\n label_dic[index][\"size\"] = np.vstack((label_dic[index][\"size\"], np.array(size)))\n label_dic[index][\"rotate\"] = np.vstack((label_dic[index][\"rotate\"], rotate))\n else:\n label_dic[index] = {}\n label_dic[index][\"place\"] = place\n label_dic[index][\"rotate\"] = rotate\n label_dic[index][\"size\"] = np.array(size)\n return label_dic, size", "def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)", "def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. 
So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)", "def load_labels(label_path):\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index", "def read_labeled_image_list(image_list_file):\n f = open(image_list_file, 'r')\n filenames = []\n labels = []\n for line in f:\n filename, label = line[:-1].split(' ')\n filenames.append(filename)\n labels.append(int(label))\n return filenames, labels", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def image_classes():\n\n image_data_path = PROJECT_ROOT + \"/data/CUB_200_2011/\"\n\n # <class_id> <class_name>\n classes = open(image_data_path + \"classes.txt\").readlines()\n classes = [i.strip().split() for i in classes]\n\n # <image_id> <class_id>\n labels = open(image_data_path + \"image_class_labels.txt\").readlines()\n labels = [i.strip().split() for i in labels]\n\n class_ids = {}\n for i in classes:\n class_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels:\n label_ids[int(i[0])] = int(i[1])\n\n return class_ids, label_ids", "def read_label_data(mode, image_type):\n return np.loadtxt(parse_path(mode, image_type, True), dtype=int, delimiter='\\n')", "def get_label(img_path):\n img_name = img_path.stem\n label_name = img_name + \".txt\"\n label_path = img_path.parent / label_name\n with open(label_path) as f:\n label = json.load(f)\n return label", "def getList(self):\n labelMap = {}\n imageMap = {}\n key = []\n index = 0\n\n for root, dirs, files in os.walk(self.path_data):\n for file in files:\n # If .png or .jpg file found then\n if file.endswith(tuple(config.imageFormat)):\n key.append(index)\n labelMap[index] = preprocessing.getLabel(file)\n imageMap[index] = os.path.join(root, file)\n\n index += 1\n\n else:\n continue\n\n return key, imageMap, labelMap", "def extract_labels(filename, num_images, starting_id, context_factor):\n gt_imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if 
os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n # it means that we base our labels only on the core of the patch, not including the contet added\n context_factor = 0\n gt_patches = [img_crop_context(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor) for i in range(num_images)]\n data = np.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = np.asarray([value_to_class(np.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(np.float32)", "def __read_img_file(filename, label):\n image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)\n height, width, _ = image.shape\n image = cv2.resize(image, (img_size, img_size))\n # A label is consist of [y1, x1, y2, x2, class_idx]\n label = np.reshape(label, (-1, 5))\n rel_bboxes = label[..., 0:4] / np.array([height, width, height, width], np.float32)\n label = np.concatenate([rel_bboxes, np.expand_dims(label[..., -1], 1)], axis=-1)\n return image, label", "def load_pixel_labels(pixel_labels_dir, photo_id):\n\n pixel_labels_path = os.path.join(pixel_labels_dir, '%s.npy' % photo_id)\n if not os.path.exists(pixel_labels_path):\n raise ValueError('Could not find ground truth labels at \"%s\"' % pixel_labels_path)\n\n return np.load(pixel_labels_path)", "def load_pixel_labels(pixel_labels_dir, photo_id):\n\n pixel_labels_path = os.path.join(pixel_labels_dir, '%s.npy' % photo_id)\n if not os.path.exists(pixel_labels_path):\n raise ValueError('Could not find ground truth labels at \"%s\"' % pixel_labels_path)\n\n return np.load(pixel_labels_path)", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n return labels", "def read_labeled_image_list(image_list_file):\n\tf = open(image_list_file, 'r')\n\tfilenames = []\n\tlabels = []\n\tfor line in f:\n\t\tline = line.rstrip('\\n')\n\n\t\tfilename, _, label = line.partition(LABEL_SEP)#line[:-1].split(LABEL_SEP)\n\t\tfilenames.append(filename)\n\t\tlabels.append(int(label))\n\t\t#print (filename+LABEL_SEP+\":) \"+label)\n\treturn filenames, labels", "def __init__(self, path, type = 'mrk') :\n stim = np.loadtxt(path, skiprows = 1, usecols = (0,1), dtype = np.dtype(int))\n labels = np.loadtxt(path, skiprows = 1, usecols = 2, dtype = np.dtype(str))\n\n self.dic = dict.fromkeys(labels)\n for key, _ in self.dic.items() : self.dic[key] = []\n for k in range(len(stim)) :\n self.dic[labels[k]].append(stim[k, :])\n return None", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n #print('Labels:')\n\n #for label in labels:\n # print(label.description)\n return labels", "def extract_labels(nlabels,filename, one_hot=False):\n print('Extracting', filename,'bbbccicicicicib')\n\n labels=numpy.loadtxt(filename,dtype='int64')\n \n if one_hot:\n print(\"LABELS ONE HOT\")\n print(labels.shape)\n 
XXX=dense_to_one_hot(labels,nlabels)\n print(XXX.shape)\n return dense_to_one_hot(labels,nlabels)\n print(\"LABELS\")\n print(labels.shape)\n return labels", "def read_rich_labels(path):\n\tlocation_dict = {}\n\twith open(os.path.join(path,'rich_labels.txt')) as f:\n\t\tcontent = f.readlines()\n\tfor line in content:\n\t\tlinecontent = line.split()\n\n\t\t# make sure each line is structured as follows:<image name> <latitude> <longitude>\n\t\tassert len(linecontent) >= 3, \"Unexpectedly short line in rich_labels.txt: \" + line\n\t\tif len(linecontent) > 3: \n\t\t\twarnings.warn('Unexpected line in rich_labels.txt: ' + line + \n\t\t\t \t\t\t '\\n Using first three words: ' + str(linecontent), stacklevel=0)\n\t\ttry:\n\t\t\tlocation_dict[linecontent[0]] = (float(linecontent[1]),float(linecontent[2]))\n\n\t\t\t# make sure you have latitude and longitude coordinates are not flipped\n\t\t\t# assuming that images are from North America\n\t\t\tassert float(linecontent[1]) <= float(linecontent[2])\n\n\t\texcept ValueError as e:\n\t\t\twarnings.warn(\"Unexpected lat/long in rich_labels.txt: \" + \n\t\t\t\t\t\t str(linecontent[1:3]), stacklevel=0)\n\treturn location_dict", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def file_reader(image_file, label_file):\n\n image = im.imread(image_file)\n\n with open(label_file, \"r\") as file:\n label = float(file.read())\n\n return image, label", "def _pickle_load(filename):\n with open(filename, 'rb') as f:\n save = pickle.load(f)\n image = save['image'].astype(np.float32)\n label = np.float32(save['label'])\n label = reformat_labels(label)\n return image, label", "def load_labels(path, encoding='utf-8'):\r\n with open(path, 'r', encoding=encoding) as f:\r\n lines = f.readlines()\r\n if not lines:\r\n return {}\r\n\r\n if lines[0].split(' ', maxsplit=1)[0].isdigit():\r\n pairs = [line.split(' ', maxsplit=1) for line in lines]\r\n return {int(index): label.strip() for index, label in pairs}\r\n else:\r\n return {index: line.strip() for index, line in enumerate(lines)}", "def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]", "def load_label(path: str) -> dict:\n if not os.path.exists(path):\n print(f\"Warning, try to load non-exist label {path}\")\n return None\n return np.load(path, allow_pickle=True).tolist()", "def read_label_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n ret = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if 
len(pair) == 2 and pair[0].strip().isdigit():\n ret[int(pair[0])] = pair[1].strip()\n else:\n ret[row_number] = pair[0].strip()\n return ret", "def extract_labels(filename,tag,one_hot):\n print('Extracting labels',filename)\n return extractdb_labels(filename,tag,one_hot=one_hot)", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.label_detection(image=image)\n labels = response.label_annotations\n print('Labels:')\n return response", "def _read_labels(test_data=False):\n if not test_data:\n filename = os.path.join(FOLDER_PATH, 'train-labels.idx1-ubyte')\n else:\n filename = os.path.join(FOLDER_PATH, 't10k-labels.idx1-ubyte')\n if not os.path.exists(filename):\n raise ValueError('The file dose not exist.')\n \n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer([filename])\n \n # The first 8 bytes contain file information:\n # [offset] [type] [value] [description]\n # 0000 32 bit integer 0x00000801(2049) magic number\n # 0004 32 bit integer 60000/10000 number of items \n # ...(label value)\n header_bytes = 8\n # Every record consists of a label, with a fixed number of bytes for each.\n record_bytes = 1\n \n # Create a FixedLengthRecordReader to read record.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes,\n header_bytes=header_bytes)\n _, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8, then cast to int32.\n record = tf.cast(tf.decode_raw(value, tf.uint8), tf.int32)\n \n # Reshape from [1] to a scalar shape [].\n label = tf.reshape(record, [])\n\n return label", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def get_labels(label_file):\n labels = None\n with open(label_file, 'r') as infile:\n reader = csv.reader(infile)\n labels = dict((rows[0], rows[1]) for rows in reader)\n return labels", "def get_img_labels(task, nb_img=None):\n # Read the csv file matching the ids of the images with the classes\n labels = OrderedDict()\n\n with open('data/' + ('id_train' if task == 'training' else 'sample_submission4') + '.csv', 'rb') as csvfile:\n rows = reader(csvfile, delimiter=',')\n rows.next() # Skip the header\n for row in rows:\n if nb_img is not None and len(labels) >= nb_img:\n break\n labels[row[0]] = int(row[1]) # Integer conversion of the labels\n\n return labels", "def get_label_vectors():\n print(\"Retrieving label vectors...\")\n label_dict = {} # instantiate dict for labels:vectors\n categories = sorted([c for c in os.listdir('images/') if c[0] != '.']) # ignore hidden files\n x = np.zeros(len(categories)) # zero vector of number of categories\n for i, c in enumerate(categories): # get index and category for images\n y = x.copy() # use copy of x\n y[i] = 1 # set label index to true\n label_dict[c] = y.copy() # create label:vector\n\n return label_dict", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)\n buf = 
bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n return dense_to_one_hot(labels)", "def _load_images_and_labels(image_dir):\n\n print('Extracting images from: ', image_dir)\n\n image_paths = _load_image_paths(image_dir)\n images = _extract_images(image_paths)\n num_images = len(image_paths)\n labels = np.ones(num_images, dtype=np.int64)\n\n return images, labels", "def load_leaf():\n # List of image file names\n dataset_directory = os.path.join(root_directory,'Leaf_2')\n filenames = os.listdir(dataset_directory)\n filenames.sort()\n\n # List of bitmap Shapes; each row is a Image of the dataset\n data = []\n\n # Numpy array of labels associated to each class of image\n target = np.empty([len(filenames), ])\n\n previous_label = ''\n class_num = -1\n index = 0\n\n for index, filename in enumerate(filenames):\n data.append(Bitmap(io.imread(os.path.join(dataset_directory, filename))))\n file_label = filename.split('-')[0]\n\n if(previous_label != file_label):\n previous_label = file_label\n class_num += 1\n target[index] = class_num\n else:\n target[index] = class_num\n\n return {'bitmaps': data, 'targets': target}", "def _parse_raw_labels(self, lines):\r\n images = []\r\n labels = []\r\n idx = 0\r\n while idx < len(lines):\r\n image_path = lines[idx].strip()\r\n images.append(self._real_image_path(image_path))\r\n idx += 1\r\n\r\n num = int(lines[idx])\r\n idx += 1\r\n\r\n labels_ = []\r\n for _ in range(num):\r\n x1, y1, w, h, blur, expression, illumination, invalid, \\\r\n occlusion, pose = [int(v) \r\n for v in lines[idx].strip().split()]\r\n x2, y2 = x1 + w - 1, y1 + h - 1 # -1 to get the read x2, y2\r\n\r\n labels_.append([x1, y1, x2, y2])\r\n idx += 1\r\n \r\n labels.append(np.array(labels_))\r\n\r\n self._data_map[self._real_image_path(image_path)] = np.array(labels_)\r\n return np.array(images), np.array(labels)", "def extract_labels(filename, one_hot=False):\n\tprint('Extracting', filename)\n\twith gzip.open(filename) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2049:\n\t\t\traise ValueError('Invalid magic number %d in MNIST label file: %s' %(magic, filename))\n\t\tnum_items = _read32(bytestream)\n\t\tbuf = bytestream.read(num_items)\n\t\tlabels = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tif one_hot:\n\t\t\treturn dense_to_one_hot(labels)\n\t\treturn labels", "def label_mapping(filename):\n\n\t\n\n\n\twith open(filename, 'r') as infile:\n\t\treader = csv.reader(infile)\n\t\tnext(reader, None) # ignore first line since they're column labels\n\n\t\t#filename, artist, title, style, genre, date\n\t\tfor line in reader:\n\t\t\timg = line[0]\n\t\t\tartist = line[1]\n\t\t\tstyle = line[3]\n\t\t\tgenre = line[4]\n\t\t\tdate = re.findall(r'\\d+', line[5]) #parse any unwanted stuff\n\n\t\t\t#img and artist fields always present, no need to check\n\t\t\tartist_labels[img] = artist\n\n\n\t\t\tif style != '' and style in style_check:\n\t\t\t\t#if sum(x == style for x in style_labels.values()) < max_examples: # avoid imbalance\n\t\t\t\tstyle_labels[img] = style\n\n\n\t\t\tif genre != '' and genre in genre_check:\n\t\t\t\t#if sum(x == genre for x in genre_labels.values()) < max_examples:\n\t\t\t\tgenre_labels[img] = genre\n\n\n\t\t\tif len(date) > 0:\n\t\t\t\tbucket_len = 10 #buckets of 10 years\n\t\t\t\tbucket = (int(date[0]) // bucket_len) * bucket_len \n\t\t\t\tperiod = str(bucket) + '-' + str(bucket + (bucket_len - 1))\n\n\t\t\t\tif period in date_check:\n\t\t\t\t\t#if sum(x == period for x in date_labels.values()) <= 
max_examples:\n\t\t\t\t\tdate_labels[img] = period #parsed_date", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = []\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels", "def load_letter(folder,label,image_size=28,sample_num=-1):\n\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n dtype=image_data_type)\n num_images = 0\n if sample_num == -1:\n sample_num = len(image_files)\n for image in image_files:\n image_file = os.path.join(folder, image)\n try:\n image_data = ndimage.imread(image_file).astype(image_data_type)\n if image_data.shape != (image_size, image_size):\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n if num_images >= sample_num:\n break\n except IOError as e:\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n\n dataset = dataset[0:num_images, :, :]\n data_label = np.ndarray(shape=(num_images), dtype=np.int8)\n data_label.fill(label)\n return dataset,data_label", "def extract_labels(filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)[0]\n #print('check', magic, num_items)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels)\n return labels", "def load_tiny_imagenet(directory):\n path_train, path_val, path_test = directory + '/train', directory + '/val', directory + '/test'\n labels = os.listdir(path_train)\n train_data = []\n train_labels = []\n for label in labels:\n imgs_path = 
os.path.join(path_train, label, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_data = []\n test_labels = []\n with open(path_val+'/val_annotations.txt', 'r') as f:\n val_annotations = [line.strip().split('\\t') for line in f]\n val_annotations = np.array(val_annotations)\n imgs_path = os.path.join(path_val, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n label = val_annotations[val_annotations[:, 0] == img_name, 1].astype('U9')\n test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.concatenate(test_labels)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, label, labels, imgs_path, img_name, img, imgs, val_annotations\n \n return train_data, train_labels, test_data, test_labels", "def load_matrix(self, src_dir, key_word=\"funneled\"):\r\n X = []\r\n Y = []\r\n label = 0\r\n for root, dirs, files in os.walk(src_dir):\r\n if files != []:\r\n for file in files:\r\n if key_word in file:\r\n img = cv2.imread(os.path.join(root, file), cv2.IMREAD_GRAYSCALE)\r\n min_value = np.min(img)\r\n max_value = np.max(img)\r\n X.append((img.flatten() - min_value)/(max_value - min_value)) # Normalize the data to [0, 1]\r\n Y.append(label)\r\n label +=1\r\n \r\n return dict(X = np.asarray(X), \r\n Y = np.asarray(Y))", "def createDictionaryFromFile(inputfile):\n logger.info('loading file: %s' % inputfile)\n dic = {}\n with open(inputfile) as fin:\n for n, line in enumerate(fin, start=1):\n arr = line.strip().split()\n path = arr[0]\n\n labels = []\n for label in arr[1:]:\n labels.append(ast.literal_eval(label))\n\n cpath = path.split('/')\n id_img = int(cpath[-1].replace('.jpg', ''))\n size_img = cpath[-2]\n activity = cpath[-3]\n id_data = int((cpath[-4])[-1])\n home = '/'.join(cpath[:-4])\n\n if dic.has_key(id_data):\n if dic[id_data].has_key(activity):\n if dic[id_data][activity].has_key(size_img):\n dic[id_data][activity][size_img][id_img] = labels\n else:\n dic[id_data][activity][size_img] = {id_img: labels}\n else:\n dic[id_data][activity] = {size_img: {id_img: labels}}\n else:\n dic[id_data] = {activity: {size_img: {id_img: labels}}}\n return n, home, dic", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def hume_matfile_loader(matfile_path):\n mat_struct = loadmat(matfile_path)\n\n # build a list of keys and values for each entry in the structure\n vals = mat_struct['stageData'][0, 0] # <-- set the array you want to access.\n keys = mat_struct['stageData'][0, 0].dtype.descr\n\n # Assemble the keys and values into variables with the same name as that used in MATLAB\n mat_dict = {}\n for i in range(len(keys)):\n key = keys[i][0]\n if len(vals[key].shape) > 1 and vals[key].shape[0] > vals[key].shape[1]:\n vals[key] = vals[key].T\n if len(vals[key][0]) > 1:\n val = np.squeeze(vals[key][0])\n else:\n val = np.squeeze(vals[key][0][0]) # squeeze is used to covert matlat (1,n) 
arrays into numpy (1,) arrays.\n mat_dict[key] = val\n\n return mat_dict", "def get_output(path, label_file = None):\n img_id = path.split('/')[-1]\n labels = label_file.loc[img_id].values\n return labels", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = []\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n #if len(pair) == 2 and pair[0].strip().isdigit():\n labels.append(np.array([int(pair[0].strip()),pair[1].strip()]))\n #else:\n # labels.append(pair[0].strip())\n return np.array(labels)", "def read_dataset(data_txt_file, image_data_path):\n data = {}\n data['image'] = []\n data['label'] = []\n\n indexFile = open(data_txt_file, 'r')\n for sample in indexFile:\n sample = sample.split(',')\n\n _id = sample[0]\n label = int(sample[1])\n imageData = io.imread(image_data_path+_id+'.jpg')\n\n data['label'].append(label)\n data['image'].append(imageData)\n\n data['image'] = np.array(data['image'])\n data['label'] = np.array(data['label'])\n\n return data", "def read_data(case_dir):\n dict_images = dict()\n list_files = ['MR_512.nii.gz', 'landmarks_512.csv', ]\n # In fact, there is no Mask during inference, so we cannot load it.\n\n for file_name in list_files:\n file_path = case_dir + '/' + file_name\n assert os.path.exists(file_path), case_dir + ' does not exist!'\n\n if file_name.split('.')[-1] == 'csv':\n landmarks = pd.read_csv(file_path)\n dict_images['list_landmarks'] = landmark_extractor(landmarks)\n elif file_name.split('.')[0].split('_')[0] == 'MR':\n dict_images['MR'] = sitk.ReadImage(file_path, sitk.sitkFloat32)\n dict_images['MR'] = sitk.GetArrayFromImage(dict_images['MR'])[np.newaxis, :, :, :]\n elif file_name.split('.')[0].split('_')[0] == 'Mask':\n dict_images['Mask'] = sitk.ReadImage(file_path, sitk.sitkInt16)\n dict_images['Mask'] = sitk.GetArrayFromImage(dict_images['Mask'])[np.newaxis, :, :, :]\n\n return dict_images", "def get_label_dict(self):\n with open(self.labels_file_path, mode='r') as f:\n label_file = f.read()\n\n inverse_label_dict = json.loads(label_file)\n label_dict = {int(value): key for key,\n value in inverse_label_dict.items()}\n return label_dict", "def get_labels(self):\n\n print 'Loading label data from', self.label_file, '...'\n labels = {}\n with open(self.label_file, 'rb') as f:\n f.next() # skip header line\n for line in f:\n index, answer = line.rstrip('\\n').split(',')\n labels[index] = answer\n\n return labels", "def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = 
lbl[ind[i]]\n\n return images, labels", "def build_features_dict(image, image_id, filename, image_format=None,\n bboxes=None, masks=None, label_ids=None,\n label_names=None, masks_format=\"png\"):\n\n # Add channel dimension if needed.\n if len(image.shape) == 3:\n pass\n elif len(image.shape) == 2:\n image = np.expand_dims(image, -1)\n else:\n raise Exception(f\"Wrong image shape: {image.shape}\")\n\n # Get image shape.\n image_width, image_height, image_channel = image.shape\n\n # Encode image.\n image_encoded = imaging.encode_image(image, image_format)\n\n # Create te feature dict.\n feature_dict = {}\n\n # Image features\n feature_dict['image_height'] = int64_feature(image_height)\n feature_dict['image_width'] = int64_feature(image_width)\n feature_dict['image_channel'] = int64_feature(image_channel)\n feature_dict['image_filename'] = bytes_feature(filename.encode('utf8'))\n feature_dict['image_id'] = bytes_feature(str(image_id).encode('utf8'))\n feature_dict['image_encoded'] = bytes_feature(image_encoded.numpy())\n feature_dict['image_format'] = bytes_feature(image_format.encode('utf8'))\n\n # Object features\n if bboxes is not None:\n if bboxes.shape[0] > 0:\n bboxes_x = bboxes[:, 0]\n bboxes_y = bboxes[:, 1]\n bboxes_width = bboxes[:, 2]\n bboxes_height = bboxes[:, 3]\n else:\n bboxes_x = []\n bboxes_y = []\n bboxes_width = []\n bboxes_height = []\n\n feature_dict['bboxes_x'] = float_list_feature(bboxes_x)\n feature_dict['bboxes_y'] = float_list_feature(bboxes_y)\n feature_dict['bboxes_width'] = float_list_feature(bboxes_width)\n feature_dict['bboxes_height'] = float_list_feature(bboxes_height)\n\n if label_ids is not None:\n feature_dict['label_ids'] = int64_list_feature(label_ids)\n\n if label_names is not None:\n feature_dict['label_names'] = bytes_list_feature(label_names)\n\n if masks is not None:\n # Encode masks.\n masks_encoded = []\n for mask in masks:\n mask = image = np.expand_dims(mask, -1)\n mask_encoded = imaging.encode_image(mask, masks_format)\n masks_encoded.append(mask_encoded.numpy())\n\n feature_dict['masks_encoded'] = bytes_list_feature(masks_encoded)\n feature_dict['masks_format'] = bytes_feature(masks_format.encode(\"utf8\"))\n\n return feature_dict", "def label_visualize(img_dir):\n img = scipy.misc.imread(img_dir).astype(np.uint8)\n yo = np.nonzero(img == 1)\n visual = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n\n for i in range(0, 34):\n index = np.nonzero(img == i)\n visual[index + (0,)] = labels[i][0]\n visual[index + (1,)] = labels[i][1]\n visual[index + (2,)] = labels[i][2]\n\n scipy.misc.imsave('./' + img_dir.split('/')[-1], visual)", "def get_pet_labels(image_dir):\n results_dic = dict()\n \n# # Retrieves the file names from the folder specified as 'image_dir' \n filenames_list = listdir(image_dir)\n \n# # Processes the filenames to create the pet image labels\n# # Retrieves the filenames from folder pet_images/\n for i in range (0, len(filenames_list), 1):\n# # Skips file if starts with . 
(like .DS_Store of Mac OSX) because it \n# # isn't an pet image file\n if filenames_list[i][0] != \".\":\n# # Reads respectively indexed element from filenames_list into temporary string variable 'pet_image' \n pet_image = filenames_list[i]\n# # Sets all characters in 'pet_image' to lower case \n pet_image_lower = pet_image.lower()\n# # Creates list called 'pet_image_word_list' that contains every element in pet_image_lower seperated by '_'\n pet_image_word_list = pet_image_lower.split(\"_\")\n# # Creates temporary variable 'pet_label' to hold pet label name extracted starting as empty string\n pet_image_alpha = \"\"\n# # Iterates through every word in 'pet_image_word_list' and appends word to 'pet_label_alpha' only if word consists \n# # purely of alphabetic characters \n for word in pet_image_word_list:\n if word.isalpha():\n pet_image_alpha += word + \" \"\n# # Removes possible leading or trailing whitespace characters from 'pet_pet_image_alpha' and add stores final label as 'pet_label' \n pet_label = pet_image_alpha.strip()\n\n# # Adds the original filename as 'key' and the created pet_label as 'value' to the 'results_dic' dictionary if 'key' does \n# # not yet exist in 'results_dic', otherwise print Warning message \n if filenames_list[i] not in results_dic:\n results_dic[filenames_list[i]] = [pet_label]\n else:\n print(\"** Warning: Key = \", filenames_list[i], \" already in 'results_dic' with value = \", results_dic[filenames_list[i]])\n \n# # Iterates through the 'results_dic' dictionary and prints its keys and their associated values\n print(\"\\nPrinting: All 'key' - 'value' pairs in dictionary results_dic: \")\n for key in results_dic:\n print(\"Filename = \", key, \" Pet Label = \", results_dic[key])\n \n# # Returns results_dic\n return results_dic", "def decode_labels(mask, num_images=1, num_classes=21, task='seg'):\n n, h, w, c = mask.shape\n assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' 
% (n, num_images)\n outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n for i in range(num_images):\n if task == 'normal':\n outputs[i] = mask[i]\n elif task == 'seg':\n img = Image.new('RGB', (w, h), (255, 255, 255)) # unlabeled part is white (255, 255, 255)\n pixels = img.load()\n for j_, j in enumerate(mask[i, :, :, 0]):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs[i] = np.array(img)\n else:\n raise Exception('task name is not recognized!')\n\n return outputs", "def load_imgsLabels(self, image_paths):\n \n# label = image_paths[-1]\n \n images = self.load_images(image_paths)\n \n images = self.resize_images(images)\n \n images_list = self.greyscale_images(images)\n\n return images_list", "def load_imagenet(directory):\n path_train, path_val = directory + '/ILSVRC2012_img_train', directory + '/ILSVRC2012_img_val'\n train_labels = os.listdir(path_train)\n train_data = []\n for label in train_labels:\n imgs_path = os.path.join(path_train, label)\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_labels = os.listdir(path_val)\n test_data = []\n for label in test_labels:\n imgs_path = os.path.join(path_val, label)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, imgs_path, img_name, img, imgs\n \n return train_data, train_labels, test_data, test_labels", "def load_data(data_file):\n data = pickle.load(open(data_file, \"rb\"))\n images = data[\"images\"]\n labels = data[\"labels\"]\n\n return images, labels" ]
[ "0.7442581", "0.67145514", "0.6680717", "0.66700083", "0.6651974", "0.6599294", "0.65706545", "0.6568262", "0.65624034", "0.65466106", "0.6527709", "0.65229243", "0.65100825", "0.6500305", "0.649048", "0.6466592", "0.6466018", "0.6442053", "0.6429563", "0.6409631", "0.63989353", "0.6398331", "0.6392108", "0.63798136", "0.63793224", "0.63647455", "0.6362771", "0.63621897", "0.6351548", "0.6340121", "0.6311418", "0.63105685", "0.6301381", "0.6298731", "0.629819", "0.62820655", "0.6281838", "0.6269306", "0.6267734", "0.62660563", "0.62660563", "0.62610793", "0.62308514", "0.62302977", "0.62213755", "0.62192297", "0.62170714", "0.62042874", "0.6204238", "0.6200295", "0.6173856", "0.6173856", "0.61581635", "0.61481947", "0.61384934", "0.61376095", "0.61330664", "0.6125069", "0.61185175", "0.61180514", "0.6090818", "0.6082656", "0.6071819", "0.6068334", "0.6067384", "0.6058544", "0.60574967", "0.6052241", "0.6052068", "0.6048583", "0.60455567", "0.60393196", "0.6034254", "0.6014672", "0.60122997", "0.5985232", "0.5973246", "0.5965362", "0.59591544", "0.59546065", "0.59541255", "0.59513205", "0.5950326", "0.59467643", "0.5935829", "0.5924551", "0.5921776", "0.5919206", "0.59134555", "0.5908433", "0.5904006", "0.58992493", "0.58726734", "0.58699334", "0.58655876", "0.5853104", "0.58282125", "0.5825043", "0.5816673", "0.5813983" ]
0.8415674
0
ref CLRS pg326, solution to the basic supply chain problem using the book notation for variable names
def fastestWay(a, t, e, x, n):
    # CLRS 15.1 assembly-line scheduling ("fastest way through a factory"), using the
    # book's variable names: a = station costs per line, t = transfer costs,
    # e = entry costs, x = exit costs, n = number of stations per line.
    f1 = [(e[0], 1)]  # (fastest time ending on line 1, line the station was reached from)
    f2 = [(e[1], 2)]  # (fastest time ending on line 2, line the station was reached from)
    for i in range(n):
        f11 = f1[i][0] + a[0][i]                   # stay on line 1
        f12 = f2[i][0] + a[1][i] + t[1][i + 1]     # come over from line 2
        f22 = f2[i][0] + a[1][i]                   # stay on line 2
        f21 = f1[i][0] + a[0][i] + t[0][i + 1]     # come over from line 1
        f1.append((f11, 1) if f11 < f12 else (f12, 2))
        f2.append((f22, 2) if f22 < f21 else (f21, 1))
    f1x, f2x = f1[n][0] + x[0], f2[n][0] + x[1]
    return (f1x, f1) if f1x < f2x else (f2x, f2)
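A minimal usage sketch for the document above; all numbers are hypothetical, and it assumes a and t are 2-row lists with t padded so that t[line][i+1] is valid for every station i, which is how the loop indexes them (not necessarily the layout the original author had in mind):

n = 3
a = [[7, 9, 3], [8, 5, 6]]        # station costs for line 1 and line 2
t = [[0, 2, 3, 1], [0, 2, 1, 2]]  # transfer costs, padded so t[line][i+1] exists for i = 0..n-1
e = [2, 4]                        # entry costs onto line 1 / line 2
x = [3, 2]                        # exit costs from line 1 / line 2
best_time, table = fastestWay(a, t, e, x, n)
print(best_time)                      # cost of the cheaper of the two exit options
print([line for _, line in table])    # stored line markers for that line's f-table

Note that the function returns the whole f-table for the cheaper exit rather than a reconstructed station sequence; recovering the actual route would mean walking the stored line markers backwards.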
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exercise_b2_39():\r\n pass", "def exercise_b2_113():\r\n pass", "def exercise_b2_93():\r\n pass", "def exercise_b2_98():\r\n pass", "def exercise_b2_27():\r\n pass", "def exercise_b2_106():\r\n pass", "def exercise_b2_53():\r\n pass", "def exercise_b2_43():\r\n pass", "def exercise_b2_69():\r\n pass", "def name_supply(stems=string.ascii_lowercase, drop_zero=True):\n k = 0\n while 1:\n for a in stems:\n yield a+str(k) if (k or not drop_zero) else a\n k = k+1", "def exercise_b2_107():\r\n pass", "def _block_name_base(stage, block):\n if block < 27:\n block = \"%c\" % (block + 97) # 97 is the ascii number for lowercase 'a'\n conv_name_base = \"res\" + str(stage) + block + \"_branch\"\n bn_name_base = \"bn\" + str(stage) + block + \"_branch\"\n return conv_name_base, bn_name_base", "def exercise_b2_82():\r\n pass", "def exercise_b2_70():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_95():\r\n pass", "def exercise_b2_26():\r\n pass", "def chain_full_name(alignment, chain):\n return '%s_%s' % (alignment, chain)", "def exercise_b2_56():\r\n pass", "def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals", "def main():\n var_name = prompt_for_var_name()\n var_words = normalize_var_name(var_name)\n for case in CASES:\n out_var_name = render_case(var_words, case)\n print(out_var_name)", "def getParseParams(residue, name):\n atomname = name\n resname = residue.name\n\n # Terminal/Water Substitutions\n\n nterm = residue.get(\"isNterm\")\n cterm = residue.get(\"isCterm\")\n if nterm and resname != \"ACE\":\n if resname == \"PRO\" and nterm == 2:\n resname = \"PR+\"\n if atomname == \"H2\":\n atomname = \"HN1\"\n elif atomname == \"H3\":\n atomname = \"HN2\"\n elif resname == \"PRO\" and nterm == 1:\n resname = \"PRN\"\n if atomname == \"H2\" or atomname == \"H3\": atomname = \"HN\"\n elif nterm == 2: # Neutral\n if atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA\", \"C\", \"O\"]:\n resname = \"BKN\"\n if atomname == \"H\":\n atomname = \"H1\"\n if atomname == 'H3':\n atomname = 'H2'\n elif nterm == 
3: # Positive\n if atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA\", \"C\", \"O\"]:\n resname = \"BK+\"\n if atomname == \"H\": atomname = \"H1\"\n elif cterm:\n if atomname == \"O\":\n atomname = \"O1\"\n elif atomname == \"OXT\":\n atomname = \"O2\"\n if cterm == 1 and atomname in [\"N\", \"H\", \"HA\", \"CA\", \"C\", \"O1\", \"O2\"]:\n resname = \"BK-\"\n elif cterm == 2 and atomname in [\"N\", \"H\", \"HA\", \"CA\", \"C\", \"O1\", \"O2\", \"HO\"]:\n if atomname == \"HO\": atomname = \"H2\"\n resname = \"BKC\"\n # print 'Cterm resname is',resname\n elif residue.get(\"type\") == 3:\n resname = \"H2O\"\n if atomname == \"O\":\n atomname = \"OH\"\n elif atomname == \"H1\":\n atomname = \"HH1\"\n elif atomname == \"H2\":\n atomname = \"HH2\"\n\n # Residue Substitutions\n if resname == \"HSD\":\n resname = \"HID\"\n elif resname in [\"HIE\", \"HSE\"]:\n resname = \"HIS\"\n elif resname in [\"HIP\", \"HSP\"]:\n resname = \"HI+\"\n elif resname == \"ILE\":\n if atomname == \"HG12\":\n atomname = \"HG11\"\n elif atomname == \"HG13\":\n atomname = \"HG12\"\n elif atomname == \"CD\":\n atomname = \"CD1\"\n elif resname == \"CYS\" and \"HG\" not in residue.get(\"map\"):\n resname = \"CSS\"\n #\n # Histidine\n #\n elif resname == \"HIS\":\n if \"HD1\" in residue.get(\"map\") and \"HE2\" in residue.get(\"map\"):\n resname = \"HI+\"\n elif \"HD1\" in residue.get(\"map\"):\n resname = \"HID\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"HIS\"\n elif resname == \"GLU\" or resname == \"GLH\":\n if \"HE1\" in residue.get(\"map\"):\n resname = \"GL0\"\n if atomname == \"HE1\":\n atomname = \"HE2\"\n elif atomname == \"OE1\":\n atomname = \"OE2\"\n elif atomname == \"OE2\":\n atomname = \"OE1\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"GL0\"\n elif resname == \"ASP\" or resname == \"ASH\":\n if \"HD1\" in residue.get(\"map\"):\n resname = \"AS0\"\n if atomname == \"HD1\":\n atomname = \"HD2\"\n elif atomname == \"OD1\":\n atomname = \"OD2\"\n elif atomname == \"OD2\":\n atomname = \"OD1\"\n elif \"HD2\" in residue.get(\"map\"):\n resname = \"AS0\"\n elif resname == \"ACE\":\n if atomname == \"HH31\":\n atomname = \"HA1\"\n elif atomname == \"HH32\":\n atomname = \"HA2\"\n elif atomname == \"HH33\":\n atomname = \"HA3\"\n elif atomname == \"CH3\":\n atomname = \"CA\"\n elif resname == \"TYR\":\n if not \"HH\" in residue.get(\"map\"):\n resname = \"TYM\"\n elif resname == \"TYM\":\n resname = \"TY-\"\n elif resname == \"CYM\":\n resname = \"CY-\"\n elif resname == \"LYN\":\n resname = \"LY0\"\n #\n # Neutral LYS and neutral ARG detection based on hydrogens - added by Jens\n #\n elif resname == \"LYS\":\n if not \"HZ3\" in residue.get(\"map\"):\n resname = \"LY0\"\n elif resname == \"ARG\":\n if not \"HE\" in residue.get(\"map\"):\n resname = \"AR0\"\n elif resname == \"NME\":\n resname = \"N-M\"\n if atomname == \"CH3\":\n atomname = \"CA\"\n elif atomname == \"H\":\n atomname = \"H1\"\n elif atomname.startswith(\"HH\"):\n atomname = \"HA\" + atomname[-1]\n\n # Hydrogen Substitutions\n\n if atomname == \"H\":\n atomname = \"HN\"\n elif atomname == \"HA2\":\n atomname = \"HA1\"\n elif atomname == \"HA3\":\n atomname = \"HA2\"\n elif atomname == \"HB2\" and resname not in [\"ALA\"]:\n atomname = \"HB1\"\n elif atomname == \"HB3\" and resname not in [\"ALA\"]:\n atomname = \"HB2\"\n elif atomname == \"HD2\" and resname not in [\"HIS\", \"HI+\", \"HID\", \"AS0\"]:\n atomname = \"HD1\"\n elif atomname == \"HD3\" and resname not in [\"HIS\", \"HI+\", \"HID\"]:\n atomname = \"HD2\"\n 
elif atomname == \"HE2\" and resname not in [\"TRP\", \"HIS\", \"HI+\", \"HID\", \"GL0\"]:\n atomname = \"HE1\"\n elif atomname == \"HE3\" and resname not in [\"TRP\", \"HIS\", \"HI+\", \"HID\"]:\n atomname = \"HE2\"\n elif atomname == \"HG2\":\n atomname = \"HG1\"\n elif atomname == \"HG3\":\n atomname = \"HG2\"\n elif atomname == \"HZ2\" and resname == \"LY0\":\n atomname = \"HZ1\"\n elif atomname == \"HZ3\" and resname == \"LY0\":\n atomname = \"HZ2\"\n\n return resname, atomname", "def test_expanded_name( self ):\n\t\tself.doBasicTest(\n\t\t\t'''s := something +\n\t\t\t>something< := r\n\t\t\tr := [ab]\n\t\t\tv := [c]\n\t\t\t''',\n\t\t\t's',\n\t\t\t'abammmab',\n\t\t\t(1,[\n\t\t\t\t('r',0,1, NullResult),\n\t\t\t\t('r',1,2, NullResult),\n\t\t\t\t('r',2,3, NullResult),\n\t\t\t],3)\n\t\t)", "def test_bnd_mate_name():\n\n mate = bnd_mate_name(ALT, \"2\")\n assert mate == \"17\"", "def test_initialization_of_TCRsubset_alpha_beta_case_chain_names():\n assert isinstance(dist_a_subset, pd.DataFrame)\n assert isinstance(dist_b_subset, pd.DataFrame)\n assert isinstance(clone_df_subset, pd.DataFrame)\n TCRsubset(clone_df = clone_df_subset, \n organism = \"mouse\",\n epitopes = [\"PA\"] ,\n epitope = \"PA\",\n chains = [\"alpha\",\"beta\"],\n dist_a = dist_a_subset,\n dist_b = dist_b_subset)", "def part_2():\n instructions[\"b\"] = 46065\n print(\"Part 2: \" + str(evaluate(\"a\")))", "def name(n, b=\"s\"):\n print(\"This is the \" + n + b)\n return print(\"Hi\")", "def var():\n def _var(quoted_name):\n name = quoted_name.subexpression.name\n if (value := get_name(name)) is not None:\n return value\n else:\n raise TypeError(f\"Binding {name} not found\")\n yield (\"(λ &[name] . any)\", _var)", "def _expandVariables (self, st : String) -> String:\n\n Logging.trace(\">>: %r\", st)\n cls = self.__class__\n\n # collect identifiers embedded in value and replace them by\n # their value\n ParseState_inLimbo = 0\n ParseState_inString = 1\n ParseState_inEscape = 2\n ParseState_inIdentifier = 3\n parseStateToString = { 0 : \"-\", 1 : \"S\",\n 2 : cls._escapeCharacter, 3 : \"I\" }\n\n parseState = ParseState_inLimbo\n result = \"\"\n identifier = \"\"\n fsaTrace = \"\"\n\n for ch in st:\n # process finite state automaton with three states based\n # on next character in string\n fsaTrace += (iif(fsaTrace == \"\", \"\", \" \")\n + \"[%s] %s\" % (parseStateToString[parseState], ch))\n\n if parseState == ParseState_inLimbo:\n if cls._identifierCharRegExp.search(ch):\n identifier = ch\n parseState = ParseState_inIdentifier\n else:\n result += ch\n if ch == cls._doubleQuoteCharacter:\n parseState = ParseState_inString\n elif parseState == ParseState_inString:\n result += ch\n if ch == cls._doubleQuoteCharacter:\n parseState = ParseState_inLimbo\n elif ch == cls._escapeCharacter:\n parseState = ParseState_inEscape\n elif parseState == ParseState_inEscape:\n result += ch\n parseState = ParseState_inString\n elif parseState == ParseState_inIdentifier:\n if cls._identifierCharRegExp.search(ch):\n identifier += ch\n else:\n identifierValue = self._findIdentifierValue(identifier)\n result += identifierValue\n result += ch\n parseState = iif(ch == cls._doubleQuoteCharacter,\n ParseState_inString, ParseState_inLimbo)\n\n if parseState == ParseState_inIdentifier:\n identifierValue = self._findIdentifierValue(identifier)\n result += identifierValue\n \n Logging.trace(\"--: accumulatedFSATrace = %s\", fsaTrace)\n Logging.trace(\"<<: %r\", result)\n return result", "def psea(pname): # -> str:\n ...", "def test_get_alias():\n c 
= Curve(data=np.linspace(1, 20, 2), mnemonic='DT')\n alias = {'Sonic': ['DT', 'foo']}\n assert c.get_alias(alias) == ['Sonic']", "def printname(bruce):", "def _make_dijet_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario.startswith('dijet')\n\n arg_res = [\n re.compile(r'^(?P<lo>\\d*)(?P<key>djmass)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1eta)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2eta)(?P<hi>\\d*)$'),\n ]\n\n defaults = {\n 'j1et': ('100', 'inf'),\n 'j2et': ('100', 'inf'),\n 'j1eta': ('0', '320'),\n 'j2eta': ('0', '320'),\n 'djmass': ('1000', 'inf'),\n }\n\n\n args = _args_from_scenario(scenario)\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n combgen(\n [(2)(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n ]\n \n dijet(\n [(%(djmasslo).0fdjmass)])\n simple([(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j2etlo).0fet, %(j2etalo).0feta%(j2etahi).0f)])\n )\"\"\" % argvals", "def procs2(variable=\"\", category=\"\"):\n assert variable\n assert category\n out = {}\n\n # out = {\"VV\": [\"*VV\", \"*singleT\"],\n # \"ZLL\": [\"*ZLL\"],\n # }\n\n # out[\"W+QCD\"] = [\"W\", \"QCD\"]\n\n return out", "def symbolic_objective(ingredients) -> Tuple[List[sp.Symbol], sp.Eq]:\n assignments = sp.symbols(' '.join(ingredients.keys()))\n\n # Skip negative logic due to differentiability requirements\n objective = 1\n for attribute in filter(lambda x: x != \"calories\", next(iter(ingredients.values())).keys()):\n objective *= sum(ingredients[str(x)][attribute] * x for x in assignments)\n\n return assignments, objective", "def __init__(self,\n point_size: int,\n max_levels=6,\n min_levels=3,\n mutation_prob=0.5\n ) -> None:\n self.rec_refs = {}\n self.mutation_prob = mutation_prob\n\n vars1 = []\n vars2 = []\n for i in range(point_size):\n vars1 += [f\"X1[{i}]\"]\n\n for i in range(point_size):\n vars2 += [f\"X2[{i}]\"]\n\n self.grammar = {\n **{f\"<expr_{i}>\": [f\"<expr_{i+1}> <op> <expr_{i+1}>\", f\"<func> ( <expr_{i+1}> <op> <expr_{i+1}> )\"] for i in range(min_levels)},\n **{f\"<expr_{min_levels + i}>\": [f\"<expr_{min_levels + i+1}> <op> <expr_{min_levels + i+1}>\", f\"<func> ( <expr_{min_levels + i + 1}> <op> <expr_{min_levels + i + 1}> )\", \"<term>\"] for i in range(max_levels - min_levels)},\n f\"<expr_{max_levels}>\": [\"<term_1> <op> <term_2>\", \"<term_2> <op> <term_1>\"],\n \"<term>\": [\n \"<term_1>\", \"<term_2>\"\n ],\n \"<term_1>\": [\n \"<var_1>\",\n \"<pre-op> ( <var_1> )\",\n ],\n \"<term_2>\": [\n \"<var_2>\",\n \"<pre-op> ( <var_2> )\",\n ],\n \"<pre-op>\": [\n \"1/\",\n \"-\",\n \"+\",\n \"abs\",\n \"numpy.math.sqrt\"\n ],\n \"<func>\": [\n \"abs\",\n \"\"\n ],\n \"<op>\": [\n \"+\",\n \"*\",\n \"-\",\n \"/\",\n ],\n \"<var_1>\": vars1,\n \"<var_2>\": vars2,\n }\n\n self.non_terminals = sorted(self.grammar.keys())\n\n # these two lines are described in the pseudocode of the reference paper\n 
rec_refs = self.countRecursiveReferences()\n self.ref_count = {\n key: self.findReferences(key, *rec_refs) for key in self.grammar.keys()\n }", "def exercise_b2_86():\r\n pass", "def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def main():\n\n args = get_args()\n codons = {\n 'A': 4, 'C': 2, 'D': 2, 'E': 2, 'F': 2, 'G': 4, 'H': 2, 'I': 3,\n 'K': 2, 'L': 6, 'M': 1, 'N': 2, 'P': 4, 'Q': 2, 'R': 6, 'S': 6,\n 'T': 4, 'V': 4, 'W': 1, 'Y': 2, '*': 3,\n }\n print(product(map(codons.get, args.protein + '*')) % args.modulo)", "def _optionvarkey(name):\n return \"ragdoll%s\" % (name[0].upper() + name[1:])", "def exercise_2b():\n\n return", "def main(supply):\n\n # Define four parallel arrays: start_nodes, end_nodes, capacities, and unit costs\n # between each pair. For instance, the arc from node 0 to node 1 has a\n # capacity of 15 and a unit cost of 4.\n\n start_nodes = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 8, 8, 8, 8, 8, 9, 9, 9]\n end_nodes = [8, 2, 4, 6, 5, 4, 7, 6, 9, 8, 9, 0, 3, 4, 2, 5, 1, 0, 2, 5, 1, 8, 3, 4, 1, 0, 8, 1, 1, 0, 9, 5, 6, 1, 8, 2]\n capacities = [23, 10, 25, 15, 17, 14, 10, 21, 17, 11, 22, 27, 14, 6, 19, 9, 11, 8, 29, 16, 22, 29, 20, 13, 18, 14, 20, 25, 13, 8, 10, 24, 5, 9, 20, 28]\n unit_costs = [6, 9, 7, 8, 8, 5, 8, 5, 6, 9, 6, 5, 6, 6, 9, 7, 8, 6, 9, 6, 5, 5, 8, 7, 5, 8, 7, 9, 7, 6, 9, 6, 5, 5, 6, 7]\n\n # Define an array of supplies at each node.\n supplies = supply\n\n\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n\n # Add each arc.\n for i in range(0, len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i], end_nodes[i],\n capacities[i], unit_costs[i])\n\n # Add node supplies.\n\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n\n\n # Find the minimum cost flow between node 0 and node 4.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n flag = 1\n optimal_flows = np.zeros(36)\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i) * min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # save answer to the variable\n optimal_flows[i] = min_cost_flow.Flow(i)\n return flag, optimal_flows\n else:\n print('There was an issue with the min cost flow input.')\n flag = 0\n return flag, 0", "def __init__(self, name):\n self.name = name\n self.difficulty = 0\n self.description = \"\"\n self.prerequisites = {}\n self.cost = {}\n self.effects = {}\n self.strings = {}", "def __init__(self):\n self.name = ''\n 
self.variables = []\n self.assumptions = []\n self.guarantees = []", "def _make_combinationsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'combinationsTest'\n\n \n\n return \"\"\"\n combgen(\n [(2)(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def getAmberParams(residue, name):\n atomname = name\n type = residue.get(\"type\")\n if type == 4:\n resname = residue.get(\"naname\")\n else:\n resname = residue.get(\"name\")\n\n # Residue Substitutions\n\n if residue.get(\"name\") == \"CYS\" and \"HG\" not in residue.get(\"map\"):\n resname = \"CYX\"\n elif residue.get(\"name\") == \"HIS\":\n if \"HD1\" in residue.get(\"map\") and \"HE2\" in residue.get(\"map\"):\n resname = \"HIP\"\n elif \"HD1\" in residue.get(\"map\"):\n resname = \"HID\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"HIE\"\n else:\n resname = \"HID\" # Default for no hydrogens\n elif residue.get(\"name\") == \"HSP\":\n resname = \"HIP\"\n elif residue.get(\"name\") == \"HSE\":\n resname = \"HIE\"\n elif residue.get(\"name\") == \"HSD\":\n resname = \"HID\"\n elif residue.get(\"name\") == \"GLU\" or residue.get(\"name\") == \"GLH\":\n if \"HE1\" in residue.get(\"map\"):\n resname = \"GLH\"\n if atomname == \"HE1\":\n atomname = \"HE2\"\n elif atomname == \"OE1\":\n atomname = \"OE2\"\n elif atomname == \"OE2\":\n atomname = \"OE1\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"GLH\"\n elif residue.get(\"name\") == \"ASP\" or residue.get(\"name\") == \"ASH\":\n if \"HD1\" in residue.get(\"map\"):\n resname = \"ASH\"\n if atomname == \"HD1\":\n atomname = \"HD2\"\n elif atomname == \"OD1\":\n atomname = \"OD2\"\n elif atomname == \"OD2\":\n atomname = \"OD1\"\n elif \"HD2\" in residue.get(\"map\"):\n resname = \"ASH\"\n\n if residue.get(\"isCterm\"):\n resname = \"C\" + resname\n elif residue.get(\"isNterm\"):\n resname = \"N\" + resname\n\n # Atom Substitutions\n\n if resname == \"WAT\":\n if atomname == \"O\":\n atomname = \"OW\"\n elif atomname == \"H1\":\n atomname = \"HW\"\n elif atomname == \"H2\":\n atomname = \"HW\"\n elif resname == \"ILE\":\n if atomname == \"CD\": atomname = \"CD1\"\n if resname[0] == \"N\" and resname != \"NME\": # N-terminal\n if atomname == \"H\": atomname = \"H1\"\n if (resname == \"CCYS\" or resname == \"NCYS\") and atomname == \"HG\": atomname = \"HSG\"\n if resname == \"CYM\" and atomname == \"H\": atomname = \"HN\"\n if residue.get(\"isNterm\") and resname == \"NPRO\" and atomname == \"HN2\":\n atomname = \"H2\"\n if residue.get(\"isNterm\") and resname == \"NPRO\" and atomname == \"HN1\":\n atomname = \"H3\"\n return resname, atomname", "def linenames():\n linenamesdic = {}\n\n linenamesdic['ovi1'] = ['OVI $\\\\lambda$1032' , 1031.9261, 'right' , 'Morton1991tab2']\n linenamesdic['ovi2'] = ['OVI $\\\\lambda$1038' , 1037.6167, 'left' , 'Morton1991tab2']\n linenamesdic['lyb'] = ['Ly$\\\\beta$ $\\\\lambda$1025' , 1025.7219, 'right' , 'Morton1991tab5']\n linenamesdic['lya'] = ['Ly$\\\\alpha$ $\\\\lambda$1216' , 1215.6737, 'right' , 'Morton1991tab5']\n linenamesdic[ 'NV1240'] = ['NV $\\\\lambda$1239' , 1238.821 , 'right' , 'Morton1991tab5']\n linenamesdic['nv2'] = ['NV $\\\\lambda$1243' , 1242.804 , 'left' , 'Morton1991tab5']\n linenamesdic['cii'] = ['CII $\\\\lambda$1336' , 1335.6627, 'right' , 'Morton1991tab5']\n linenamesdic['Siiv1'] = ['SiIV $\\\\lambda$1394' , 1393.755 , 'right' , 'Morton1991tab5']\n linenamesdic['oiv1'] = 
['OIV $\\\\lambda$1397' , 1397.232 , 'right' , 'Morton1991tab5']\n linenamesdic['oiv2'] = ['OIV $\\\\lambda$1400' , 1399.780 , 'left' , 'Morton1991tab5']\n linenamesdic['Siiv2'] = ['SiIV $\\\\lambda$1403' , 1402.770 , 'left' , 'Morton1991tab5']\n linenamesdic['CIV1548'] = ['CIV $\\\\lambda$1548' , 1548.195 , 'right' , 'Morton1991tab5']\n linenamesdic['CIV1551'] = ['CIV $\\\\lambda$1551' , 1550.770 , 'left' , 'Morton1991tab5']\n linenamesdic['HeII1640'] = ['HeII $\\\\lambda$1640' , 1640.420 , 'right' , 'vandenberk+2001']\n linenamesdic['OIII1661'] = ['OIII] $\\\\lambda$1661' , 1660.809 , 'right' , 'Morton1991tab2']\n linenamesdic['OIII1666'] = ['OIII] $\\\\lambda$1666' , 1666.150 , 'left' , 'Morton1991tab2']\n linenamesdic['ciii1'] = ['[CIII] $\\\\lambda$1907' , 1907. , 'right' , 'stark+2015']\n linenamesdic['CIII1908'] = ['CIII] $\\\\lambda$1909' , 1909. , 'left' , 'stark+2015']\n linenamesdic['ciib'] = ['CII] $\\\\lambda$2326' , 2326.113 , 'right' , 'Morton1991tab5']\n linenamesdic['mgii1'] = ['MgII] $\\\\lambda$2796' , 2795.528 , 'right' , 'Morton1991tab5']\n linenamesdic['mgii2'] = ['MgII] $\\\\lambda$2803' , 2802.705 , 'left' , 'Morton1991tab5']\n linenamesdic['OII3727'] = ['[OII] $\\\\lambda$3726' , 3726. , 'right' , 'Pradhan2006']\n linenamesdic['oii2'] = ['[OII] $\\\\lambda$3729' , 3729. , 'left' , 'Pradhan2006']\n\n return linenamesdic", "def probability(cpts, term, obs):\r\n \r\n \r\n # term is a list e.g., ['x_1', '0']\r\n # flip refers to the assignment either '0' false or '1' true\r\n flip = term[1]\r\n # the term itself\r\n term = term[0]\r\n # accumulator variable\r\n answer = 0\r\n # this loop locates where in the CPT we're looking\r\n for clause in range(len(cpts)):\r\n if cpts[clause][0] == term:\r\n index = clause\r\n # focus on our term\r\n cpt = cpts[index]\r\n # this loop checks if there are no preconditions\r\n # if not, then we immediately know the probability and can return\r\n for m in range(len(cpt[1])):\r\n if cpt[1][m][-2][1] == '1':\r\n if cpt[1][m][0] == [[]]:\r\n answer = cpt[1][m][-1]\r\n # list of the variables we have observed\r\n have = []\r\n if obs != []:\r\n for k in obs:\r\n have.append(k[0])\r\n # list of variables we need to know in order to calculate the probability\r\n needed = []\r\n for prob in range(len(cpt[1])):\r\n for j in cpt[1][prob][0]:\r\n if j != []:\r\n if j[0] not in needed:\r\n needed.append(j[0])\r\n # conditional logic based on the known variables\r\n for required in needed:\r\n if required not in have:\r\n # deep copy our observations list\r\n obs2 = []\r\n obs3 = []\r\n for observs in obs:\r\n obs2.append(observs)\r\n obs3.append(observs)\r\n # if we need to know a variable but don't have it\r\n # then we allow it to be either 0 or 1\r\n obs3.append([required,'1'])\r\n obs2.append([required,'0'])\r\n # computes probability if the unknown term is true, times \r\n # the probability that the unknown term is true, plus the\r\n # probability if the unknown term is false, times the \r\n # probability that the unknown term is false\r\n answer = (probability(cpts, [term,flip], obs3) * probability(cpts, [required,'1'], obs)) + (probability(cpts, [term,flip], obs2) * (probability(cpts, [required,'0'], obs)))\r\n # this loop looks complicated but all it's doing is finding the correct\r\n # line in the CPT\r\n if cpt[1][prob][-2][1] == '1':\r\n count = 1\r\n for i in range(len(cpt[1][prob][0])):\r\n if cpt[1][prob][0][i] in obs:\r\n count *= 1\r\n else:\r\n count = 0\r\n if count == 1:\r\n answer += cpt[1][prob][-1]\r\n\r\n\r\n # this 
computes the probability that the term is true, so if we asked \r\n # for the probability that it is false, just return 1 - answer\r\n if flip == '0':\r\n return 1 - answer\r\n return answer", "def Problem11():\n return 'Ductile Coulomb-Mohr'", "def _get_vars(symbol: Union[str, int]) -> str:\n if isinstance(symbol, str):\n return {\n 'circle': 'var b1=n.round(t,2);',\n 'square': 'var b1=n.round(t,2);',\n 'diamond': 'var b1=n.round(t*1.3,2);',\n 'hexagram': 'var b1=n.round(t,2);var b2=n.round(t/2,2);var b3=n.round(t*Math.sqrt(3)/2,2);'\n }[symbol]\n return {\n 37: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 38: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 39: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 40: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 34: 'var d1=n.round(t,2);',\n 33: 'var d1=n.round(t*1.4,2);',\n 35: 'var d1=n.round(t*1.2,2);var d2=n.round(t*0.85,2);',\n 36: 'var d1=n.round(t/2,2);var d2=n.round(t,2);'\n }[symbol]", "def validVarConstructName(self,varname):\r\n if (len(varname[0])>32):\r\n return False, ''\r\n if not(varname[0][0].isalpha()):\r\n return False, '' \r\n for ch in varname[0][1:]:\r\n if not(ch.isalpha() or ch.isdigit() or ch=='_'):\r\n return False, ''\r\n \r\n return True, varname", "def getCharmmParams(residue, name):\n resname = residue.get(\"name\")\n atomname = name\n\n # Nucleic Acid Substitutions\n\n if residue.get(\"type\") == 4:\n resname = resname[0]\n if resname == \"A\":\n resname = \"ADE\"\n elif resname == \"C\":\n resname = \"CYT\"\n elif resname == \"G\":\n resname = \"GUA\"\n elif resname == \"T\":\n resname = \"THY\"\n if atomname == \"C7\":\n atomname = \"C5M\"\n elif atomname == \"H71\":\n atomname = \"H51\"\n elif atomname == \"H72\":\n atomname = \"H52\"\n elif atomname == \"H73\":\n atomname = \"H53\"\n elif resname == \"U\":\n resname = \"URA\"\n\n if atomname == \"H5'1\":\n atomname = \"H5'\"\n elif atomname == \"H5'2\":\n atomname = \"H5''\"\n elif atomname == \"H2'1\":\n atomname = \"H2'\"\n elif atomname in [\"H2'2\", \"HO'2\"]:\n atomname = \"H2''\"\n\n if residue.getAtom(\"O2'\") is None:\n if atomname in [\"C2'\", \"H2'\", \"H2''\"]: resname = \"DEO1\"\n\n if residue.getAtom(\"H5T\") is not None:\n if atomname in [\"H5T\", \"O5'\", \"C5'\"]: resname = \"5TER\"\n if residue.getAtom(\"H3T\") is not None:\n if atomname in [\"H3T\", \"O3'\", \"C3'\"]: resname = \"3TER\"\n\n # Terminal/Water Substitutions\n\n if residue.get(\"isNterm\"):\n if resname == \"GLY\" and atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA2\", \"HA3\"]:\n resname = \"GLYP\"\n if atomname == \"H\":\n atomname = \"HT1\"\n elif atomname == \"H2\":\n atomname = \"HT2\"\n elif atomname == \"H3\":\n atomname = \"HT3\"\n elif resname == \"PRO\" and atomname in [\"N\", \"HN1\", \"HN2\", \"CD\", \"CA\", \"HD1\", \"HD2\", \"HA\", \"H2\", \"H3\"]:\n resname = \"PROP\"\n if atomname == \"H2\":\n atomname = \"HN1\"\n elif atomname == \"H3\":\n atomname = \"HN2\"\n elif resname == \"ACE\":\n if atomname == \"CH3\":\n atomname = \"CAY\"\n elif atomname == \"HH31\":\n atomname = \"HY1\"\n elif atomname == \"HH32\":\n atomname = \"HY2\"\n elif atomname == \"HH33\":\n atomname = \"HY3\"\n elif atomname == \"C\":\n atomname = \"CY\"\n elif atomname == \"O\":\n atomname = \"OY\"\n else:\n if atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA\"]:\n resname = \"NTER\"\n if atomname == \"H\":\n atomname = \"HT1\"\n elif atomname == 
\"H2\":\n atomname = \"HT2\"\n elif atomname == \"H3\":\n atomname = \"HT3\"\n elif residue.get(\"isCterm\"):\n if atomname in [\"O\", \"OXT\", \"C\"]:\n resname = \"CTER\"\n if atomname == \"O\":\n atomname = \"OT1\"\n elif atomname == \"OXT\":\n atomname = \"OT2\"\n elif residue.get(\"type\") == 3:\n resname = \"TP3M\"\n if atomname == \"O\": atomname = \"OH2\"\n\n # Residue substitutions\n\n if resname == \"ILE\":\n if atomname == \"CD1\":\n atomname = \"CD\"\n elif atomname == \"HD11\":\n atomname = \"HD1\"\n elif atomname == \"HD12\":\n atomname = \"HD2\"\n elif atomname == \"HD13\":\n atomname = \"HD3\"\n elif atomname == \"HG12\":\n atomname = \"HG11\"\n elif atomname == \"HG13\":\n atomname = \"HG12\"\n elif resname == \"CYS\" and \"HG\" not in residue.get(\"map\"):\n resname = \"CYS\"\n if atomname == \"CB\":\n resname = \"DISU\"\n atomname = \"1CB\"\n elif atomname == \"SG\":\n resname = \"DISU\"\n atomname = \"1SG\"\n elif resname == \"HIS\":\n if \"HD1\" in residue.get(\"map\") and \"HE2\" in residue.get(\"map\"):\n resname = \"HSP\"\n elif \"HD1\" in residue.get(\"map\"):\n resname = \"HSD\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"HSE\"\n elif resname == \"GLU\" or resname == \"GLH\":\n if \"HE1\" in residue.get(\"map\"):\n if atomname == \"HE1\":\n atomname = \"HE2\"\n elif atomname == \"OE1\":\n atomname = \"OE2\"\n elif atomname == \"OE2\":\n atomname = \"OE1\"\n if atomname in [\"CG\", \"HG3\", \"HG1\", \"HG2\", \"CD\", \"OE1\", \"OE2\", \"HE2\"]:\n resname = \"GLUP\"\n else:\n resname = \"GLU\"\n elif \"HE2\" in residue.get(\"map\"):\n if atomname in [\"CG\", \"HG3\", \"HG1\", \"HG2\", \"CD\", \"OE1\", \"OE2\", \"HE2\"]:\n resname = \"GLUP\"\n else:\n resname = \"GLU\"\n elif resname == \"ASP\" or resname == \"ASH\":\n if \"HD1\" in residue.get(\"map\"):\n if atomname == \"HD1\":\n atomname = \"HD2\"\n elif atomname == \"OD1\":\n atomname = \"OD2\"\n elif atomname == \"OD2\":\n atomname = \"OD1\"\n if atomname in [\"CB\", \"HB3\", \"HB1\", \"HB2\", \"CG\", \"OD1\", \"OD2\", \"HD2\"]:\n resname = \"ASPP\"\n else:\n resname = \"ASP\"\n elif \"HD2\" in residue.get(\"map\"):\n if atomname in [\"CB\", \"HB3\", \"HB1\", \"HB2\", \"CG\", \"OD1\", \"OD2\", \"HD2\"]:\n resname = \"ASPP\"\n else:\n resname = \"ASP\"\n\n # HETATM Substitutions\n\n if resname == \"ACE\":\n if atomname == \"CH3\":\n atomname = \"CAY\"\n elif atomname == \"HH31\":\n atomname = \"HY1\"\n elif atomname == \"HH32\":\n atomname = \"HY2\"\n elif atomname == \"HH33\":\n atomname = \"HY3\"\n elif atomname == \"C\":\n atomname = \"CY\"\n elif atomname == \"O\":\n atomname = \"OY\"\n elif resname == \"ADP\":\n atomname = atomname.replace(\"*\", \"\\'\")\n elif resname == \"NME\":\n resname = \"CT3\"\n if atomname == \"HH31\":\n atomname = \"HT1\"\n elif atomname == \"HH32\":\n atomname = \"HT2\"\n elif atomname == \"HH33\":\n atomname = \"HT3\"\n elif atomname == \"CH3\":\n atomname = \"CAT\"\n elif atomname == \"N\":\n atomname = \"NT\"\n elif atomname == \"H\":\n atomname = \"HNT\"\n\n # Hydrogen Substitutions\n\n if atomname == \"H\":\n atomname = \"HN\"\n elif atomname == \"HA2\":\n atomname = \"HA1\"\n elif atomname == \"HA3\":\n atomname = \"HA2\"\n elif atomname == \"HB2\" and resname not in [\"ALA\"]:\n atomname = \"HB1\"\n elif atomname == \"HB3\" and resname not in [\"ALA\"]:\n atomname = \"HB2\"\n elif atomname == \"HD2\" and resname not in [\"HSP\", \"HSE\", \"HSD\", \"ASPP\"]:\n atomname = \"HD1\"\n elif atomname == \"HD3\" and resname not in [\"HIS\", \"HSE\", \"HSD\"]:\n atomname = 
\"HD2\"\n elif atomname == \"HE2\" and resname not in [\"TRP\", \"HSP\", \"HSE\", \"HSD\", \"GLUP\"]:\n atomname = \"HE1\"\n elif atomname == \"HE3\" and resname not in [\"TRP\", \"HSP\", \"HSE\", \"HSD\"]:\n atomname = \"HE2\"\n elif atomname == \"HG2\":\n atomname = \"HG1\"\n elif atomname == \"HG3\":\n atomname = \"HG2\"\n elif atomname == \"HG\" and resname in [\"SER\", \"CYS\"]:\n atomname = \"HG1\"\n\n return resname, atomname", "def let():\n def from_many(*kv_pairs):\n new_bindings = {}\n for entry in kv_pairs:\n with match(entry) as case:\n with case('Quoted(Sexpr(Name(name), expr))') as [m]:\n new_bindings[m.name] = m.expr\n\n def _from_many(quoted_body):\n return EvaluateInContext(\n push_subscope_with(new_bindings),\n pop_subscope,\n quoted_body.subexpression\n )\n\n return e.Function({parse_fn(\"(λ &[any] . any)\"): _from_many})\n yield (\"(λ ...&[(name any)] . (λ &[any] . any))\", from_many)\n\n def from_one(key, value, quoted_body):\n return EvaluateInContext(\n push_subscope_with({key.subexpression.name: value}),\n pop_subscope,\n quoted_body.subexpression\n )\n yield (\"(λ &[name] any &[any] . any)\", from_one)", "def lookup(name):", "def lookup(name):", "def _parse_var_initsol(self,varname) :\n\t\tinitsol = self.ss.constraint.initsol\n\t\tparams = getattr(initsol,varname)\n\t\tnvars = len(self.ss.variables) # num of variables\n\n\t\tif varname in ('alpha','beta') : \n\t\t\tself.initsol[varname] = np.ones(nvars)\n\t\t\tkeys = params.keys()\n\t\t\tself.initsol[varname][:] = params['defaultInitialValue']\n\t\t\tfor key in keys : \n\t\t\t\tif re.match(varname+'_\\d+',key)\t:\n\t\t\t\t\tidx = int(key.split('_')[1])\n\t\t\t\t\tself.initsol[varname][idx-1] = params[key]\n\t\telif varname in ('g','h') :\n\t\t\tself.initsol[varname] = np.ones([nvars,nvars])\n\t\t\tkeys = params.keys()\n\t\t\tself.initsol[varname][:] = params['defaultInitialValue']\n\t\t\tfor key in keys : \n\t\t\t\tif re.match(varname+'_\\d+_\\d+',key)\t:\n\t\t\t\t\tidr,idc = map(int,(key.split('_')[1:3]))\n\t\t\t\t\tself.initsol[varname][idr-1][idc-1] = params[key]\n\t\t\n\t\telse :\n\t\t\tlogging.error(\"Unrecognized varname %s quitting..\" \\\n\t\t\t%(varname))\n\t\t\tsys.exit(1)", "def test_add_var_desc():\n v = dd.vars['WGT']\n \n assert add_var_desc('Housing ', dd, 'WGT') == 'WGT'\n assert v.vardesc == 'Housing'\n\n \"\"\" Test add second line \"\"\"\n assert add_var_desc(' Unit Weight', dd, 'WGT') == 'WGT'\n assert v.vardesc == 'Housing Unit Weight'\n\n \"\"\" Test prevention against duplication \"\"\"\n assert add_var_desc('Housing Unit Weight', dd, 'WGT') == 'WGT'\n assert add_var_desc('HousingUnit Weight', dd, 'WGT') == 'WGT'\n\n assert add_var_desc('Person', dd, 'PWGT') == None", "def prIn(nm, form=\"\", *args):\n # variables set in \"prSet()\"\n global lPr\n\n # init\n if not 'lPr' in globals():\n prSet(3)\n\n # print\n if form == \"\":\n pr('%s', nm)\n else:\n pr('%s: ' + form, nm, *args)\n\n # self add\n lPr = lPr + 1", "def test_get_call_name2(self):\n tree = ast.parse(\"a.b.c.d(x,y)\").body[0].value\n\n name = b_utils.get_call_name(tree, {\"a\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y.b.c.d\", name)\n\n name = b_utils.get_call_name(tree, {\"a.b\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y.c.d\", name)\n\n name = b_utils.get_call_name(tree, {\"a.b.c.d\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y\", name)", "def _make_simple_comb_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring 
selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n simple_strs = []\n\n for cp in cps:\n print(cp)\n simple_strs.append(_make_simple_label([cp]))\n\n label = 'combgen([(%d)]' % len(cps)\n for s in simple_strs:\n label += ' %s ' % s\n label += ')'\n return label", "def test_expand_var(self):\n self.assertEqual(\"test\",\n grammar._EXPAND_VAR.parseString(\"$test\").name)", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def standard_name_to_long_name(prop_dict, context=None):\n########################################################################\n # We assume that standar_name has been checked for validity\n # Make the first char uppercase and replace each underscore with a space\n if 'standard_name' in prop_dict:\n standard_name = prop_dict['standard_name']\n if len(standard_name) > 0:\n long_name = standard_name[0].upper() + re.sub(\"_\", \" \", standard_name[1:])\n else:\n long_name = ''\n # End if\n # Next, substitute a decimal point for the p in [:digit]p[:digit]\n match = real_subst_re.match(long_name)\n while match is not None:\n long_name = match.group(1) + '.' + match.group(2)\n match = real_subst_re.match(long_name)\n # End while\n else:\n long_name = ''\n if 'local_name' in prop_dict:\n lname = ' {}'.format(prop_dict['local_name'])\n else:\n lname = ''\n # End if\n ctxt = context_string(context)\n raise CCPPError('No standard name to convert{} to long name{}'.format(lname, ctxt))\n # End if\n return long_name", "def _linab(arg, symbol):\n arg = arg.expand()\n ind, dep = arg.as_independent(symbol)\n if not arg.is_Add:\n b = 0\n a, x = ind, dep\n else:\n b = ind\n a, x = separatevars(dep).as_independent(symbol, as_Add=False)\n if x.could_extract_minus_sign():\n a = -a\n x = -x\n return a, b, x", "def __splitVariableNames(self, name, indexes):\n if name == 'x':\n var = self.xCoordinates[indexes[0]][indexes[1]]\n elif name == 'y':\n var = self.yCoordinates[indexes[0]][indexes[1]]\n elif name == 'z':\n var = self.zCoordinates[indexes[0]][indexes[1]]\n elif name == 'colorMap':\n var = self.colorMapCoordinates[indexes[0]][indexes[1]]\n elif name == 'clusterLabels':\n var = self.clusterLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureLabels':\n var = self.mixtureLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureMeans':\n var = self.mixtureMeans[indexes[0]][indexes[1]]\n elif name == 'mixtureCovars':\n var = self.mixtureCovars[indexes[0]][indexes[1]]\n\n # The variable can contain brackets {} (when the symbol \"|\" is present in\n # the variable name), e.g.:\n # DataName|Input|{RavenAuxiliary|variableName|initial_value}\n # or it can look like:\n # DataName|Input|variableName\n\n if var is not None:\n result = [None] * 3\n if '|input|' in var.lower():\n match = re.search(r\"(\\|input\\|)\", var.lower())\n elif '|output|' in var.lower():\n match = re.search(r\"(\\|output\\|)\", var.lower())\n else:\n self.raiseAnError(IOError, f'In Plot {self.name}, the input coordinate {name} has not specified an \"Input\" or \"Output\" (case insensitive). 
e.g., sourceName|Input|aVariable) in {var}')\n startLoc, endLoc = match.start(), match.end()\n result = [var[:startLoc].strip(), var[startLoc+1:endLoc-1].strip(), var[endLoc:].strip()]\n if '{' in result[-1] and '}' in result[-1]:\n locLower, locUpper = result[-1].find(\"{\"), result[-1].rfind(\"}\")\n result[-1] = result[-1][locLower + 1:locUpper].strip()\n else:\n result = None\n\n return result", "def r_4(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSiMe = Compound(\"Al\")\r\n if len(comps) == 1:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n iSiMe = comps[1]\r\n\r\n me = list(iSiMe.formula.consist.keys())[0].name\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n me_oxs = get_me_oxs(me)\r\n if me_oxs == 0:\r\n return \"\"\r\n if is_me_activer(\"H\", 1, me, me_oxs):\r\n return \"\"\r\n\r\n iSaNo = Compound(iSaNo_create(me, me_oxs, \"NO3\", 1))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + N2 + H2O\"\r\n else:\r\n iSaNo = Compound(\"Al(NO3)3\")\r\n if len(comps) == 1:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif len(comps) == 2:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n iSaNo = comps[2]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n if an != \"NO3\":\r\n return \"\"\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n if is_me_activer(\"H\", 1, me, me_oxs):\r\n return \"\"\r\n\r\n iSiMe = Compound(simple(me))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + N2 + H2O\"\r\n\r\n return Reaction(react)", "def _build_sub(self) -> str:\n return dedent(\n \"\"\"\n @SP\n M=M-1\n A=M\n D=M\n @SP\n M=M-1\n A=M\n M=M-D\n @SP\n M=M+1\n \"\"\"\n )", "def demo():\n # Create some nonterminals\n S, NP, VP, PP = nonterminals('S, NP, VP, PP')\n N, V, P, Det = nonterminals('N, V, P, Det')\n VP_slash_NP = VP/NP\n\n print 'Some nonterminals:', [S, NP, VP, PP, N, V, P, Det, VP/NP]\n print ' S.symbol() =>', `S.symbol()`\n print\n\n # Create some CFG Productions\n prods = [CFGProduction(S, [NP, VP]), CFGProduction(PP, [P, NP]),\n CFGProduction(NP, [Det, N]), CFGProduction(NP, [NP, PP]),\n CFGProduction(VP, [V, NP]), CFGProduction(VP, [VP, PP]),\n CFGProduction(Det, ['a']), CFGProduction(Det, ['the']),\n CFGProduction(N, ['dog']), CFGProduction(N, ['cat']), \n CFGProduction(V, ['chased']), CFGProduction(V, ['sat']),\n CFGProduction(P, ['on']), CFGProduction(P, ['in'])]\n\n prod = prods[2]\n print 'A CFG production:', `prod`\n print ' prod.lhs() =>', `prod.lhs()`\n print ' prod.rhs() =>', `prod.rhs()`\n print\n\n # Create and print a CFG\n cfg = CFG(S, prods)\n print 'A CFG grammar:', `cfg`\n print ' cfg.start() =>', `cfg.start()`\n print ' cfg.productions() =>',\n # Use string.replace(...) 
is to line-wrap the output.\n print `cfg.productions()`.replace(',', ',\\n'+' '*25)\n print\n\n # Create some probabilistic CFG Productions\n A, B, C = nonterminals('A, B, C')\n pcfg_prods = [PCFGProduction(A, [B, B], prob=0.3),\n PCFGProduction(A, [C, B, C], prob=0.7),\n PCFGProduction(B, [B, 'b'], prob=0.5),\n PCFGProduction(B, [C], prob=0.5),\n PCFGProduction(C, ['a'], prob=0.1),\n PCFGProduction(C, ['b'], prob=0.9)] \n \n pcfg_prod = pcfg_prods[2]\n print 'A PCFG production:', `pcfg_prod`\n print ' pcfg_prod.lhs() =>', `pcfg_prod.lhs()`\n print ' pcfg_prod.rhs() =>', `pcfg_prod.rhs()`\n print ' pcfg_prod.prob() =>', `pcfg_prod.prob()`\n print\n\n # Create and print a PCFG\n pcfg = PCFG(S, pcfg_prods)\n print 'A PCFG grammar:', `pcfg`\n print ' pcfg.start() =>', `pcfg.start()`\n print ' pcfg.productions() =>',\n # Use string.replace(...) is to line-wrap the output.\n print `pcfg.productions()`.replace(',', ',\\n'+' '*26)\n print", "def variable_string(self, name):\n return \"$(\" + name + \")\"", "def examples():\r\n\r\n # get some data for a single name\r\n x = blp.bdp('BDEV LN Equity', 'px_last')\r\n print(x)\r\n print('the type of x', type(x))\r\n print('the value of x:', x.iloc[0]['px_last'])\r\n\r\n\r\n # get multiple data for a single name\r\n y = blp.bdp('BDEV LN Equity', flds=['px_bid', 'px_ask'])\r\n print(y)\r\n\r\n\r\n # get multiple data for multiple names\r\n z = blp.bdp(tickers=['BDEV LN Equity', 'BARC LN Equity'], flds=['px_bid', 'px_ask'])\r\n print(z)\r\n print('here is the bdev ask >>>', z.loc['BDEV LN Equity','px_ask'])", "def part(expr,address):\n for num in address:\n expr = expr.args[num]\n return expr", "def test_variablepresentations_get(self):\n pass", "def test():\n assert str(Polynomial(0, 1, 0, -1, 4, -2, 0, 1, 3, 0)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n assert str(Polynomial([-5, 1, 0, -1, 4, -2, 0, 1, 3, 0])) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x - 5\"\n assert str(Polynomial(x7=1, x4=4, x8=3, x9=0, x0=0, x5=-2, x3=-1, x1=1)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n assert str(Polynomial(x2=0)) == \"0\"\n assert str(Polynomial(x0=0)) == \"0\"\n assert Polynomial(x0=2, x1=0, x3=0, x2=3) == Polynomial(2, 0, 3)\n assert Polynomial(x2=0) == Polynomial(x0=0)\n assert str(Polynomial(x0=1) + Polynomial(x1=1)) == \"x + 1\"\n assert str(Polynomial([-1, 1, 1, 0]) + Polynomial(1, -1, 1)) == \"2x^2\"\n pol1 = Polynomial(x2=3, x0=1)\n pol2 = Polynomial(x1=1, x3=0)\n assert str(pol1 + pol2) == \"3x^2 + x + 1\"\n assert str(pol1 + pol2) == \"3x^2 + x + 1\"\n assert str(Polynomial(x0=-1, x1=1) ** 1) == \"x - 1\"\n assert str(Polynomial(x0=-1, x1=1) ** 2) == \"x^2 - 2x + 1\"\n pol3 = Polynomial(x0=-1, x1=1)\n assert str(pol3 ** 4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n assert str(pol3 ** 4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n assert str(Polynomial(x0=2).derivative()) == \"0\"\n assert str(Polynomial(x3=2, x1=3, x0=2).derivative()) == \"6x^2 + 3\"\n assert str(Polynomial(x3=2, x1=3, x0=2).derivative().derivative()) == \"12x\"\n pol4 = Polynomial(x3=2, x1=3, x0=2)\n assert str(pol4.derivative()) == \"6x^2 + 3\"\n assert str(pol4.derivative()) == \"6x^2 + 3\"\n assert Polynomial(-2, 3, 4, -5).at_value(0) == -2\n assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3) == 20\n assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3, 5) == 44\n pol5 = Polynomial([1, 0, -2])\n assert pol5.at_value(-2.4) == -10.52\n assert pol5.at_value(-2.4) == -10.52\n assert pol5.at_value(-1, 3.6) == -23.92\n assert pol5.at_value(-1, 3.6) == -23.92", "def vars_formula ( self , formula , vars , name 
= '' , title = '' ) :\n\n assert vars and len ( vars ) , 'Variables must be specified!'\n\n vvars = []\n for v in vars :\n if isinstance ( v , ROOT.RooAbsArg ) :\n vvars.append ( v )\n elif isinstance ( v , string_types ) :\n try :\n vv = self.parameter ( v )\n vvars.append ( vv ) \n except :\n raise TypeError ( \"Unknown parameter name %s\" % v)\n else :\n raise TypeError( \"Unknown parameter type %s/%s\" % ( v , type ( v ) ) ) \n\n vlst = ROOT.RooArgList()\n for v in vvars : vlst.add ( v )\n\n has_at = '@' in formula\n has_percent = '%' in formula\n import re\n has_index = re.search ( r'\\[( *)(?P<degree>\\d*)( *)\\]' , formula )\n has_format1 = re.search ( r'\\{( *)(?P<degree>\\d*)( *)\\}' , formula )\n has_format2 = re.search ( r'\\{( *)(?P<degree>\\w*)( *)\\}' , formula )\n\n formula_ = formula \n if has_at : pass \n elif has_index : pass \n elif has_percent : \n vnames = tuple ( [ p.name for p in vlst ] )\n formula_ = formula % vnames\n elif has_format1 : \n vnames = tuple ( [ p.name for p in vlst ] )\n formula_ = formula.format ( *vnames ) \n elif has_format2 :\n kw = {}\n for p in vlist : kw [ p.name ] = p.name\n formula_ = formula.format ( *kw )\n \n name = name if name else 'Formula_%s ' % self.name \n title = title if title else 'Formula:%s/%s' % ( formula , self.name )\n \n rfv = ROOT.RooFormulaVar ( self.var_name ( name ) , title , formula_ , vlst )\n \n self.aux_keep.append ( vlst )\n self.aux_keep.append ( rvf )\n \n return rfv", "def enrich_varname(varname):\r\n greek = (\"alpha beta gamma delta epsilon varepsilon zeta eta theta \"\r\n \"vartheta iota kappa lambda mu nu xi pi rho sigma tau upsilon \"\r\n \"phi varphi chi psi omega\").split()\r\n\r\n # add capital greek letters\r\n greek += [x.capitalize() for x in greek]\r\n\r\n # add hbar for QM\r\n greek.append('hbar')\r\n\r\n # add infinity\r\n greek.append('infty')\r\n\r\n if varname in greek:\r\n return ur\"\\{letter}\".format(letter=varname)\r\n else:\r\n return varname.replace(\"_\", r\"\\_\")", "def get_basis(n):\n\treturn ' '.join('e{}'.format(i) for i in range(n))", "def solution(s):", "def pulp_smash():", "def Get_LonghurstProvinceName4Num(input):\n LonghurstProvinceDict = {\n 'ALSK': 'AlaskaDownwellingCoastalProvince',\n 'ANTA': 'AntarcticProvince',\n 'APLR': 'AustralPolarProvince',\n 'ARAB': 'NWArabianUpwellingProvince',\n 'ARCH': 'ArchipelagicDeepBasinsProvince',\n 'ARCT': 'AtlanticArcticProvince',\n 'AUSE': 'EastAustralianCoastalProvince',\n 'AUSW': 'AustraliaIndonesiaCoastalProvince',\n 'BENG': 'BenguelaCurrentCoastalProvince',\n 'BERS': 'N.PacificEpicontinentalProvince',\n 'BPLR': 'BorealPolarProvince(POLR)',\n 'BRAZ': 'BrazilCurrentCoastalProvince',\n 'CAMR': 'CentralAmericanCoastalProvince',\n 'CARB': 'CaribbeanProvince',\n 'CCAL': 'CaliforniaUpwellingCoastalProvince',\n 'CHIL': 'ChilePeruCurrentCoastalProvince',\n 'CHIN': 'ChinaSeaCoastalProvince',\n 'CHSB': 'CheasapeakeBayProvince',\n 'CNRY': 'CanaryCoastalProvince(EACB)',\n 'EAFR': 'E.AfricaCoastalProvince',\n 'ETRA': 'EasternTropicalAtlanticProvince',\n 'FKLD': 'SWAtlanticShelvesProvince',\n 'GFST': 'GulfStreamProvince',\n 'GUIA': 'GuianasCoastalProvince',\n 'GUIN': 'GuineaCurrentCoastalProvince',\n 'INDE': 'E.IndiaCoastalProvince',\n 'INDW': 'W.IndiaCoastalProvince',\n 'ISSG': 'IndianS.SubtropicalGyreProvince',\n 'KURO': 'KuroshioCurrentProvince',\n 'LAKE': 'CaspianSea,AralSea',\n 'MEDI': 'MediterraneanSea,BlackSeaProvince',\n 'MONS': 'IndianMonsoonGyresProvince',\n 'NADR': 'N.AtlanticDriftProvince(WWDR)',\n 'NASE': 
'N.AtlanticSubtropicalGyralProvince(East)(STGE)',\n 'NASW': 'N.AtlanticSubtropicalGyralProvince(West)(STGW)',\n 'NATR': 'N.AtlanticTropicalGyralProvince(TRPG)',\n 'NECS': 'NEAtlanticShelvesProvince',\n 'NEWZ': 'NewZealandCoastalProvince',\n 'NPPF': 'N.PacificPolarFrontProvince',\n 'NPSE': 'N.PacificSubtropicalGyreProvince(East)',\n 'NPSW': 'N.PacificSubtropicalGyreProvince(West)',\n 'NPTG': 'N.PacificTropicalGyreProvince',\n 'NWCS': 'NWAtlanticShelvesProvince',\n 'OCAL': 'OffshoreCaliforniaCurrentProvince',\n 'PEQD': 'PacificEquatorialDivergenceProvince',\n 'PNEC': 'N.PacificEquatorialCountercurrentProvince',\n 'PSAE': 'PacificSubarcticGyresProvince(East)',\n 'PSAW': 'PacificSubarcticGyresProvince(West)',\n 'REDS': 'RedSea,PersianGulfProvince',\n 'SANT': 'SubantarcticProvince',\n 'SARC': 'AtlanticSubarcticProvince',\n 'SATL': 'SouthAtlanticGyralProvince(SATG)',\n 'SPSG': 'S.PacificSubtropicalGyreProvince',\n 'SSTC': 'S.SubtropicalConvergenceProvince',\n 'SUND': 'SundaArafuraShelvesProvince',\n 'TASM': 'TasmanSeaProvince',\n 'WARM': 'W.PacificWarmPoolProvince',\n 'WTRA': 'WesternTropicalAtlanticProvince'\n }\n return LonghurstProvinceDict[input]", "def change_variables((a,b,c,d), (n,r,m)): \n return ( n*a**2 + r*a*b + m*b**2, 2*(n*a*c + m*b*d) + r*(a*d + c*b), \\\n n*c**2 + r*c*d + m*d**2 )", "def createkey(*args): # {{{2\n return '-'.join(map(simplifyname, args))", "def SSt_theo_old(D, k):\n\ta1b = k[\"A1B1\"]\n\tba1 = k[\"B1A1\"]\n\tca1 = k[\"C1A1\"]\n\tcb = k[\"B1C1\"]\n\tnum = a1b*ba1*ca1*ca1 + ba1*ba1*ca1*ca1 + 3*a1b*ba1*ca1*cb + 2*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 2*ba1*ca1*ca1*cb + 2*a1b*ba1*cb*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*a1b*ca1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t3*ba1*ca1*ca1*ca1 + 2*a1b*ba1*ba1*cb + ba1*ba1*ba1*cb + 2*a1b*ba1*ca1*cb + \\\n\t\t\t3*ba1*ba1*ca1*cb + 4*a1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + \\\n\t\t\t2*a1b*ba1*cb*cb + 2*ba1*ba1*cb*cb + 2*a1b*ca1*cb*cb + 4*ba1*ca1*cb*cb + \\\n\t\t\t2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 4*ba1*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t2*ca1*ca1*ca1*ca1 + ba1*ba1*ba1*cb + 3*a1b*ba1*ca1*cb + 3*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\t(ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 3*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + \\\n\t\t\t2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\tba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\tden = a1b*(ba1*ba1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb + ba1*ba1*cb*cb + \n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + 4*ca1*ca1*ca1*cb + \n\t\t\t2*ba1*ba1*cb*cb + 4*ba1*ca1*cb*cb + 2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\ta1b*(2*ba1*ba1*ca1*ca1 + 4*ca1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + \n\t\t\t4*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\ta1b*ba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\ttau = num/den\n\t##\n\treturn tau*np.log(20)", "def get_strains(names, q_dof):\n strain_functions = []\n for n, this_dof in zip(names, q_dof):\n check_qdof(n, this_dof)\n if n == 'linear_helix':\n strain_functions.append(linear_helix_strain)\n elif n == 'pure_helix':\n strain_functions.append(pure_helix_strain)\n elif 
n == 'torsion_helix':\n strain_functions.append(torsion_helix_strain)\n elif n == 'torsion_linear_helix':\n strain_functions.append(torsion_linear_helix_strain)\n elif n == 'quadratic':\n strain_functions.append(quadratic_strain)\n elif n == 'linear':\n strain_functions.append(linear_strain)\n elif n == 'constant':\n strain_functions.append(constant_strain)\n elif n == 'full':\n strain_functions.append(full_strain)\n else:\n print(f'{n} is not a defined strain base.')\n return strain_functions", "def address():\n # We start with generating the street name. For this we choose\n # between the most common prefixes and our own prefixes\n prefix = dice.randint(1, 100)\n if prefix <= 10: # 10%\n prefix = \"Haupt\"\n elif prefix <= 18: # 8%\n prefix = \"Schul\"\n elif prefix <= 25: # 7%\n prefix = \"Garten\"\n elif prefix <= 32: # 7%\n prefix = \"Dorf\"\n elif prefix <= 39: # 7%\n prefix = \"Bahnhof\"\n elif prefix <= 46: # 7%\n prefix = \"Wiesen\"\n elif prefix <= 52: # 6%\n prefix = \"Berg\"\n elif prefix <= 56: # 4%\n prefix = \"Kirch\"\n elif prefix <= 60: # 4%\n prefix = \"Wald\"\n elif prefix <= 64: # 4%\n prefix = \"Ring\"\n else:\n prefix = dice.choice(names.prefix)\n\n # Now we can add the suffix\n suffix = dice.randint(1, 100)\n if suffix <= 78:\n suffix = \"straße\"\n elif suffix <= 96:\n suffix = \"weg\"\n elif suffix <= 98:\n suffix = \"allee\"\n elif suffix == 99:\n suffix = \"ring\"\n elif suffix == 100:\n suffix = \"platz\"\n\n # When we have a city name as prefix, we need to capitalize the\n # suffix since it will be two words\n if prefix[-1] == \" \":\n suffix = suffix.capitalize()\n\n # Now we can add them together\n street = prefix + suffix\n\n # We need a house number as well. In Germany most numbers have\n # between one and four digits, so we will use this as base. Lower\n # numbers are more common, so we'll give it a 10% probability of\n # using 3 digits and 1% of using 4 digits\n digits = dice.randint(1, 100)\n if digits == 100:\n house_number = str(dice.randint(1000, 9999))\n elif digits >= 90:\n house_number = str(dice.randint(100, 999))\n else:\n house_number = str(dice.randint(1, 99))\n address_full = street + \" \" + house_number\n return address_full", "def __init__(self,name):\n self._name = name\n self._supplies = []\n self.generateSupplies()", "def create_extended_name(y: str, p: str) -> str:\n final_letter = y[-1]\n if final_letter == \"e\":\n extended_name = y + \"x\" + p\n elif final_letter in [\"a\", \"i\", \"o\", \"u\"]:\n extended_name = y[:-1] + \"ex\" + p\n elif final_letter == \"x\":\n if y[-2] == \"e\":\n extended_name = y + p\n else:\n extended_name = y + \"ex\" + p\n return extended_name", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def _make_ht_label(chain_parts):\n\n assert len(chain_parts) == 1, '_make_ht_label, no. 
of chain parts != 1'\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario.startswith('HT'), '_make_ht_label(): scenario does not start with HT'\n\n arg_res = [\n re.compile(r'^(?P<lo>\\d*)(?P<key>ht)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>eta)(?P<hi>\\d*)$'),\n ]\n\n defaults = {\n 'ht': ('0', 'inf'),\n 'et': ('0', 'inf'),\n 'eta': ('0', 'inf'),\n }\n\n\n args = _args_from_scenario(scenario)\n argvals = {}\n nargs = len(args)\n assert len(args) <= len(arg_res), 'bad num of args %d, expected < %d' % (len(args),\n len(arg_res))\n\n # obtain argument values frrom scenario\n while args:\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = float(defaults[key][0])\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = float(defaults[key][1])\n argvals[key+'hi'] = hi\n\n print (argvals)\n assert len(argvals) == 2*nargs, 'no of args: %d, expected %d' % (len(argvals), 2*nargs)\n\n print ('sent 100')\n result = \"\"\"\n ht([(%(htlo).0fht) \n (%(etlo).0fet)\n (%(etalo).0feta%(etahi).0f)\n ])\"\"\" % argvals\n print (result)\n return result", "def __init__(self):\n\n # names of atoms that make up relevant segements of each chain\n self.chains = {'a': {'C': 'C1', 'C1': 'C2', 'C2': 'C3', 'C3': 'C4', 'C4': 'C5', 'H': 'H1', 'H1': 'H2',\n 'H2': 'H3', 'H3': 'H4', 'H4': 'H5'},\n 'b': {'C45': 'C1', 'C44': 'C2', 'C43': 'C3', 'C42': 'C4', 'C41': 'C5', 'H81': 'H1', 'H80': 'H2',\n 'H79': 'H3', 'H78': 'H4', 'H77': 'H5'}\n }\n\n self.nchains = len(list(self.chains.keys()))\n\n self.chain_numbers = {'a': 0, 'b': 1} # used to number chains\n\n # self.initial_types = {'C1': 'c2', 'C2': 'ce', 'C3': 'ce', 'C4': 'c2', 'H1': 'ha', 'H2': 'ha', 'H3': 'ha',\n # 'H4': 'ha', 'H5': 'ha'}\n\n # all indices numbered from 0. D1, D2, ... correspond to dummies attached to C1, C2, ... respectively\n self.indices = {'a': {'C1': 0, 'C2': 1, 'C3': 2, 'C4': 3, 'C5': 4, 'H1': 52, 'H2': 53, 'H3': 54, 'H4': 55,\n 'H5': 56, 'D1': 136, 'D2': 137, 'D3': 138, 'D4': 139},\n 'b': {'C1': 49, 'C2': 48, 'C3': 47, 'C4': 46, 'C5': 45, 'H1': 133, 'H2': 132, 'H3': 131,\n 'H4': 130, 'H5': 129, 'D1': 140, 'D2': 141, 'D3': 142, 'D4': 143}\n }\n\n self.dummy_connectivity = {'a': {'C': 'D1', 'C1': 'D2', 'C2': 'D3', 'C3': 'D4'},\n 'b': {'C45': 'D1', 'C44': 'D2', 'C43': 'D3', 'C42': 'D4'}}\n\n self.hydrogen_connectivity = {'C': ['H1', 'H2'], 'C1': ['H3'], 'C2': ['H4'], 'C3': ['H5'],\n 'C45': ['H1', 'H2'], 'C44': ['H3'], 'C43': ['H4'], 'C42': ['H5']}\n\n self.dummy_mass = 1.008 # mass of hydrogen\n\n # write these in order of priority\n # for efficiency, don't repeat things. For example self.carbons['C1']: self.carbons['C2'] is the same as\n # self.carbons['C2']: self.carbons['C1']. 
Otherwise, computational expense goes up and a new reaction has\n # to be defined below.\n self.carbons = {'C1': ['C', 'C45'], 'C2': ['C1', 'C44'], 'C3': ['C2', 'C43'], 'C4': ['C3', 'C42']}\n self.bonds_with = [[self.carbons['C1'], self.carbons['C2']]]\n\n # define which improper dihedrals to remove -- written in same order as .itp file!!!\n # note that the order of the atoms may be different for each chain\n # NOTE: C3 not tested\n self.impropers = {'a': {'C1': ['H2', 'C1', 'H1', 'C2'], 'C2': ['C1', 'C3', 'C2', 'H3'],\n 'C3': ['C4', 'C2', 'C3', 'H4'], 'C4': ['C5', 'C3', 'C4', 'H5']},\n 'b': {'C1': ['C2', 'H2', 'C1', 'H1'], 'C2': ['C1', 'C3', 'C2', 'H3'],\n 'C3': ['C4', 'C2', 'C3', 'H4'], 'C4': ['C5', 'C3', 'C4', 'H5']}}", "def construct_name(p, prefix):\n name = prefix\n for key in p.keys():\n if (type(p[key]) != tuple) and (type(p[key]) != list):\n name = name + '_' + str(key) + '-' + str(p[key])\n else:\n name = name + '_' + str(key) + '-' + str(p[key][0])\n return name", "def roo_name ( prefix = 'roo_' , suffix = '' ) :\n regname = ROOT.RooNameReg.instance()\n name = prefix + suffix\n MakeVar.__numnames += 1 \n while name in MakeVar.__pdf_names or name in MakeVar.__var_names or regname.known ( name ) or not name :\n name = prefix + ''.join ( ( random.choice ( ascii_letters ) for i in range ( 6 ) ) ) + suffix \n MakeVar.__numnames += 1 \n return name", "def genPrefixAntString(self,estimatedVar,prefix=\"_\"):\n self.prefixAntString = self.antString\n for name in estimatedVar:\n self.prefixAntString = replaceVariable(self.prefixAntString,\n name,prefix+name)", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine 
gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, 
a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, 
k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def expand_abbrevs(name):\n key = name.upper()\n for abbrev, word in ABBREVS.iteritems():\n key = re.sub(abbrev, word, key)\n \n #Remove (.*) from the street name\n key = re.sub(r'\\(.*?(:?\\)|$)', '', key)\n \n #Unify names\n key = NUMBER_IN_NAMES_REGEX.sub(lambda i: i.group(1) + \" \", key)\n key = re.sub(u\"Ё\", u\"Е\", key)\n key = re.sub(u\"[\\\"'«»№]\", u\" \", key)\n\n # remove \"им\" prefix\n key = re.sub(ur'[^\\s]ИМ[\\.\\s]+', u' ', key)\n\n #Change name parts order\n words = key.split(r\" \")\n words.sort()\n key = \" \".join(words)\n\n key = re.sub(u\"\\s+\", u\" \", key).strip()\n\n logging.debug(\"Street name %s was converted to %s\" % (name, key))\n \n return key", "def dot_name(number):\n\tif number > 0:\n\t\treturn \"P {}\".format(number)\n\telse:\n\t\treturn \"O {}\".format(-number)", "def main():\n\n rules, evolutions = [int(i) for i in input().strip().split()]\n\n 
rule = {}\n for _ in range(rules):\n start, finish = input().strip().split(' -> ')\n rule[start] = finish\n\n print(lindenmayor(rule, evolutions, input().strip()))", "def gen_name():\n return choice(globals()[choice(['oc_males', 'oc_females'])]) + ' ' + choice(na_surnames)", "def get_variables():\n policer_data = {\n \"policer_data\": {\n \"name\": \"policy1\",\n \"cir\": 450,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n }\n },\n \"policer_data_oper\": {\n \"name\": \"policy1\",\n \"cir\": 450,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n }\n },\n \"policer_data_2\": {\n \"name\": \"policy1\",\n \"cir\": 900,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n }\n },\n \"policer_data_oper_2\": {\n \"name\": \"policy1\",\n \"cir\": 900,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n }\n },\n \"policer_data_3\": {\n \"name\": \"policy1\",\n \"cir\": 100,\n \"eir\": 150,\n \"cb\": 200,\n \"eb\": 300,\n \"rate-type\": \"pps\",\n \"round-type\": \"closest\",\n \"type\": \"2r3c-2698\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-mark-dscp\",\n \"dscp\": \"AF22\"\n },\n \"violate-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n },\n \"color-aware\": True\n },\n \"policer_data_oper_3\": {\n \"name\": \"policy1\",\n \"cir\": 100,\n \"eir\": 150,\n \"cb\": 200,\n \"eb\": 300,\n \"rate-type\": \"pps\",\n \"round-type\": \"closest\",\n \"type\": \"2r3c-2698\",\n \"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-mark-dscp\",\n },\n \"violate-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n },\n \"color-aware\": True\n },\n\n \"acl_tables\": {\n # settings for policer tables\n \"hc_acl_table\": {\n \"name\": \"table0\",\n \"nbuckets\": 2,\n \"memory_size\": 1048576,\n \"skip_n_vectors\": 12,\n \"miss_next\": \"permit\",\n \"mask\": \"00:00:00:00:00:00:00:00:00:00:00:00:ff:ff:ff:ff\"\n },\n # setting for acl sessions\n \"hc_acl_session\": {\n \"match\": \"00:00:00:00:00:00:00:00:00:00:00:00:C0:A8:7A:01\",\n \"policer_hit_next\": \"policy1\",\n \"color_classfier\": \"exceed-color\",\n },\n \"hc_acl_session2\": {\n \"match\": \"00:00:00:00:00:00:00:00:00:00:00:00:C0:A8:7A:02\",\n \"policer_hit_next\": \"policy1\",\n \"color_classfier\": \"exceed-color\",\n },\n },\n }\n return policer_data", "def test_expand_refname():\n\n if 'lref' in os.environ: del os.environ['lref']\n assert expand_refname('lref$something_tds.fits') == 'something_tds.fits', \\\n \"Without lref, didn't return None\"\n\n os.environ['lref'] = '/grp/hst/cdbs/lref/'\n full_refname = 
os.path.join( os.environ['lref'], 'something_tds.fits')\n assert expand_refname( 'lref$something_tds.fits' ) == full_refname, \\\n \"With lref, didn't find the file\"\n\n assert expand_refname( '' ) == '', \"didn't return blank on blank\"", "def main():\n\n precomp = {}\n for op1 in '+-*/':\n for op3 in '+-*/':\n for op5 in '+-*/':\n text = '4 ' + ' 4 '.join([op1, op3, op5]) + ' 4'\n precomp[eval2(text)] = text\n\n for _ in range(int(input())):\n number = int(input())\n if number in precomp:\n print(precomp[number], '=', number)\n else:\n print('no solution')" ]
[ "0.5789567", "0.5612758", "0.56002617", "0.5582453", "0.5527549", "0.5454671", "0.5450963", "0.5440792", "0.5437476", "0.54072136", "0.5388782", "0.53822136", "0.53622717", "0.5361482", "0.53562933", "0.5325314", "0.5280684", "0.52761763", "0.5207379", "0.5178423", "0.5170255", "0.5102485", "0.5090245", "0.50771946", "0.5020704", "0.50157154", "0.50134194", "0.50103205", "0.5000996", "0.4957311", "0.49512044", "0.49428305", "0.49393606", "0.49388877", "0.49040487", "0.48884833", "0.48830682", "0.48788056", "0.48532405", "0.48525697", "0.48352826", "0.4825802", "0.48069966", "0.48047987", "0.47963157", "0.47919402", "0.47850004", "0.47734058", "0.47691607", "0.47669044", "0.47532", "0.47264925", "0.47189394", "0.47174117", "0.47174117", "0.4716557", "0.47078517", "0.46938398", "0.46934572", "0.46931386", "0.4683028", "0.4680845", "0.46772757", "0.4661052", "0.46560183", "0.46541843", "0.46496028", "0.46492556", "0.46473423", "0.46430612", "0.4642397", "0.46387324", "0.46321943", "0.4626283", "0.4625919", "0.4621162", "0.46201724", "0.46131116", "0.46078354", "0.46051222", "0.46030036", "0.4602352", "0.4601534", "0.45941603", "0.45912838", "0.45788938", "0.4577035", "0.4577035", "0.45765793", "0.4571976", "0.45677206", "0.45626277", "0.45523113", "0.4551816", "0.45500955", "0.4547831", "0.45466644", "0.45434204", "0.4543109", "0.45427343", "0.4541705" ]
0.0
-1
This function computes the fundamental matrix by solving Ax = 0 via SVD; 8-point algorithm
def computeFundamentalMatrix(pts1, pts2):
    # Build the constraint matrix A for Af = 0, one row per point correspondence
    A = np.empty((len(pts1), 9))
    for i in range(len(pts1)):
        x1, y1 = pts1[i][0], pts1[i][1]
        x2, y2 = pts2[i][0], pts2[i][1]
        A[i] = np.array([x1 * x2, x2 * y1, x2, y2 * x1, y2 * y1, y2, x1, y1, 1])
    # Compute F by SVD: the solution of Af = 0 is the right singular vector
    # associated with the smallest singular value of A
    U, S, V = np.linalg.svd(A)
    F = V[-1].reshape(3, 3)
    # Constrain F to rank 2 by zeroing its smallest singular value
    U1, S1, V1 = np.linalg.svd(F)
    S2 = np.array([[S1[0], 0, 0], [0, S1[1], 0], [0, 0, 0]])
    F = np.dot(np.dot(U1, S2), V1)
    return F
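# A minimal usage sketch, assuming at least eight (x, y) correspondences per image;
# the point arrays below are made-up values purely for illustration.
import numpy as np

rng = np.random.default_rng(0)
pts1 = rng.uniform(0.0, 640.0, size=(8, 2))        # hypothetical points in image 1
pts2 = pts1 + rng.normal(0.0, 5.0, size=(8, 2))    # hypothetical matching points in image 2
F = computeFundamentalMatrix(pts1, pts2)
print("rank(F) =", np.linalg.matrix_rank(F))       # 2 after the rank-2 projection
# Epipolar constraint x2^T F x1 for one correspondence used in the fit
# (it is exactly zero only for noise-free points from a real two-view geometry)
x1_h = np.append(pts1[0], 1.0)
x2_h = np.append(pts2[0], 1.0)
print("epipolar residual:", x2_h @ F @ x1_h)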
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svd0(A):\n M,N = A.shape\n if M>N: return sla.svd(A, full_matrices=True)\n else: return sla.svd(A, full_matrices=False)", "def invert_L1_svd():", "def visualize_svd():", "def svd(self):\n U, s, Vh = la.svd(self)\n S = np.zeros(self.shape)\n np.fill_diagonal(S, s)\n return (Matrix(U), Matrix(S), Matrix(Vh))", "def svd(matrix):\n u = None\n s = None\n v = None\n ### YOUR CODE HERE\n (u,s,v)=np.linalg.svd(matrix)\n ### END YOUR CODE\n\n return u, s, v", "def estimateFundamentalMatrix(x1, x2):\n A = correspondence_matrix(x1, x2)\n # compute linear least square solution\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # constrain F. Make rank 2 by zeroing out last singular value\n U, S, V = np.linalg.svd(F)\n S[-1] = 0\n \n F = np.dot(U, np.dot(np.diag(S), V))\n return F", "def svd(matrix):\n u = None\n s = None\n v = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return u, s, v", "def svd(self, X): # [5pts]\n N,D = X.shape[0],X.shape[1]\n if X.ndim == 3:\n U = np.zeros((N,N,3))\n S = np.zeros((min(N,D),3))\n V = np.zeros((D,D,3))\n for i in range(3):\n U_temp,S_temp,V_temp = np.linalg.svd(X[:,:,i],compute_uv=True, full_matrices=True,hermitian=False)\n U[:,:,i] = U_temp\n S[:,i] = S_temp\n V[:,:,i] = V_temp\n else:\n U,S,V = np.linalg.svd(X,compute_uv=True,full_matrices=True, hermitian=False)\n return U,S,V", "def svd(matrix, approach):\n\n # Getting the eigenvalues and vectors of transpose(A) * A for V and Sigma\n a = mat_multiply(transpose(matrix), matrix)\n if approach == \"qr\":\n V, sigma, iterations = qr_eig(a)\n else:\n V, sigma, iterations = eig(a)\n\n # Sorting singular values and the colums of V accordingly\n V = transpose(V)\n\n singular_values = list()\n sorted_V = list()\n\n r = 0\n for i in range(rows(sigma)):\n singular_values.append([(sigma[i][i]), i])\n if sigma[i][i] > math.exp(-8):\n r += 1\n\n singular_values.sort(key=first_item, reverse=True)\n\n sigma_r = eye(r)\n sigma_r_inv = eye(r)\n\n # Constructing the sorted U and sigma matrices\n i, j = 0, 0\n for value in singular_values:\n if value[0] > math.exp(-8):\n sorted_V.append(V[value[1]])\n sigma_r[j][j] = value[0] ** (1 / 2)\n sigma_r_inv[j][j] = 1 / (value[0] ** (1 / 2))\n j += 1\n i += 1\n\n # Constructing U by multiplying V and sigma inverse\n sorted_U = mat_multiply(mat_multiply(matrix, transpose(sorted_V)), sigma_r_inv)\n\n return (sorted_U, sigma_r, sorted_V, r, iterations)", "def eight_points_algorithm(x1, x2, normalize=True):\n N = x1.shape[1]\n\n if normalize:\n # Construct transformation matrices to normalize the coordinates\n T1 = get_normalization_matrix(x1)\n T2 = get_normalization_matrix(x2)\n\n # Normalize inputs\n x1 = T1 @ x1\n x2 = T2 @ x2\n\n # Construct matrix A encoding the constraints on x1 and x2\n A = np.stack((x2[0, :] * x1[0, :],\n x2[0, :] * x1[1, :],\n x2[0, :],\n x2[1, :] * x1[0, :],\n x2[1, :] * x1[1, :],\n x2[1, :],\n x1[0, :],\n x1[1, :],\n np.ones((N,))), 1)\n\n # Solve for f using SVD\n U, S, V = np.linalg.svd(A)\n F = V.T[:, 8].reshape(3, 3)\n\n # Enforce that rank(F)=2\n U, S, V = np.linalg.svd(F)\n S[2] = 0\n F = (U[:, :len(S)] * S) @ V\n\n # Transform F back\n if normalize:\n F = T2.T @ F @ T1\n\n return F", "def singular_decomp(A):\n # Initialization\n n, m = A.shape\n U = np.zeros((n, m), dtype='float64')\n\n # Diagonalization of A^T * A\n rot, e, V = eigen.diag(np.dot(np.transpose(A), A))\n\n # Calculate U\n U = np.dot(A, V)\n for i in range(m):\n e[i] = np.sqrt(e[i])\n U[:, i] /= e[i]\n\n return U, e, V", "def invert_L2_svd():\n print('Starting SVD 
inversion')\n\n pix2avevel = np.nans(ts.size)\n pix2cumdef = np.nans(ts.size)\n\n for i in np.range(ts.WIDTH):\n print('column {0}'.format(i))\n pix2date = np.zeros(ts.LENGTH, ts.DATES)\n pix2model = np.zeros(ts.LENGTH, ts.DT)\n colPix = np.zeros(ts.LENGTH, ts.IGRAMS)\n\n # concatenate same column from each interferogram into an array\n for j, ig in enumerate(ts):\n column = np.fromfile(ig.NAME, dtype=float16, size=ts.LENGTH)\n colPix[:,j] = column\n\n pix2igram = np.isfinite(colPix)\n coverage = np.fromfile(coverage) #laod DQmap\n iterPixels = np.where(coverage >= ts.igthresh)\n\n #preform pixel-by-pixel inversion\n for k, pixel in enumerate(iterPixels):\n indIG = find(pix2igram[pixel,:])==1\n indDate = unique(ts.timeIndex[indIG,:])\n dtVector = np.diff(ts.Serial(indDate)) / 365.242 #convert years to days\n\n # Set up B matrix\n B = np.zeros(len(indIG), len(dtVector))\n\n print('Done')", "def incremental_svd(A, qr_flg=False):\n\n m = 256\n n = 7291\n\n n0 = 256\n\n if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')\n\n start = time.clock()\n\n A0 = A[:, :n0]\n U, s, V = ln.svd(A0, full_matrices=False)\n\n # NOTE: s is a vector; np.diag(s) will produce a diagonal matrix\n for i in range(n0, n):\n\n # new matrix is just a single vector (i-th column of A)\n A1 = np.matrix(A[:, i]).T\n\n if qr_flg:\n J, K = ln.qr(A1 - np.dot(np.dot(U, U.T), A1))\n U_, s_, V_ = ln.svd(\n np.vstack((\n np.hstack((np.diag(s), np.dot(U.T, A1))),\n np.hstack((np.zeros((K.shape[0], s.shape[0])), K))\n )),\n full_matrices=False)\n\n # update the result of SVD\n U = np.dot(np.hstack((U, J)), U_)\n\n else:\n U_, s_, V_ = ln.svd(np.hstack((np.diag(s), np.dot(U.T, A1))), full_matrices=False)\n U = np.dot(U, U_)\n\n s = s_\n\n # NOTE: V from svd on NumPy is already transposed\n V = np.dot(V_,\n np.vstack((\n np.hstack((V, np.zeros((V.shape[0], i+1-V.shape[1])))),\n np.hstack((np.zeros((V_.shape[1]-V.shape[0], V.shape[1])), np.eye(V_.shape[1]-V.shape[0], i+1-V.shape[1])))\n ))\n )\n\n # for next computation, update A0\n A0 = np.hstack((A0, A1))\n\n elapsed_time = time.clock() - start\n print 'time:', elapsed_time\n\n return U, s, V", "def spd_pinv(a, rcond=1e-10, square_root=False, check_stability=True):\n N, _N = a.shape\n assert N == _N, \"Matrix is not square!\"\n # get the eigen-decomposition\n # w, v = np.linalg.eigh(a)\n v, w, u = np.linalg.svd(a)\n sort_index = np.argsort(w)\n w = w[sort_index]\n v = v[:,sort_index]\n # check positive-definiteness\n ev_min = w.min()\n if ev_min <= 0:\n msg = \"Matrix is not positive-definite: min ev = {0}\"\n raise IndefiniteError(msg.format(ev_min))\n # check stability of eigen-decomposition\n if check_stability:\n # XXX use a preconditioner?\n if not np.allclose(a, np.dot(v, w[:, np.newaxis] * v.T)):\n raise NumericalError(\n \"Instability in eigh (condition number={:g})\".format(\n (w.max() / w.min())))\n\n # invert the \"large enough\" part of s\n cutoff = rcond * w.max()\n for i in range(N):\n if w[i] > cutoff:\n if square_root:\n # square root of the pseudo-inverse\n w[i] = np.sqrt(1. / w[i])\n else:\n w[i] = 1. 
/ w[i]\n else:\n w[i] = 0.\n # compute the pseudo-inverse (using broadcasting)\n res = np.real(np.dot(v, w[:, np.newaxis] * v.T))\n # check stability of pseudo-inverse\n if check_stability:\n if square_root:\n pa = np.dot(res, res)\n approx_a = np.dot(a, np.dot(pa, a))\n msg = \"Instability in square-root of pseudo-inverse\"\n else:\n approx_a = np.dot(a, np.dot(res, a))\n msg = \"Instability in pseudo-inverse\"\n if not np.allclose(a, approx_a):\n # be a bit laxist by looking at the Mean Squared Error\n mse = np.mean((a - approx_a) ** 2)\n if mse > 1e-16:\n raise NumericalError(\"{} (MSE={:g})\".format(msg, mse))\n return res", "def truncated_svd(A,k=None):", "def smith_nf(matrix):\n\n A=np.copy(matrix)\n if (np.around(A) != A).any():\n raise Exception('This function requires integer input.')\n\n # This looks much like an SVD algorithm that first bidiagonalizes\n # A by Givens rotations and then chases zeros, except for\n # the construction of the 2 by 2 elementary transformation.\n\n m, n = A.shape\n\n S = A\n U = np.eye(m)\n V = np.eye(n)\n\n # Bidiagonalize S with elementary Hermite transforms.\n for j in range(min(m, n)):\n # Zero column j below the diagonal.\n for i in range(j+1, m):\n if S[i, j]:\n # Construct an elementary Hermite transformation E\n # to zero S(i,j) by combining rows i and j.\n E = ehermite(S[j, j], S[i, j])\n # Apply the transform to S and U.\n S[[j, i], :] = np.dot(E, S[[j, i], :])\n # U[:, [j, i]] = U[:, [j, i]] / E\n U[:, [j, i]] = left_matrix_division(U[:, [j, i]], E) # solving the left matrix division\n\n # % Zero row j after the superdiagonal.\n for i in range(j+2, n):\n if S[j, i]:\n # Construct an elementary Hermite transformation E\n # to zero S(j,i) by combining columns j+1 and i.\n E = ehermite(S[j, j+1], S[j, i])\n # Apply the transform to S and V.\n S[:, [j+1, i]] = np.dot(S[:, [j+1, i]], E.T)\n # V[:, [j+1, i]] = V[:, [j+1, i]] / E\n V[:, [j+1, i]] = left_matrix_division(V[:, [j+1, i]], E) # solving the left matrix division\n\n # Now S is upper bidiagonal.\n # Chase the superdiagonal nonzeros away.\n\n D = np.diag(S, 1)\n while any(D):\n b = min(np.where(D))[0]\n # Start chasing bulge at first nonzero superdiagonal element.\n # To guarantee reduction in S(b,b), first make S(b,b) positive\n # and make S(b,b+1) nonnegative and less than S(b,b).\n if S[b, b] < 0:\n S[b, :] = -S[b, :]\n U[:, b] = -U[:, b]\n\n q = np.floor(S[b, b+1] / S[b, b])\n E = np.array([[1, 0], [-q, 1]])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E) # solving the left matrix division\n\n if S[b, b+1]:\n # Zero the first nonzero superdiagonal element\n # using columns b and b+1, to start the bulge at S(b+1,b).\n E = ehermite(S[b, b], S[b, b+1])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E)\n\n for j in range(min(m, n)):\n if j+1 < m:\n # Zero S(j+1,j) using rows j and j+1.\n E = ehermite(S[j, j], S[j+1, j])\n S[[j, j+1], :] = np.dot(E, S[[j, j+1], :])\n # U[:, [j, j+1]] = U[:, [j, j+1]] / E\n U[:, [j, j+1]] = left_matrix_division(U[:, [j, j+1]], E)\n if j+2 < n:\n # Zero S(j,j+2) using columns j+1 and j+2.\n E = ehermite(S[j, j+1], S[j, j+2])\n S[:, [j+1, j+2]] = np.dot(S[:, [j+1, j+2]], E.T)\n # V[:, [j+1, j+2]] = V[:, [j+1, j+2]] / E\n V[:, [j+1, j+2]] = left_matrix_division(V[:, [j+1, j+2]], E)\n D = np.diag(S, 1)\n\n # Now S is diagonal. 
Make it nonnegative.\n\n for j in range(min(m, n)):\n if S[j, j] < 0:\n S[j, :] = -S[j, :]\n U[:, j] = -U[:, j]\n\n # Squeeze factors to lower right to enforce divisibility condition.\n\n for i in range(min(m, n)):\n for j in range(i+1, min(m, n)):\n # Replace S(i,i), S(j,j) by their gcd and lcm respectively.\n a = S[i, i]\n b = S[j, j]\n [c, d, g] = extgcd(a, b)\n E = np.array([[1, d], [-b/g, a*c/g]])\n F = np.array([[c, 1], [-b*d/g, a/g]])\n S[np.ix_([i, j], [i, j])] = np.dot(np.dot(E, S[:, [i, j]][[i, j], :]), F.T)\n # S[i, i] = tmp_arr[0, 0]\n # S[i, j] = tmp_arr[0, 1]\n # S[j, i] = tmp_arr[1, 0]\n # S[j, j] = tmp_arr[1, 1]\n U[:, [i, j]] = left_matrix_division(U[:, [i, j]], E)\n V[:, [i, j]] = left_matrix_division(V[:, [i, j]], F)\n\n U = np.around(U)\n V = np.around(V)\n return U, S, V", "def compact_svd(A, tol=1e-6):\n #Compute eigenvalues/vectors\n lam, V = la.eig((A.conj().T @ A))\n sig = np.sqrt(lam)\n \n #Sort results\n argB = np.argsort(sig)\n arg = []\n for i in range(0, len(argB)):\n arg.append(argB[len(argB)-1-i])\n sig = sig[arg]\n V = V[:,arg]\n #How many non-zero positive\n r = 0\n for j in range(0, len(sig)):\n if abs(sig[j]) >= tol:\n r += 1\n \n sig1 = sig[:r]\n V1 = np.array(V[:,:r])\n \n# print(np.shape(A))\n# print(np.shape(V1))\n U1 = A@V1\n U1 = U1/sig1\n \n #Return answers\n return U1, sig1, V1.conj().T\n\n raise NotImplementedError(\"Problem 1 Incomplete\")", "def nullOld(A, eps=1e-14):\n\t# Taken with gratitude from http://stackoverflow.com/questions/5889142/python-numpy-scipy-finding-the-null-space-of-a-matrix\n\tu, s, vh = la.svd(A)\n\tnull_mask = (s <= eps)\n\tnull_space = scipy.compress(null_mask, vh, axis=0)\n\treturn scipy.transpose(null_space)", "def get_stain_matrix(I):", "def visualize_svd():\n A=np.array([[3,1],[1,3]])\n U,s,Vh=truncated_svd(A)\n \n twopi=np.linspace(0,2.*np.pi,360)\n one=np.reshape(np.linspace(0,1,100),(1,100))\n zeros=np.zeros((1,100))\n S=np.vstack((np.reshape(np.cos(twopi),(1,360)),np.reshape(np.sin(twopi),(1,360))))\n e1=np.vstack((zeros,one))\n e2=e1[::-1] \t\n \n s1S=Vh.dot(S)\n s1e1=Vh.dot(e1)\n s1e2=Vh.dot(e2)\n\n s2S=np.diag(s).dot(s1S)\n s2e1=np.diag(s).dot(s1e1)\n s2e2=np.diag(s).dot(s1e2)\n \n s3S=U.dot(s2S)\n s3e1=U.dot(s2e1)\n s3e2=U.dot(s2e2)\n \n \n \n \n\n \n \n plt.subplot(221)\n plt.plot(S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(e2[0],s3e2[1],\"r-.\",lw=2)\n \n \n \n plt.subplot(222)\n plt.plot(s1S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s1e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s1e2[0],s3e2[1],\"r-.\",lw=2)\n \n \n plt.subplot(223)\n plt.plot(s2S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s2e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s2e2[0],s3e2[1],\"r-.\",lw=2)\n \n plt.subplot(224) \n \n plt.plot(s3S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s3e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s3e2[0],s3e2[1],\"r-.\",lw=2)\n \n plt.show()", "def test_svd(self):\n eigenvectors, eigenvalues = self.svd.run(self.test_matrix)\n\n self.assertEqual(eigenvectors.shape, (100, 100))\n self.assertEqual(eigenvalues.shape, (100,))", "def matrix_svd(\n self,\n chis=None,\n eps=0,\n print_errors=\"deprecated\",\n break_degenerate=False,\n degeneracy_eps=1e-6,\n sparse=False,\n trunc_err_func=None,\n ):\n if print_errors != \"deprecated\":\n msg = (\n \"The `print_errors` keyword argument has been deprecated, \"\n \"and has no effect. 
Rely instead on getting the error as a \"\n \"return value, and print it yourself.\"\n )\n warnings.warn(msg)\n chis = self._matrix_decomp_format_chis(chis, eps)\n maxchi = max(chis)\n assert self.defval == 0\n assert self.invar\n\n # SVD each sector at a time.\n # While doing so, also keep track of a list of all singular values, as\n # well as a heap that gives the negative of the largest singular value\n # in each sector. These will be needed later when deciding how to\n # truncate the decomposition.\n svds = {}\n dims = {}\n minus_next_sings = []\n all_sings = []\n for k, v in self.sects.items():\n if 0 in v.shape:\n shp = v.shape\n m = min(shp)\n u = np.empty((shp[0], m), dtype=self.dtype)\n s = np.empty((m,), dtype=np.float_)\n v = np.empty((m, shp[1]), dtype=self.dtype)\n else:\n if sparse and maxchi < min(v.shape) - 1:\n u, s, v = spsla.svds(\n v, k=maxchi, return_singular_vectors=True\n )\n order = np.argsort(-s)\n u = u[:, order]\n s = s[order]\n v = v[order, :]\n else:\n u, s, v = np.linalg.svd(v, full_matrices=False)\n svd = (s, u, v)\n svds[k] = svd\n dims[k] = 0\n sings = svd[0]\n all_sings.append(sings)\n if 0 not in sings.shape:\n heapq.heappush(minus_next_sings, (-sings[0], k))\n try:\n all_sings = np.concatenate(all_sings)\n except ValueError:\n all_sings = np.array((0,))\n\n if sparse:\n norm_sq = self.norm_sq()\n else:\n norm_sq = None\n\n # Figure out what bond dimension to truncate to, how this bond\n # dimension is distributed over the different sectors, and what the\n # truncation error is.\n chi, dims, rel_err = type(self)._find_trunc_dim(\n all_sings,\n svds,\n minus_next_sings,\n dims,\n chis=chis,\n eps=eps,\n break_degenerate=break_degenerate,\n degeneracy_eps=degeneracy_eps,\n trunc_err_func=trunc_err_func,\n norm_sq=norm_sq,\n )\n\n # Truncate each block and create the dim for the new index.\n new_dim = []\n new_qim = []\n svds = {k: v for k, v in svds.items() if dims[k] > 0}\n for k, v in svds.items():\n d = dims[k]\n if d > 0:\n new_dim.append(d)\n new_qim.append(k[0])\n svds[k] = (v[0][:d], v[1][:, :d], v[2][:d, :])\n else:\n del svds[k]\n\n # Initialize U, S, V.\n d = self.dirs[0]\n U = type(self)(\n [self.shape[0], new_dim],\n qhape=[self.qhape[0], new_qim],\n dirs=[d, -d],\n qodulus=self.qodulus,\n dtype=self.dtype,\n charge=0,\n )\n S = type(self)(\n [new_dim],\n qhape=[new_qim],\n dirs=[d],\n qodulus=self.qodulus,\n dtype=np.float_,\n invar=False,\n charge=0,\n )\n V = type(self)(\n [new_dim, self.shape[1]],\n qhape=[new_qim, self.qhape[1]],\n dirs=[d, self.dirs[1]],\n qodulus=self.qodulus,\n dtype=self.dtype,\n charge=self.charge,\n )\n\n # Set the blocks of U, S and V.\n for k, v in svds.items():\n k_U = (k[0], k[0])\n S[(k[0],)] = v[0]\n U[k_U] = v[1]\n V[k] = v[2]\n\n return U, S, V, rel_err", "def compact_svd(A, tol=1e-6):\r\n eigs, vecs = la.eig(A.conj().T@A)\r\n svs = np.sqrt(eigs)\r\n #sort eigenvalues and eigenvectors accordingly\r\n sorter = list(zip(svs,vecs.T))\r\n sorter.sort(reverse=True, key=lambda tup: tup[0])\r\n svs = [x[0] for x in sorter]\r\n vecs = [x[1] for x in sorter]\r\n #find number of nonzero eigenvalues\r\n r_not = svs.count(0)\r\n r = len(svs) - r_not\r\n svs_1 = np.array(svs[:r])\r\n vecs_1 = np.array(vecs[:r])\r\n u_1 = (A@vecs_1)/svs_1\r\n\r\n return u_1, svs_1, vecs_1.conj().T", "def get_singular_values(matrix, n):\n singular_values = None\n u, s, v = svd(matrix)\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return singular_values", "def fit_svd(self):\n\n # U has the eigenvectors of G.Gt as columns ()\n # S has 
square roots of the eigenvalues of G.Gt and Gt.G in its diagonal\n # The square roos of the eigenvalues are called singular values\n # V has the eigenvectors of Gt.G as columns ()\n # full_matrices set to false will set the Vt matrix to a shape m x n\n\n U, S, Vt = linalg.svd(self.norm_matrix, full_matrices=False)\n\n # Compute the eigenvalues\n eig_val = (S ** 2)\n\n # Explained_variance tell us how much of the variance in the data each eigen value explains\n explained_variance = eig_val / (self.n_samples - 1)\n # total_var is the total variance in the data\n total_var = explained_variance.sum()\n explained_variance_ratio = explained_variance / total_var\n # The cumulative sum of all ratios\n ratio_cumsum = np.cumsum(explained_variance_ratio)\n\n # We search in the cumsum for the index of the value which, when added, corresponds to the quality_percent\n # The index of the cumsum gives us the components we need to add to explain X quality percent of our data\n n_components = np.searchsorted(ratio_cumsum, self.quality_percent, side='right') + 1\n\n self.components = Vt[:n_components]\n print(\"The principal components have been calculated using svd\", self.components.shape)\n\n return self.components", "def cov_matrix(gx, gy, winsize, alpha):\n\n gx = edge_mirror(gx, winsize)\n gy = edge_mirror(gy, winsize)\n radius_filter = gen_gaussian(winsize)\n radius_filter = numpy.rot90(radius_filter, 2)\n\n lenth = sum(sum(radius_filter))\n\n gx = signal.convolve2d(gx, radius_filter, mode='valid')\n gy = signal.convolve2d(gy, radius_filter, mode='valid')\n\n c11 = numpy.multiply(gx, gx)\n c22 = numpy.multiply(gy, gy)\n c12 = numpy.multiply(gx, gy)\n\n\n # SVD closed form\n lambda1 = (c11 + c22 + numpy.sqrt((c11 - c22)**2 + 4*c12**2)) / 2\n lambda2 = (c11 + c22 - numpy.sqrt((c11 - c22)**2 + 4*c12**2)) / 2\n numer = c11 + c12 - lambda1\n denom = c22 + c12 - lambda2\n\n ev1 = numpy.zeros_like(numer)\n ev2 = numpy.zeros_like(ev1)\n\n rows, cols = numer.shape\n for r in range(rows):\n for c in range(cols):\n if abs(denom[r, c]) < _opzero:\n if abs(numer[r, c]) < _opzero:\n if abs(denom[r, c]) > abs(numer[r, c]):\n ev1[r, c] = 0\n ev2[r, c] = 1\n else:\n ev1[r, c] = 1\n ev2[r, c] = 0\n else:\n ev1[r, c] = 1\n ev2[r, c] = 0\n else:\n theta = math.atan(-numer[r, c]/denom[r, c])\n ev1 = math.sin(theta)\n ev2 = math.cos(theta)\n\n sv1 = math.sqrt(abs(lambda1[r, c]))\n sv2 = math.sqrt(abs(lambda2[r, c]))\n p = ((sv1 * sv2 + _epsa) / lenth)**alpha\n s1 = (sv1 + 1) / (sv2 + 1)\n s2 = 1. 
/ s1\n c11[r, c] = p * (s1 * ev2 ** 2 + s2 * ev1 ** 2)\n c22[r, c] = p * (s1 * ev1 ** 2 + s2 * ev2 ** 2)\n c12[r, c] = p * (s1 - s2) * ev1 * ev2\n\n c11 = edge_mirror(c11, winsize)\n c12 = edge_mirror(c12, winsize)\n c22 = edge_mirror(c22, winsize)\n\n return c11, c12, c22", "def normalize(self, matrix):\n eigvals, eigvecs = np.linalg.eig(matrix)\n Sdiag = np.diagonal(np.linalg.inv(eigvecs)@matrix@eigvecs)\n S12diag = Sdiag**-.5\n S12 = np.zeros((len(S12diag), len(S12diag)))\n np.fill_diagonal(S12, S12diag)\n return S12", "def Sa(self, x_surface, geom):\n\n return np.zeros((0, 0), dtype=float)", "def calculate_k_SVD(smooth_spreadsheet_matrix, k):\n U_unitary_matrix, singular_value, V_unitary_matrix = linalg.svd(smooth_spreadsheet_matrix)\n S_full_squared_matrix = np.zeros((k, k))\n np.fill_diagonal(S_full_squared_matrix, np.sqrt(singular_value[:k]))\n U_unitary_matrix = U_unitary_matrix[:, :k]\n return U_unitary_matrix, S_full_squared_matrix", "def posdef_eig_svd(mat):\n evals, evecs, _ = tf.svd(mat)\n\n return evals, evecs", "def svd(a, full_matrices=1, compute_uv=1):\n return SVD(full_matrices, compute_uv)(a)", "def _set_u_matirx(self):\n c_matrix = self.get_c_matrix()\n u_matrix, d_matrix, _ = np.linalg.svd(c_matrix)\n self.u_matrix = np.matrix(u_matrix)", "def svd_approx(A, k):\n U,s,Vh=la.svd(A,full_matrices=False)\n return U[:,:k].dot(np.diag(s[:k])).dot(Vh[:k,:])", "def implement_svd(data):\n u, s, v = torch.svd(data) # implement svd\n # note: the u returned by this function only includes the top values.\n # u * s will be equivalent due to the zero terms, but will run more efficiently with this implementation.\n s = torch.diag(s) # turn s into a diagonal matrix\n transformed_matrix = torch.mm(u, s) # u * s\n return l21_reg(s), transformed_matrix # return the L2,1 regularization term and matrix", "def calc_image_svd(img:list):\r\n img_r = img.swapaxes(1, 2).swapaxes(0, 1)\r\n svd_u, svd_s, svd_vh = np.linalg.svd(img_r, full_matrices=True)\r\n return [svd_u, svd_s, svd_vh]", "def svt(X, tau):\n U, S, Vt = la.svd(X,full_matrices=False)\n Xs = np.dot(U * st(S,tau), Vt)\n return Xs", "def pesudoInverse(matrix):\n\n # Calculate the SVD matrices\n U, S, Vt = svd(matrix)\n\n # A+ = V * S+ * U.T => The sigma (S) matrix shape needs to be inverted.\n pseudoSigma = S.T\n sigmaShape = np.shape(pseudoSigma)\n\n # Recalculate Sigma as Sigma+ (each value != 0 is now 1/value)\n for row in range(0, sigmaShape[0]):\n for col in range(0, sigmaShape[1]):\n # pylint: disable=E1136 # pylint/issues/3139\n if pseudoSigma[row][col] != 0:\n pseudoSigma[row][col] = 1 / pseudoSigma[row][col]\n\n # Return A+, being A+ = V * S+ * U.T\n return np.matmul(np.matmul(Vt.T, pseudoSigma), U.T)", "def estimate_fundamental_matrix(points_a, points_b):\n mean_a = np.mean(points_a, axis=0)\n mean_b = np.mean(points_b, axis=0)\n std_a = np.std(points_a, axis=0)\n std_b = np.std(points_b, axis=0)\n T_a = np.asarray([[1.0/std_a[0], 0, -mean_a[0]/std_a[0]],\n [0, 1.0/std_a[1], -mean_a[1]/std_a[1]],\n [0, 0, 1]])\n T_b = np.asarray([[1.0/std_b[0], 0, -mean_b[0]/std_b[0]],\n [0, 1.0/std_b[1], -mean_b[1]/std_b[1]],\n [0, 0, 1]])\n points_a = np.hstack((points_a, np.ones((len(points_a), 1)))).T\n points_b = np.hstack((points_b, np.ones((len(points_b), 1)))).T\n points_a = np.dot(T_a, points_a)[:2].T\n points_b = np.dot(T_b, points_b)[:2].T\n\n A = []\n for pa, pb in zip(points_a, points_b):\n ua, va = pa\n ub, vb = pb\n A.append([ua*ub, va*ub, ub, ua*vb, va*vb, vb, ua, va, 1])\n A = np.vstack(A)\n _, _, Vt = np.linalg.svd(A)\n F = 
Vt[-1, :].reshape((3, 3))\n\n # enforce the singularity constraint\n U, D, Vt = np.linalg.svd(F)\n D[-1] = 0\n F = np.dot(np.dot(U, np.diag(D)), Vt)\n\n F = np.dot(np.dot(T_b.T, F), T_a)\n\n return F", "def InverseMatrix(matrix,vector):\r\n # Unveri reversible matrix\r\n if Determinant(matrix, 1) == 0:\r\n print(\"Error,Singular Matrix\\n\")\r\n return\r\n # result matrix initialized as singularity matrix\r\n result = MakeIMatrix(len(matrix), len(matrix))\r\n # loop for each row\r\n for i in range(len(matrix[0])):\r\n # turn the pivot into 1 (make elementary matrix and multiply with the result matrix )\r\n # pivoting process\r\n matrix, vector = RowXchange(matrix, vector)\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[i][i] = 1/matrix[i][i]\r\n result = MultiplyMatrix(elementary, result)\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n # make elementary loop to iterate for each row and subtracrt the number below (specific) pivot to zero (make\r\n # elementary matrix and multiply with the result matrix )\r\n for j in range(i+1, len(matrix)):\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[j][i] = -(matrix[j][i])\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n result = MultiplyMatrix(elementary, result)\r\n\r\n\r\n # after finishing with the lower part of the matrix subtract the numbers above the pivot with elementary for loop\r\n # (make elementary matrix and multiply with the result matrix )\r\n for i in range(len(matrix[0])-1, 0, -1):\r\n for j in range(i-1, -1, -1):\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[j][i] = -(matrix[j][i])\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n result = MultiplyMatrix(elementary, result)\r\n\r\n return result", "def svd_approx(A, s):\n \n U, S, V = la.svd(A)\n V = V.conj().T\n if s > len(S):\n raise ValueError( str(len(S)) + \" = Rank(A) > s\" )\n \n U2 = U[:,:s]\n S2 = S[:s]\n V2 = V[:,:s]\n V2 = V2.conj().T\n \n S2 = np.diag(S2)\n \n Ag = U2@S2@V2\n ent = U2.size + len(S2) + V2.size\n return Ag, ent\n \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def initiateVMatrixes():\n global v, vNew, vExact\n # Initialize the grid to 0\n v = np.zeros((n+1, n+1)) # matrix of v, index are i: row, j:column\n # Set the boundary conditions\n for i in range(1,n):\n v[0,i] = 10\n v[n,i] = 10\n v[i,0] = 10\n v[i,n] = 10\n # Exact solution\n vExact = np.copy(v)\n for i in range(1,n):\n for j in range(1,n):\n vExact[i,j] = 10\n # Initial guess\n for i in range(1,n):\n for j in range(1,n):\n v[i,j] = 0.9*vExact[i,j]\n vNew = np.copy(v)", "def eight_point(points_lst):\r\n\r\n # get H for normalization and produce normalized points\r\n points_lst = np.array(points_lst)\r\n h_l = get_h(points_lst[:, 0])\r\n h_r = get_h(points_lst[:, 1])\r\n p_l_norm = [h_l @ np.array([p[0], p[1], 1]) for p in points_lst[:, 0]]\r\n p_r_norm = [h_r @ np.array([p[0], p[1], 1]) for p in points_lst[:, 1]]\r\n\r\n # create A using normalized points\r\n a = []\r\n for p_l, p_r in zip(p_l_norm, p_r_norm):\r\n x_l, y_l = p_l[0], p_l[1]\r\n x_r, y_r = p_r[0], p_r[1]\r\n a.append([x_r * x_l, x_r * y_l, x_r, y_r * x_l, y_r * y_l, y_r, x_l, y_l, 1])\r\n a = np.array(a)\r\n\r\n u, s, vh = np.linalg.svd(a)\r\n f_mat = np.reshape(vh[-1, :], (3, 3))\r\n\r\n # enforce singularity constraint\r\n u, s, vh = np.linalg.svd(f_mat)\r\n s[-1] = 0\r\n f_unscaled = (u * s) @ vh\r\n\r\n # rescale F\r\n return np.linalg.inv(h_r) @ f_unscaled @ np.linalg.inv(h_l)", "def get_singular_values(matrix, n):\n singular_values = 
None\n u, s, v = svd(matrix)\n ### YOUR CODE HERE\n singular_values=s[0:n]\n ### END YOUR CODE\n return singular_values", "def svm():", "def optimalSVHT(matrix):\n \n m, n = matrix.shape\n beta = 1.0 * m / n\n \n w = (8.0 * beta) / (beta + 1 + np.sqrt(beta**2 + 14 * beta +1))\n lambdaStar = np.sqrt(2.0 * (beta + 1) + w)\n \n omega = 0.56*beta**3 - 0.95*beta**2 + 1.82*beta + 1.43 \n uSVD, wSVD, vSVD = np.linalg.svd(matrix)\n medianSV = np.median(wSVD)\n \n thrKnownNoise = lambdaStar * np.sqrt(n)\n thrUnknownNoise = omega * medianSV\n \n muSqrt = lambdaStar / omega\n noiseEstimation = medianSV / (np.sqrt(n) * muSqrt) \n \n return thrKnownNoise, thrUnknownNoise, noiseEstimation, wSVD", "def _sigmainf(N, h, m, dW, Km0, Pm0):\n M = m*(m-1)/2\n Im = broadcast_to(np.eye(m), (N, m, m))\n IM = broadcast_to(np.eye(M), (N, M, M))\n Ims0 = np.eye(m**2)\n factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))\n factor2 = _kp2(Im, _dot(dW, _t(dW)))\n factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))\n return 2*IM + _dot(_dot(factor1, factor2), factor3)", "def update_model(X, U, S, k, n, mu,\n svdmethod='full',\n missingmethod='zero'):\n\n if len(X) == 0:\n printt(\"Error: No data in X.\")\n return None, None, None, -1, None\n #print('%d items in X' % X.shape[1])\n #print('init U:', U)\n\n # If there is no previous U, and we just got a single item in X,\n # set U to all 0's (degenerate SVD),\n # and return it with mu.\n # (PR #22 sets first value to 1; see decals implementation)\n if len(U) == 0 and X.shape[1] == 1:\n mu = X\n # Do this no matter what. Let mu get NaNs in it as needed.\n U = np.zeros_like(mu)\n U[0] = 1\n S = np.array([0])\n n = 1\n pcts = [1.0]\n return U, S, mu, n, pcts\n\n ###########################################################################\n # Do full SVD of X if this is requested, regardless of what is in U \n # Also, if n = 0 or U is empty, start from scratch\n output_k = False\n if svdmethod == 'full' or len(U) == 0 or n == 0:\n if n == 0:\n if len(U) == 0:\n printt(\"----- initial SVD -----\")\n output_k = True\n else:\n # Reshape so we don't have an empty dimension (yay python)\n U = U.reshape(-1, 1)\n elif len(U) == 0:\n printt(\"WARNING: N (number of items modeled by U) is %d, not zero, but U is empty!\" % n)\n\n # Bootstrap\n if missingmethod == 'ignore':\n printt(\"ERROR: ignore with full is not possible under ordinary circumstances.\")\n printt(\"Use --increm-brand to impute for NaNs.\")\n printt(\"For now, we are filling NaNs with 0.\")\n X = copy.deepcopy(X)\n z = np.where(np.isnan(X))\n X[z] = 0\n\n mu = np.mean(X, axis=1).reshape(-1,1)\n X = X - mu\n U, S, V = linalg.svd(X, full_matrices=False)\n printt('Just did full SVD on %d items.' % X.shape[1])\n #print('X:', X)\n #print('U:', U)\n # Reset U to all 0's if we only have one item in X (degenerate SVD)\n if X.shape[1] == 1:\n U = np.zeros_like(U)\n \n # Keep only the first k components\n S_full = S\n S = S[0:k]\n U = U[:,0:k]\n\n # Update n to number of new items in X\n n = X.shape[1]\n \n ###########################################################################\n # Incremental SVD from Ross\n elif svdmethod == 'increm-ross':\n # Incremental SVD from Ross et al. 
2008\n # \"Incremental Learning for Robust Visual Tracking\"\n # based on Lim and Ross's sklm.m implementation in MATLAB.\n\n # This method DOES NOT handle missing values.\n if missingmethod == 'ignore':\n print('ERROR: increm-ross cannot handle missing values.')\n print('If they are present, try svdmethod=increm-brand')\n print(' or use missingmethod=zero to zero-fill.')\n print('If there are no missing values, specify missingmethod=none.')\n sys.exit(1)\n\n n_new = X.shape[1]\n \n # Compute mean\n # Weirdly, the later 'X-mu_new' is MUCH faster if you reshape as shown.\n # This is because of differences in the way numpy treats a 1d array versus a 2d column.\n mu_new = np.mean(X, axis=1).reshape(-1,1)\n\n # Subtract the mean, append it as a column vector, and update mu\n # X - mu_new will be zero if X has only 1 item\n mu_old = mu\n # New mu is a weighted sum of old and new mus\n mu = (n * mu_old + n_new * mu_new) / (n + n_new)\n B = np.hstack((X - mu,\n math.sqrt(n_new * n/float(n_new+n)) * \\\n (mu_old - mu_new)))\n printt(\"Now tracking mean for %d -> %d items; mu.min %f, mu.max %f \" % \\\n (n, n+n_new, np.nanmin(mu), np.nanmax(mu)))\n n = n + n_new\n\n if S.all() == 0:\n npcs = U.shape[1]\n diagS = np.zeros((npcs, npcs))\n else:\n diagS = np.diag(S)\n\n # I don't think this is right. At this point B is the augmented\n # matrix rather than the single observation.\n proj = np.dot(U.T, B)\n reproj_err = B - np.dot(U, proj)\n\n # to get orthogonal form of reproj_err\n # This should return q with dimensions [d(X) by n_new+1], square\n q, dummy = linalg.qr(reproj_err, mode='full')\n # print('q.shape should be 7x2: ', q.shape)\n Q = np.hstack((U, q))\n\n # From Ross and Lim, 2008\n # R = [ [ Sigma, U.T * X ] [ 0, orthog. component of reproj error ] ]\n k_now = diagS.shape[0]\n new_dim = k_now + n_new + 1\n R = np.zeros((new_dim, new_dim))\n R[0:k_now,0:k_now] = diagS\n R[0:k_now,k_now:] = proj\n orthog_reproj_err = np.dot(q.T, reproj_err)\n R[k_now:, k_now:] = orthog_reproj_err\n \n # Perform SVD of R. Then finally update U.\n U, S, V = linalg.svd(R, full_matrices=False)\n printt('Just did increm-ross SVD on %d items.' % n)\n\n U = np.dot(Q, U)\n \n # Keep only the first k components\n U = U[:,0:min([n,k])]\n S_full = S\n S = S[0:min([n,k])]\n\n ###########################################################################\n # Incremental SVD from Brand\n elif svdmethod == 'increm-brand':\n # Pulled out James's attempt to handle NaNs into\n # increm-brand-james.py. Starting over from scratch here.\n n_new = X.shape[1]\n\n if n_new != 1:\n print(\"WARNING: increm-brand will probably only work by adding one item at a time.\")\n input('\\nPress enter to continue or ^C/EOF to exit. ')\n\n if missingmethod == 'ignore':\n # 1. Update mu\n mu_old = mu\n mu_new = X\n\n # Be careful! For any pre-existing NaNs in mu,\n # let mu_new fill them in. 
Can't get any worse!\n naninds = np.where(np.isnan(mu_old))[0]\n if naninds.size > 0:\n mu_old[naninds,0] = mu_new[naninds,0]\n # And likewise for mu_new -- fill with good values from mu_old.\n naninds = np.where(np.isnan(mu_new))[0]\n if naninds.size > 0:\n mu_new[naninds,0] = mu_old[naninds,0]\n # At this point, the only NaNs that should appear are\n # values that were NaN for both mu and X to start with.\n # They will stay NaN and that's okay.\n \n # New mu is a weighted sum of old and new mus\n mu = (n * mu_old + n_new * mu_new) / (n + n_new)\n printt(\"Now tracking mean for %d -> %d items; mu.min %f, mu.max %f \" % \\\n (n, n+n_new, np.nanmin(mu), np.nanmax(mu)))\n n = n + n_new\n\n # 2. Subtract off the mean\n X = X - mu\n\n # 3. Compute L, the projection of X onto U\n # Note: this will only work for a single item in X\n goodinds = np.where(~np.isnan(X))[0]\n #print('X: %d of %d are good.' % (len(goodinds), X.shape[0]))\n\n diagS = np.diag(S)\n # This is Brand's method, which involves S:\n L = np.dot(diagS,\n np.dot(np.linalg.pinv(np.dot(U[goodinds,:],\n diagS)),\n X[goodinds,:]))\n # Simplified version that does not use S (but is probably wrong):\n #L = np.dot(U[goodinds,:].T,\n # X[goodinds,:])\n # Top row of the Q matrix (eqn 12, Brand 2002)\n Q1 = np.hstack([diagS, L])\n\n # 4. Compute J, the orthogonal basis of H, which is\n # the component of X orthog to U (i.e., unrepresentable direction)\n # 5. Compute K, the projection of X onto J (i.e., unrep. content)\n K = linalg.norm(X[goodinds,:] - np.dot(U[goodinds,:],\n np.dot(U[goodinds,:].T,\n X[goodinds,:])))\n # H = X - UL\n J = np.zeros((U.shape[0], 1))\n J[goodinds] = np.dot(K,\n np.linalg.pinv(X[goodinds,:] -\n np.dot(U[goodinds,:],\n L))).T\n \n # Bottom row of Q matrix (eqn 12, Brand 2002)\n Q2 = np.hstack([np.zeros([1, len(S)]), np.array(K).reshape(1,1)])\n Q = np.vstack([Q1, Q2])\n\n # 6. Take the SVD of Q\n Uq, Sq, Vq = linalg.svd(Q, full_matrices=False)\n\n # 7. Update U and S (eqn 4, Brand 2002)\n # Note: Since J is zero-filled for badinds, now U is too.\n # Alternatively, we give J NaNs and let them get into U as well.\n # I think that is a worse idea though.\n U = np.dot(np.hstack([U, J]), Uq)\n S = Sq\n # Updating V requires knowing old V,\n # but we don't need the new one either so it's okay to skip.\n\n printt('Just did increm-brand SVD on %d items.' % n)\n \n ############# end ###########\n \n else: # No missing values (or not 'ignore')\n # 1. Update mu\n mu_old = mu\n mu_new = X\n # New mu is a weighted sum of old and new mus\n mu = (n * mu_old + n_new * mu_new) / (n + n_new)\n n = n + n_new\n\n # 2. Subtract off the mean\n X = X - mu\n\n # 3. Compute L, the projection of X onto U\n L = np.dot(U.T, X)\n Q1 = np.hstack([np.diag(S), L])\n\n # 4. Compute J, the orthogonal basis of H, which is\n # the component of X orthog to U (i.e., unrepresentable direction)\n # 5. Compute K, the projection of X onto J (i.e., unrep. content)\n JK = X - np.dot(U, L)\n (J, K) = linalg.qr(JK)\n\n Q2 = np.hstack([np.zeros([1, len(S)]), np.array(K).reshape(1,1)])\n Q = np.vstack([Q1, Q2])\n\n # 6. Take the SVD of Q\n Uq, Sq, Vq = linalg.svd(Q, full_matrices=False)\n\n # 7. Update U and S (eqn 4, Brand 2002)\n U = np.dot(np.hstack([U, J]), Uq)\n S = Sq\n # V requires knowing old V,\n # but we don't need the new one either so it's okay.\n \n printt('Just did regular increm SVD on %d items.' 
% n)\n\n # Keep only the first k components\n U = U[:,0:min([n,k])]\n S = S[0:min([n,k])]\n\n Usum = U.sum(1)\n\n\n ###########################################################################\n # We have a bad svdmethod, but somehow didn't catch it earlier.\n else:\n printt(\"504: Bad Gateway in protocol <Skynet_authentication.exe>\")\n return None, None, None, None, None\n\n indivpcts = None\n\n # This only works if a full SVD was done\n if (svdmethod == 'full' and output_k and opts['k_var'] == -773038.0):\n # Calculate percent variance captured by each \n cumsum = np.cumsum(S_full)\n #print(cumsum.shape)\n if cumsum[-1] != 0:\n indivpcts = S / cumsum[-1]\n indivpcts = indivpcts[0:k] # truncate to first k\n cumpercents = cumsum / cumsum[-1]\n else:\n indivpcts = []\n\n # Calculate percent variance captured\n if k >= cumsum.shape[0]:\n printt('Cannot estimate data variance; specified k (%d) exceeds the number of SVs (%d).' % (k, cumsum.shape[0]))\n else:\n printt(\"Selected value of k=%d captures %5.2f%% of the data variance\" % \\\n (k, cumpercents[k-1] * 100))\n if opts['pause']: input(\"Press enter to continue\\n\")\n\n #print('U:', U)\n #print('mu:', mu)\n return U, S, mu, n, indivpcts", "def tsvd(A, threshold=0.99999, avoid_pathological=True):\n M,N = A.shape\n full_matrices = False\n\n if is_int(threshold):\n # Assume specific number is requested\n r = threshold\n assert 1 <= r <= max(M,N)\n if r > min(M,N):\n full_matrices = True\n r = min(M,N)\n\n U,s,VT = sla.svd(A, full_matrices)\n\n if isinstance(threshold,float):\n # Assume proportion is requested\n r = truncate_rank(s,threshold,avoid_pathological)\n\n # Truncate\n U = U [:,:r]\n VT = VT[ :r]\n s = s [ :r]\n return U,s,VT", "def svd_approx(A, s):\r\n U, Si, Vh = la.svd(A)\r\n zeros = list(Si).count(0)\r\n #raise error if there are not enough nonzero singular values\r\n if len(Si) - zeros < s:\r\n raise ValueError(\"s > rank(A)\")\r\n #Only save first s singular values for Si\r\n Si_hat = np.diag(Si[:s])\r\n #Save first s columns of U\r\n U_hat = U[:,:s]\r\n #Save first s rows of Vh\r\n Vh_hat = Vh[:s,:]\r\n\r\n # return new A and num of entries needed\r\n return U_hat@Si_hat@Vh_hat, U_hat.size+s+Vh_hat.size", "def ES_SVD(U, sigma, V, time, f_fault, f_side, PMItreshold, estimate_xi_func=get_SVDxi, estimate_xi_func_params=None):\n\n # Get the search region\n m = sigma.size\n f_fault = np.asanyarray(f_fault)\n f_side = np.asanyarray(f_side)\n dt = time[1] - time[0]\n Fs = 1.0/dt\n PMI = [] #PMI is here the envelope score\n W = []\n for i in range(0, f_fault.size):\n PMI.append(np.zeros(m))\n W.append(np.zeros(m))\n\n # Calculate PMI for each fault type\n for i in range(0, m):\n if estimate_xi_func_params is None:\n a_i = estimate_xi_func(U, sigma, V, i)\n else:\n a_i = estimate_xi_func(U, sigma, V, i, estimate_xi_func_params)\n a_i = envelope(a_i)\n Y, df = fft(a_i, Fs)\n # Calculate PMI for each fault type\n for k in range(0, f_fault.size):\n PMI[k][i] = diagnosefft(Y, df, f_fault[k], 1.0, f_side[k])\n\n # Calculate weights\n for k in range(0, f_fault.size):\n temp = 0.0\n for i in range(0, m):\n if PMI[k][i] > PMItreshold:\n temp += PMI[k][i]\n for i in range(0, m):\n if PMI[k][i] > PMItreshold:\n W[k][i] = PMI[k][i]/temp\n\n # Return data\n return PMI, W", "def spca(a, s, k, d):\n\n p = a.shape[0]\n X = np.zeros((p, k))\n\n for l in range(k):\n # 1\n [w, V] = linalg.eigh(a)\n idx = w.argsort()\n w = w[idx]\n V = V[:, idx]\n\n # 2\n xprime, value = spannogram(V[:, -d:], w[-d:], s=s)\n X[:, l] = xprime[:, 0]\n\n # 3\n idx = 
np.abs(xprime).argsort(axis=0)\n for i in idx[-s:]:\n a[i, :] = 0\n a[:, i] = 0\n\n return X", "def svd(T):\n try:\n U, S, V = splinalg.svd(T, full_matrices=False)\n except splinalg.LinAlgError:\n U, S, V = splinalg.svd(T, full_matrices=False, lapack_driver='gesvd')\n maxU, minU = U.max(0), U.min(0)\n maxV, minV = V.max(1), V.min(1)\n ind = (np.abs(minU) > maxU) & (np.abs(minV) > maxV)\n U[:, ind] *= -1\n V[ind] *= -1\n return U, S, V", "def svd_inverse(matrix):\n U, S, V = np.linalg.svd(matrix)\n\n dim = S.shape[0]\n S = la.diagsvd(S, dim, dim)\n V = np.matrix(V)\n U = np.matrix(U)\n\n # Compute the inverse SVD\n V_dag_S = np.dot(V.getH(), np.linalg.inv(S))\n V_dag_S_U_dag = np.dot(V_dag_S, U.getH())\n\n return V_dag_S_U_dag", "def prepare(self):\n ls=len(self.v)\n self.S=numpy.zeros(ls)\n self.A=numpy.zeros((ls,ls))\n\n for k,v in self.e.items():\n b,e=k\n bi,ei=self.rv[b],self.rv[e]\n self.A[bi,bi]-=v\n self.A[bi,ei]+=v", "def f_v(_a, _vs, _Ps, _Ps0): # _aはスカラ, _vsはベクトル, _Ps, _Ps0は3行2列の行列\n center_pos = _Ps[0]\n center_pos_0 = _Ps0[0]\n idx_iter = Index_iterator(1, 8)\n #中心点から各点へのベクトル\n x = []\n x0 = []\n for p in (_Ps):\n x.append(p - center_pos)\n for p in _Ps(_Ps0):\n x0.append(p - center_pos_0)\n\n x01 = (_Ps[1]-center_pos) \n x02 = (_Ps[2]-center_pos) \n x03 = (_Ps[3]-center_pos) \n x04 = (_Ps[4]-center_pos) \n x05 = (_Ps[5]-center_pos) \n x06 = (_Ps[6]-center_pos) \n x07 = (_Ps[7]-center_pos) \n x08 = (_Ps[8]-center_pos)\n print('p_id', center_pos, end='\\t')\n print('x01:', x01, end=\"\\t\")\n print('x03:', x03, end=\"\\t\")\n print('x05:', x05, end=\"\\t\")\n print('x07:', x07)\n x001 = (_Ps0[1]-_Ps0[0]) \n x002 = (_Ps0[2]-_Ps0[0]) \n x003 = (_Ps0[3]-_Ps0[0]) \n x004 = (_Ps0[4]-_Ps0[0]) \n x005 = (_Ps0[5]-_Ps0[0]) \n x006 = (_Ps0[6]-_Ps0[0]) \n x007 = (_Ps0[7]-_Ps0[0]) \n x008 = (_Ps0[8]-_Ps0[0]) \n \n #中心点周りの面の面積\n def calc_area(j,k,l):\n s = LA.norm(np.cross(x[j],x[k]))/2 \\\n + LA.norm(np.cross(x[k],x[l]))/2\n return s\n\n s = []\n s0 = []\n hen = [1,3,5,7]\n for i in range(4):\n j,k,l = [n for n in idx_iter.get_indexes(start_idx=hen[i], 3)]\n s[i] = calc_area(j,k,l)\n s0[i] = calc_area(j,k,l)\n\n # s0123 = LA.norm(np.cross(x[1],x[2]))/2\\\n # +LA.norm(np.cross(x[2],x[3]))/2\n # s4367 = LA.norm(np.cross(x[3],x[4]))/2\\\n # +LA.norm(np.cross(x[4],x[5]))/2\n # s4785 = LA.norm(np.cross(x[5],x[6]))/2\\\n # +LA.norm(np.cross(x[6],x[7]))/2\n # s4521 = LA.norm(np.cross(x[7],x[8]))/2\\\n # +LA.norm(np.cross(x[8],x[1]))/2\n # s04103 = LA.norm(np.cross(x0[1],x0[2]))/2\\\n # +LA.norm(np.cross(x0[2],x0[3]))/2\n # s04367 = LA.norm(np.cross(x0[3],x0[4]))/2\\\n # +LA.norm(np.cross(x0[4],x0[7]))/2\n # s04785 = LA.norm(np.cross(x0[7],x0[8]))/2\\\n # +LA.norm(np.cross(x0[8],x0[5]))/2\n # s04521 = LA.norm(np.cross(x0[5],x0[2]))/2\\\n # +LA.norm(np.cross(x0[2],x0[1]))/2\n \n #各方向への平均面積(ここだけ反時計回り順で設定してる)\n S_iminus = (s[1] + s[2]) / 2 #43方向\n S_Jminus = (s[1] + s[4]) / 2 #41方向\n S_iplus = (s[3] + s[4]) / 2 #45方向\n S_Jplus = (s[3] + s[2]) / 2 #47方向\n S_iminus0 = (s0[1] + s0[2]) / 2 #43方向\n S_Jminus0 = (s0[1] + s0[4]) / 2 #41方向\n S_iplus0 = (s0[3] + s0[4]) / 2 #45方向\n S_Jplus0 = (s0[3] + s0[2]) / 2 #47方向\n # 各方向への厚み\n h_iminus = h_0 / ((poisson/(1-poisson) * (S_iminus - S_iminus0) / S_iminus0) + 1) #43方向\n h_Jminus = h_0 / ((poisson/(1-poisson) * (S_Jminus - S_Jminus0) / S_Jminus0) + 1) #41方向\n h_iplus = h_0 / ((poisson/(1-poisson) * (S_iplus - S_iplus0) / S_iplus0) + 1) #45方向\n h_Jplus = h_0 / ((poisson/(1-poisson) * (S_Jplus - S_Jplus0) / S_Jplus0) + 1) #47方向\n # 各断片の重心\n g = []\n kado = 
[2,4,6,8]\n hen = [1,3,5,7]\n for i in range(len(kado)):\n _kado = kado[i]\n _hen1, _ = [idx for idx in idx_iter.get_indexes_reverse(_kado, 2)]\n _hen2, _ = [idx for idx in idx_iter.get_indexes(_kado, 2)]\n _hen = [_hen1, _hen2]\n _g1 = (center_pos + _Ps[_kado] + _Ps[_hen1])/3\n _g2 = (center_pos + _Ps[_kado] + _Ps[_hen2])/3\n g.append([_g1, _g2])\n\n g401 = (center_pos + _Ps[0] + _Ps[1]) / 3\n g430 = (center_pos + _Ps[3] + _Ps[0]) / 3\n g436 = (center_pos + _Ps[3] + _Ps[6]) / 3\n g467 = (center_pos + _Ps[6] + _Ps[7]) / 3\n g478 = (center_pos + _Ps[7] + _Ps[8]) / 3\n g485 = (center_pos + _Ps[8] + _Ps[5]) / 3\n g452 = (center_pos + _Ps[5] + _Ps[2]) / 3\n g421 = (center_pos + _Ps[2] + _Ps[1]) / 3\n g0401 = (_Ps0[4] + _Ps0[0] + _Ps0[1]) / 3\n g0430 = (_Ps0[4] + _Ps0[3] + _Ps0[0]) / 3\n g0436 = (_Ps0[4] + _Ps0[3] + _Ps0[6]) / 3\n g0467 = (_Ps0[4] + _Ps0[6] + _Ps0[7]) / 3\n g0478 = (_Ps0[4] + _Ps0[7] + _Ps0[8]) / 3\n g0485 = (_Ps0[4] + _Ps0[8] + _Ps0[5]) / 3\n g0452 = (_Ps0[4] + _Ps0[5] + _Ps0[2]) / 3\n g0421 = (_Ps0[4] + _Ps0[2] + _Ps0[1]) / 3\n \n # 各断片面積\n triangle_area = []\n kado = [2,4,6,8]\n for i in range(len(kado)):\n j, k = [idx for idx in idx_iter.get_indexes_reverse(kado[i], 1)]\n _s1 = LA.norm(np.cross(x[j],x[k]))/2\n j, k = [idx for idx in idx_iter.get_indexes(kado[i], 1)]\n _s2 = LA.norm(np.cross(x[j],x[k]))/2\n triangle_area.append([_s1, _s2])\n\n s410 = LA.norm(np.cross(x[1],x[2]))/2\n s403 = LA.norm(np.cross(x[2],x[3]))/2\n s436 = LA.norm(np.cross(x[3],x[4]))/2\n s467 = LA.norm(np.cross(x[4],x[5]))/2\n s478 = LA.norm(np.cross(x[5],x[6]))/2\n s485 = LA.norm(np.cross(x[6],x[7]))/2\n s452 = LA.norm(np.cross(x[7],x[8]))/2\n s421 = LA.norm(np.cross(x[8],x[1]))/2\n s0410 = LA.norm(np.cross(x0[1],x0[2]))/2\n s0403 = LA.norm(np.cross(x0[2],x0[3]))/2\n s0436 = LA.norm(np.cross(x0[3],x0[4]))/2\n s0467 = LA.norm(np.cross(x0[4],x0[5]))/2\n s0478 = LA.norm(np.cross(x0[5],x0[6]))/2\n s0485 = LA.norm(np.cross(x0[6],x0[7]))/2\n s0452 = LA.norm(np.cross(x0[7],x0[8]))/2\n s0421 = LA.norm(np.cross(x0[8],x0[1]))/2\n # 四角の重心\n\n center_g_square = []\n for i in range(len(g)):\n _g = (triangle_area[i][0]*g[i][0] + triangle_area[i][1]*g[i][1])/(triangle_area[i][0] + triangle_area[i][1])\n center_g.append(_g)\n g4103 = (s410*g401 + s403*g430) / (s410 + s403)\n g4367 = (s436*g436 + s467*g467) / (s436 + s467)\n g4785 = (s478*g478 + s485*g485) / (s478 + s485)\n g4521 = (s452*g452 + s421*g421) / (s452 + s421)\n g04103 = (s0410*g0401 + s0403*g0430) / (s0410 + s0403)\n g04367 = (s0436*g0436 + s0467*g0467) / (s0436 + s0467)\n g04785 = (s0478*g0478 + s0485*g0485) / (s0478 + s0485)\n g04521 = (s0452*g0452 + s0421*g0421) / (s0452 + s0421)\n # 各重心間の距離\n Lj82 = LA.norm(g4521 - g4103)\n Lj24 = LA.norm(g4103 - g4367)\n Lj46 = LA.norm(g4367 - g4785)\n Lj68 = LA.norm(g4785 - g4521)\n \n # ひずみ\n eps_i41 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J41 = (LA.norm(g4521 - g4103) - LA.norm(g04521 - g04103)) / LA.norm(g04521 - g04103)\n eps_i43 = (LA.norm(x03) - LA.norm(x043)) / LA.norm(x043)\n eps_J43 = (LA.norm(g4103 - g4367) - LA.norm(g04103 - g04367)) / LA.norm(g04103 - g04367)\n eps_i47 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J47 = (LA.norm(g4367 - g4785) - LA.norm(g04367 - g04785)) / LA.norm(g04367 - g04785)\n eps_i45 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J45 = (LA.norm(g4785 - g4521) - LA.norm(g04785 - g04521)) / LA.norm(g04785 - g04521)\n # 張力\n F_T1 = (young_modulus * h_Jminus * Lj82 * (eps_i41 + poisson * eps_J41) / (1 - poisson**2))*x01/LA.norm(x01)\n F_T3 = 
(young_modulus * h_iminus * Lj24 * (eps_i43 + poisson * eps_J43) / (1 - poisson**2))*x03/LA.norm(x03)\n F_T5 = (young_modulus * h_Jplus * Lj46 * (eps_i47 + poisson * eps_J47) / (1 - poisson**2))*x05/LA.norm(x05)\n F_T7 = (young_modulus * h_iplus * Lj68 * (eps_i45 + poisson * eps_J45) / (1 - poisson**2))*x07/LA.norm(x07)\n # せん断ひずみ\n gamma513 = (math.acos((np.dot(x07,x01))/(LA.norm(x07)*LA.norm(x01))) - math.acos((np.dot(x045,x041))/(LA.norm(x045)*LA.norm(x041)))\\\n + math.acos((np.dot(x03,x01))/(LA.norm(x03)*LA.norm(x01))) - math.acos((np.dot(x043,x041))/(LA.norm(x043)*LA.norm(x041))))/2\n gamma137 = (math.acos((np.dot(x01,x03))/(LA.norm(x01)*LA.norm(x03))) - math.acos((np.dot(x041,x043))/(LA.norm(x041)*LA.norm(x043)))\\\n + math.acos((np.dot(x03,x05))/(LA.norm(x03)*LA.norm(x05))) - math.acos((np.dot(x043,x047))/(LA.norm(x043)*LA.norm(x047))))/2\n gamma375 = (math.acos((np.dot(x05,x03))/(LA.norm(x05)*LA.norm(x03))) - math.acos((np.dot(x047,x043))/(LA.norm(x047)*LA.norm(x043)))\\\n + math.acos((np.dot(x07,x05))/(LA.norm(x07)*LA.norm(x05))) - math.acos((np.dot(x045,x047))/(LA.norm(x045)*LA.norm(x047))))/2\n gamma751 = (math.acos((np.dot(x05,x07))/(LA.norm(x05)*LA.norm(x07))) - math.acos((np.dot(x047,x045))/(LA.norm(x047)*LA.norm(x045)))\\\n + math.acos((np.dot(x07,x01))/(LA.norm(x07)*LA.norm(x01))) - math.acos((np.dot(x045,x041))/(LA.norm(x045)*LA.norm(x041))))/2\n # せん断力\n F_S41 = ((young_modulus * h_Jminus * LA.norm(x01) * gamma513)/(2 * (1 + poisson)))*x01/LA.norm(x01)\n F_S43 = ((young_modulus * h_Jminus * LA.norm(x03) * gamma137)/(2 * (1 + poisson)))*x03/LA.norm(x03)\n F_S47 = ((young_modulus * h_Jminus * LA.norm(x05) * gamma375)/(2 * (1 + poisson)))*x05/LA.norm(x05)\n F_S45 = ((young_modulus * h_Jminus * LA.norm(x07) * gamma751)/(2 * (1 + poisson)))*x07/LA.norm(x07)\n \n # J方向の曲げ力\n n_j_cross = np.cross(x05, x01)\n if any(n_j_cross):\n n_J = n_j_cross/LA.norm(n_j_cross)\n else: \n\n l_Jalfa = LA.norm(_Ps[1] - _Ps[7])\n cos_Jalfa = (LA.norm(x01)**2 + LA.norm(x05)**2 - l_Jalfa**2) / (2 * LA.norm(x01) * LA.norm(x05))\n if cos_Jalfa > 1.0:\n cos_Jalfa = 1.0\n elif cos_Jalfa < -1.0:\n cos_Jalfa = -1.0\n sin_Jalfa = math.sqrt(1 - cos_Jalfa**2)\n CJa2 = math.sqrt((cos_Jalfa + 1)/2)\n SJa2 = math.sqrt((1 - cos_Jalfa)/2)\n zJC = (_Ps[7][2]-_Ps[1][2])/(_Ps[7][0]-_Ps[1][0]) * (center_pos[0]-_Ps[1][0]) + _Ps[1][2] #曲げ力の方向の場合わけに必要\n if center_pos[2] > zJC:\n e_j = np.dot(np.array([[CJa2 + (n_J[0]**2) * (1 - CJa2), n_J[0] * n_J[1] * (1 - CJa2) + n_J[2] * SJa2, n_J[0] * n_J[2] * (1 - CJa2) - n_J[1] * SJa2],\\\n [n_J[1] * n_J[0] * (1 - CJa2) - n_J[2] * SJa2, CJa2 + (n_J[1]**2) * (1 - CJa2), n_J[1] * n_J[2] * (1 - CJa2) + n_J[0] * SJa2],\\\n [n_J[2] * n_J[0] * (1 - CJa2) + n_J[1] * SJa2, n_J[2] * n_J[1] * (1 - CJa2) - n_J[0] * SJa2, CJa2 + (n_J[2]**2) * (1 - CJa2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n else:\n e_j = np.dot(np.array([[CJa2 + (n_J[0]**2) * (1 - CJa2), n_J[0] * n_J[1] * (1 - CJa2) - n_J[2] * SJa2, n_J[0] * n_J[2] * (1 - CJa2) + n_J[1] * SJa2],\\\n [n_J[1] * n_J[0] * (1 - CJa2) + n_J[2] * SJa2, CJa2 + (n_J[1]**2) * (1 - CJa2), n_J[1] * n_J[2] * (1 - CJa2) - n_J[0] * SJa2],\\\n [n_J[2] * n_J[0] * (1 - CJa2) - n_J[1] * SJa2, n_J[2] * n_J[1] * (1 - CJa2) + n_J[0] * SJa2, CJa2 + (n_J[2]**2) * (1 - CJa2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n d_etha_J = (2 * sin_Jalfa / l_Jalfa) - (2 * math.sqrt(1 - np.dot(x041,x047)**2/(LA.norm(x041)*LA.norm(x047))**2)/(LA.norm(x041 - x047)))\n\n n_i = np.cross(x07,x03)/LA.norm(np.cross(x03,x07)) \n cos_ialfa = 
np.dot(x03,x07) / (LA.norm(x03) * LA.norm(x07))\n sin_ialfa = math.sqrt(1 - cos_ialfa**2)\n Cia2 = math.sqrt((cos_ialfa + 1)/2)\n Sia2 = math.sqrt((1 - cos_ialfa)/2)\n ziC = (_Ps[5][2]-_Ps[3][2])/(_Ps[5][0]-_Ps[3][0]) * (center_pos[0]-_Ps[3][0]) + _Ps[3][2]\n if center_pos[2] > ziC:\n e_i = np.dot(np.array([[Cia2 + (n_i[0]**2) * (1 - Cia2), n_i[0] * n_i[1] * (1 - Cia2) + n_i[2] * Sia2, n_i[0] * n_i[2] * (1 - Cia2) - n_i[1] * Sia2],\\\n [n_i[1] * n_i[0] * (1 - Cia2) - n_i[2] * Sia2, Cia2 + (n_i[1]**2) * (1 - Cia2), n_i[1] * n_i[2] * (1 - Cia2) + n_i[0] * Sia2],\\\n [n_i[2] * n_i[0] * (1 - Cia2) + n_i[1] * Sia2, n_i[2] * n_i[1] * (1 - Cia2) - n_i[0] * Sia2, Cia2 + (n_i[2]**2) * (1 - Cia2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n else:\n e_i = np.dot(np.array([[Cia2 + (n_i[0]**2) * (1 - Cia2), n_i[0] * n_i[1] * (1 - Cia2) - n_i[2] * Sia2, n_i[0] * n_i[2] * (1 - Cia2) + n_i[1] * Sia2],\\\n [n_i[1] * n_i[0] * (1 - Cia2) + n_i[2] * Sia2, Cia2 + (n_i[1]**2) * (1 - Cia2), n_i[1] * n_i[2] * (1 - Cia2) - n_i[0] * Sia2],\\\n [n_i[2] * n_i[0] * (1 - Cia2) - n_i[1] * Sia2, n_i[2] * n_i[1] * (1 - Cia2) + n_i[0] * Sia2, Cia2 + (n_i[2]**2) * (1 - Cia2)]]), (_Ps[5] - center_pos)/LA.norm(_Ps[5] - center_pos))\n d_etha_i = (2 * sin_ialfa / LA.norm(x07 - x03)) - (2 * math.sqrt(1 - np.dot(x043,x045)**2/(LA.norm(x043)*LA.norm(x045))**2)/(LA.norm(x043 - x045)))\n\n\n l_J = (Lj20 + Lj06 + Lj68 + Lj82) / 4\n h = (h_iminus + h_iplus + h_Jminus + h_Jplus) / 4\n I = (l_J * h**3) / 12\n M_i = (young_modulus * I * (d_etha_i + poisson * d_etha_J)/(1 - poisson**2))\n M_J = (young_modulus * I * (d_etha_J + poisson * d_etha_i)/(1 - poisson**2))\n #曲げ力\n F_Bi = M_i / LA.norm(x03) + M_i / LA.norm(x07) * e_i\n F_BJ = M_J / LA.norm(x01) + M_J / LA.norm(x05) * e_j\n #空気力\n # S = (S_iminus + S_iplus + S_Jminus + S_Jplus) / 4\n # F_A = p * S\n F_A = np.array([0.0, 0.0, -0.1]) * _a\n\n # 運動方程式(支配方程式)\n S_0 = (S_iminus0 + S_iplus0 + S_Jminus0 + S_Jplus0) / 4\n F_T = F_T41 + F_T43 + F_T45 + F_T47\n F_S = F_S41 + F_S43 + F_S45 + F_S47\n F_B = F_Bi + F_BJ\n return (F_T + F_S + F_B + F_A) / (rho * h_0 * S_0) - c * _vs", "def to_s_matrix(w,v):\n pass", "def ssa_decompose(y, dim):\n n = len(y)\n t = n - (dim - 1)\n\n yy = linalg.hankel(y, np.zeros(dim))\n yy = yy[:-dim + 1, :] / np.sqrt(t)\n\n # here we use gesvd driver (as in Matlab)\n _, s, v = linalg.svd(yy, full_matrices=False, lapack_driver='gesvd')\n\n # find principal components\n vt = np.matrix(v).T\n pc = np.matrix(yy) * vt\n\n return np.asarray(pc), s, np.asarray(vt)", "def visualize_svd(A):\r\n theta = np.linspace(0,2*np.pi,200)\r\n #Set S as unit circle\r\n S = np.array([np.cos(theta), np.sin(theta)])\r\n #Set E as orthogonal basis\r\n E = np.array([[1,0,0],[0,0,1]])\r\n U,Si,Vh = la.svd(A)\r\n Si = np.diag(Si)\r\n\r\n #plot original S and E\r\n first = plt.subplot(221)\r\n first.plot(S[0], S[1])\r\n first.plot(E[0], E[1])\r\n first.axis(\"equal\")\r\n\r\n #rotate S,E and plot S,E\r\n second = plt.subplot(222)\r\n vhs = Vh@S\r\n vhe = Vh@E\r\n second.plot(vhs[0], vhs[1])\r\n second.plot(vhe[0], vhe[1])\r\n second.axis(\"equal\")\r\n\r\n #scale S,E and plot S,E\r\n third = plt.subplot(223)\r\n sivhs = Si@vhs\r\n sivhe = Si@vhe\r\n third.plot(sivhs[0],sivhs[1])\r\n third.plot(sivhe[0],sivhe[1])\r\n third.axis([-4,4,-4,4])\r\n\r\n #rotate S,E and plot S,E\r\n fourth = plt.subplot(224)\r\n usivhs = U@sivhs\r\n usivhe = U@sivhe\r\n fourth.plot(usivhs[0],usivhs[1])\r\n fourth.plot(usivhe[0],usivhe[1])\r\n fourth.axis([-4,4,-4,4])\r\n\r\n plt.show()", "def 
_compute_s_matrix(self, system_std_dev: tf.Tensor) -> None:\n self.s_matrix_inv = self._kronecker_product(\n tf.diag(tf.reshape(tf.ones_like(system_std_dev, dtype=tf.float64)\n / system_std_dev, [-1])),\n tf.eye(self.n_points_int, dtype=tf.float64))\n return", "def S1(A,B):\n C = np.subtract(A,B)\n s = np.linalg.svd(C)[1]\n return (np.sum(s))", "def spcaold(a, s, k, d):\n\n p = a.shape[0]\n X = np.zeros((p, k))\n\n for l in range(k):\n # 1\n [w, V] = linalg.eigh(a)\n idx = w.argsort()\n w = w[idx]\n V = V[:, idx]\n\n # 2\n xprime, value = spannogram(V[:, -d:], w[-d:])\n\n # 4\n idx = np.abs(xprime).argsort(axis=0)\n for i in idx[:-s]:\n xprime[i] = 0\n\n X[:, l] = xprime[:, 0]\n\n # 5\n for i in idx[-s:]:\n a[i, :] = 0\n a[:, i] = 0\n\n return X", "def __init__(self, A, rank=0):\r\n _u, _s, _v = np.linalg.svd(A, full_matrices=0)\r\n \r\n self.rank = rank\r\n\r\n self.U = _u[:,:self.rank].copy()\r\n self.S = _s[:self.rank].copy()\r\n self.SI = np.matrix(np.diag(self.S)).getI()\r\n self.VT = _v[:self.rank,:].copy()\r\n \r\n self._var = [ e/(_s**2).sum() for e in (_s**2).cumsum() ][self.rank-1]", "def calculate_posvij_matrices(main_tetrad_ark):\n\n # Import all the possible solutions to the Vij matrices\n vij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n vij_matrices = []\n\n print(\" \")\n print(\" Calculating Vij matrices\")\n print(\" \")\n # for i in range(0, len(main_tetrad_ark)):\n for i in range(0, len(vij_possibilities)):\n tet_i = [x[1] for x in main_tetrad_ark[i]]\n tri_tet = [np.transpose(i) for i in tet_i]\n print(\"# ********************************\")\n # print(\" \")\n print(\"MATRIX i: \", i)\n print(\" \")\n for j in range(0, len(main_tetrad_ark)):\n tet_j = [x[1] for x in main_tetrad_ark[j]]\n trj_tet = [np.transpose(j) for j in tet_j]\n vij_temp = []\n # print(\"# ********************************\")\n print(\" \")\n print(\"MATRIX j: \", j)\n temp_zero = np.zeros((4,4), dtype=int)\n for x in range(0,len(tet_i)):\n test_1half = np.dot(tri_tet[x],tet_j[x])\n test_2half = np.dot(trj_tet[x],tet_i[x])\n test_difs = np.subtract(test_1half, test_2half)\n # print(\" \")\n # print(test_difs)\n temp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n vij_temp.append(temp_mat)\n # print(\"\")\n temp_add1 = np.add(vij_temp[0], vij_temp[1])\n temp_add2 = np.add(temp_add1, vij_temp[2])\n tempf = np.add(temp_add2, vij_temp[3])\n # tempf = np.divide(temp_add3, 2)\n for ijx in vij_possibilities:\n if np.array_equal(temp_addf, ijx[0]):\n print(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n print(\"l-solution found:\", ijx[1])\n print(temp_addf)\n print(\"\")\n print(ijx[0])\n if np.array_equal(temp_addf, temp_zero):\n pass\n else:\n vij_matrices.append(temp_addf)\n # print(\"\")\n print(temp_addf)\n # vij_matrices.append(temp_addf)\n vijmats_size = sys.getsizeof(vij_matrices)\n print(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n print(\"Length of Vij Matrices\")\n print(len(vij_matrices))\n print(vij_matrices)\n pass", "def compute_matrix(self):\n\n fac = self.a / self.dx ** 2\n\n diagonal = np.ones(self.nx) * 2 * fac\n lower = np.ones(self.nx - 1) * -fac\n upper = np.ones(self.nx - 1) * -fac\n\n matrix = sp.diags(\n diagonals=[diagonal, lower, upper],\n offsets=[0, -1, 1], shape=(self.nx, self.nx),\n format='csr')\n\n return matrix", "def decompress_svd(size:tuple, svd_u, svd_s, svd_vh):\r\n m, n = size[0:2]\r\n u = np.zeros((3, m, m), dtype=np.float64)\r\n s = np.zeros((3, min(m, n)), dtype=np.float64)\r\n vh 
= np.zeros((3, n, n), dtype=np.float64)\r\n\r\n _,p = svd_s.shape\r\n u[:, 0:m, 0:p] = svd_u[:, :, :]\r\n s[:, 0:p] = svd_s[:, :]\r\n vh[:, 0:p, 0:n] = svd_vh[:, :, :]\r\n\r\n # SVD equation: A = U * D * VH\r\n img_svd = np.zeros(size, dtype=np.uint8)\r\n for k in range(3):\r\n d = np.zeros((m, n), dtype=np.float64)\r\n d[:min(m, n), :min(m, n)] = np.diag(s[k, :])[:, :]\r\n img_svd[:,:,k] = np.dot(np.dot(u[k,:,:], d), vh[k,:,:])\r\n return img_svd", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def svd_shrink(X, tau):\n U,s,V = np.linalg.svd(X, full_matrices=False)\n return np.dot(U, np.dot(np.diag(shrink(s, tau)), V))", "def calculate_posvij_matrices(main_tetrad_ark):\n\n\t# Import all the possible solutions to the Vij matrices\n\tvij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n\tvij_matrices = []\n\n\tprint(\"\t\t\t\t\t\t\t\")\n\tprint(\"\tCalculating Vij matrices\")\n\tprint(\"\t\t\t\t\t\t\t\")\n\t# for i in range(0, len(main_tetrad_ark)):\n\tfor i in range(0, len(vij_possibilities)):\n\t\ttet_i = [x[1] for x in main_tetrad_ark[i]]\n\t\ttri_tet = [np.transpose(i) for i in tet_i]\n\t\tprint(\"# ********************************\")\n\t\t# print(\"\t\t\t\t\t\t\t\t \")\n\t\tprint(\"MATRIX i: \", i)\n\t\tprint(\"\t\t\t\t\t\t\t\t \")\n\t\tfor j in range(0, len(main_tetrad_ark)):\n\t\t\ttet_j = [x[1] for x in main_tetrad_ark[j]]\n\t\t\ttrj_tet = [np.transpose(j) for j in tet_j]\n\t\t\tvij_temp = []\n\t\t\t# print(\"# ********************************\")\n\t\t\tprint(\"\t\t\")\n\t\t\tprint(\"MATRIX j: \", j)\n\t\t\ttemp_zero = np.zeros((4,4), dtype=int)\n\t\t\tfor x in range(0,len(tet_i)):\n\t\t\t\ttest_1half = np.dot(tri_tet[x],tet_j[x])\n\t\t\t\ttest_2half = np.dot(trj_tet[x],tet_i[x])\n\t\t\t\ttest_difs = np.subtract(test_1half, test_2half)\n\t\t\t\t# print(\" \")\n\t\t\t\t# print(test_difs)\n\t\t\t\ttemp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n\t\t\t\tvij_temp.append(temp_mat)\n\t\t\t\t# print(\"\")\n\t\t\ttemp_add1 = np.add(vij_temp[0], vij_temp[1])\n\t\t\ttemp_add2 = np.add(temp_add1, vij_temp[2])\n\t\t\ttempf = np.add(temp_add2, vij_temp[3])\n\t\t\t# tempf = np.divide(temp_add3, 2)\n\t\t\tfor ijx in vij_possibilities:\n\t\t\t\tif np.array_equal(temp_addf, ijx[0]):\n\t\t\t\t\tprint(\"*************$$$$$$$$$$$$$$$$$$***************** 
\")\n\t\t\t\t\tprint(\"l-solution found:\", ijx[1])\n\t\t\t\t\tprint(temp_addf)\n\t\t\t\t\tprint(\"\")\n\t\t\t\t\tprint(ijx[0])\n\t\t\tif np.array_equal(temp_addf, temp_zero):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tvij_matrices.append(temp_addf)\n\t\t\t# print(\"\")\n\t\t\tprint(temp_addf)\n\t\t\t# vij_matrices.append(temp_addf)\n\t\tvijmats_size = sys.getsizeof(vij_matrices)\n\t\tprint(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n\tprint(\"Length of Vij Matrices\")\n\tprint(len(vij_matrices))\n\tpass", "def prepare4iCSD(self, index=0):\n if (\n self.surveys[index].x0_ini_guess == True\n or self.surveys[index].x0_prior == True\n ):\n self._estimateM0_(index=index)\n\n # CONSTRAINSTED INVERSION\n # -----------------------\n # Create vector with weight (data weigth, constrainsts weight and regularisation weigth)\n if self.surveys[index].x0_prior == True: # if relative smallness\n self.reg_w_0_b, self.reg_w_0_A = regularize_w(\n self.surveys[index].reg_A,\n self.wr,\n self.surveys[index].x0_prior,\n x0=self.x0,\n )\n\n # stack weight matrix\n self.W_s_A, self.W_s_b = stack_w(\n self.surveys[index].obs_w,\n self.surveys[index].con_w,\n self.surveys[index].x0_prior,\n reg_w_0_A=self.reg_w_0_A,\n reg_w_0_b=self.reg_w_0_b,\n )\n\n # apply weight to A and b (data weigth, constrainsts weight and regularisation weigth)\n self.A_w = weight_A(\n self.surveys[index].x0_prior, self.surveys[index].A_s, W_s_A=self.W_s_A\n )\n self.b_w = weight_b(\n self.surveys[index].x0_prior, self.surveys[index].b_s, W_s_b=self.W_s_b\n )\n\n # UNCONSTRAINSTED INVERSION\n # -------------------------\n else:\n self.reg_w = regularize_w(self.surveys[index].reg_A, self.wr, self.x0_prior)\n\n self.W_s = stack_w(\n self.surveys[index].obs_w,\n self.surveys[index].con_w,\n self.x0_prior,\n reg_w=self.reg_w,\n )\n self.A_w = weight_A(self.x0_prior, self.surveys[index].A_s, W_s=self.W_s)\n self.b_w = weight_b(self.x0_prior, self.surveys[index].b_s, W_s=self.W_s)", "def get_prox_nuclear(self, x_matrix, scale_factor, prev_u0=None):\n if self.gamma_num_s is None or self.gamma_num_s > 18:\n u, s, vt = np.linalg.svd(x_matrix)\n else:\n tol = scale_factor/10.\n try:\n k = max(1, self.gamma_num_s)\n if prev_u0 is not None:\n u, s, vt = sp.sparse.linalg.svds(x_matrix, v0=prev_u0, k=k, which=\"LM\", tol=tol)\n else:\n u, s, vt = sp.sparse.linalg.svds(x_matrix, k=k, which=\"LM\", tol=tol)\n u = np.matrix(u)\n vt = np.matrix(vt)\n except ValueError as e:\n print \"value error svd\", e\n u, s, vt = np.linalg.svd(x_matrix)\n\n num_nonzero_orig = (np.where(s > scale_factor))[0].size\n thres_s = np.maximum(s - scale_factor, 0)\n nuc_norm = np.linalg.norm(thres_s, ord=1)\n self.gamma_num_s = (np.where(thres_s > 0))[0].size\n\n if s.size > 0:\n prev_u0 = u[:,0]\n else:\n prev_u0 = None\n\n return u * np.diag(thres_s) * vt, nuc_norm, prev_u0", "def svd_images(imagear):\n n = np.shape(imagear)[1]\n u, s, v = np.linalg.svd(imagear/np.sqrt(n-1),full_matrices=False)\n \n return(u, s, v)", "def get_sigmazinv(self):\n\n try:\n out = np.diag(1 / self.eigen_x)\n except AttributeError:\n self.get_eigen(predictor=True)\n out = np.diag(1 / self.eigen_x)\n return out", "def regular(P):\n try:\n cols = P.shape[0]\n ans = np.ones((1, cols))\n # eq = np.matmul(ans, P)\n # s = np.array(np.arange(1, cols + 1))\n eq = np.vstack([P.T - np.identity(cols), ans])\n # va, vec = np.linalg .eig(P)\n results = np.zeros((cols, 1))\n results = np.vstack([results, np.array([1])])\n statetionary = np.linalg.solve(eq.T.dot(eq), 
eq.T.dot(results)).T\n # print(statetionary)\n # print(np.argwhere(statetionary < 0))\n if len(np.argwhere(statetionary < 0)) > 0:\n return None\n return statetionary\n except Exception as e:\n return None", "def lowest_rank_approx(A,e):\n \n \n U,s,Vh=la.svd(A,full_matrices=False)\n t=s.copy()\n t[t>e]=0\n i=t.nonzero()[0][0]\n \n return U[:,:i].dot(np.diag(s[:i])).dot(Vh[:i,:])", "def compute_svd(self,data,k):\n m, n =data.shape\n n = self.comm1.allreduce(n)\n print(m,n)\n if k==-1:\n k = min(m,n)\n args = parse()\n args.m,args.n,args.k,args.comm = m,n,k,self.comms\n args.eps = np.finfo(data.dtype).eps\n if args.m<args.n: args.p_r,args.p_c = 1,self.size\n dsvd = DistSVD(args, data)\n singularValues, U, V = dsvd.svd()\n rel_error = dsvd.rel_error(U, np.diag(singularValues), V)\n if self.global_rank==0: print('relative error is:', rel_error )\n return singularValues,U,V,rel_error", "def MATSOL(N,A):\r\n\r\n X = np.zeros((N+1),dtype=float) # X.shape = N+1\r\n NROW = np.arange(0,N+1,dtype=int) # NROW.shape = N+1\r\n\r\n for i in np.arange(N): # loop through rows\r\n AMAX = np.max(np.abs(A[NROW[i:],i])) # max value for column, all later rows\r\n ip = np.argmax(np.abs(A[NROW[i:],i]))+i # index of above\r\n \r\n if(abs(AMAX) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n if(NROW[i] != NROW[ip]): # swap rows\r\n NC = NROW[i].copy()\r\n NROW[i] = NROW[ip].copy()\r\n NROW[ip] = NC.copy()\r\n \r\n \r\n COEF = A[NROW[i+1:],i]/A[NROW[i],i] # normalize column values by maximum magnitude value (AMAX > 0)\r\n A[NROW[i+1:],i+1:] = A[NROW[i+1:],i+1:] - np.dot(COEF[:,None],A[NROW[i],i+1:][None,:]) # normalize/reduce matrix\r\n \r\n \r\n if(abs(A[NROW[N],N]) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n X[N] = A[NROW[N],N+1]/A[NROW[N],N] # downstream edge\r\n i = N-1\r\n while (i >= 0):\r\n# SUMM = 0.0\r\n# j = i+1\r\n \r\n SUMM = np.sum(A[NROW[i],i+1:N+1]*X[i+1:N+1]) # do not include final column\r\n \r\n# while (j <= N-1):\r\n# SUMM = A[NROW[i],j]*X[j] + SUMM\r\n# j = j+1\r\n # print(SUMM,SUMM2)\r\n \r\n X[i] = (A[NROW[i],N+1] - SUMM)/A[NROW[i],i]\r\n i = i-1\r\n return X", "def TR_algo8(self, h):\n ve = 0\n vd = self._vd\n k = 0\n p = [0,]*self._N\n m = max(self._compact_M)\n vM = sum(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & (~mu & 2**self._N-1)\n r = [bit_component(h, vM - k - (j+1)) for j in range(mu_norm)][::-1]\n r = sum( [rx*2**j for j, rx in enumerate(r)] )\n k = k + mu_norm\n w = gcr_inv(r, mu, pi)\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(self._N):\n p[j] |= bit_component(l, j) << i\n ve = ve ^ (rotate_left(e(w), vd+1))\n vd = (vd + d(w) + 1) % self._N\n return p", "def svd(matrix, rank=None):\n if matrix.ndim != 2:\n raise ValueError('Input should be a two-dimensional array. 
matrix.ndim is {} != 2'.format(matrix.ndim))\n dim_1, dim_2 = matrix.shape\n if dim_1 <= dim_2:\n min_dim = dim_1\n else:\n min_dim = dim_2\n\n if rank is None or rank >= min_dim:\n # Default on standard SVD\n U, S, V = scipy.linalg.svd(matrix)\n U, S, V = U[:, :rank], S[:rank], V[:rank, :]\n return U, S, V\n\n else:\n # We can perform a partial SVD\n # First choose whether to use X * X.T or X.T *X\n if dim_1 < dim_2:\n S, U = scipy.sparse.linalg.eigsh(np.dot(matrix, matrix.T), k=rank, which='LM')\n S = np.sqrt(S)\n V = np.dot(matrix.T, U * 1 / S[None, :])\n else:\n S, V = scipy.sparse.linalg.eigsh(np.dot(matrix.T, matrix), k=rank, which='LM')\n S = np.sqrt(S)\n U = np.dot(matrix, V) * 1 / S[None, :]\n\n # WARNING: here, V is still the transpose of what it should be\n U, S, V = U[:, ::-1], S[::-1], V[:, ::-1]\n return U, S, V.T", "def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1. 
/ tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv", "def getEssentialMatrix(K, F):\n E = np.dot(K.T, np.dot(F, K))\n u, s, v = np.linalg.svd(E)\n\n # We correct the singular values of the E matrix\n s_new = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0]]).reshape(3, 3)\n final_E = np.dot(u, np.dot(s_new, v))\n return final_E", "def normal_modes(self, finite_step):\n\n # Get the mass weighted hessian matrix in amu\n hessian = self.calculate_hessian(finite_step)\n\n # Now get the eigenvalues and vectors\n e_vals, e_vectors = np.linalg.eig(hessian)\n print(e_vals)\n print(e_vectors)", "def _set_ls_matrices(self):\n zz_t = self.z_matrix * self.z_matrix.transpose()\n l, s, l_t = np.linalg.svd(zz_t)\n s[self.p:] = 0\n self.l_matrix = np.matrix(l)\n self.s_matirx = np.matrix(np.diag(s))", "def truncated_svd(A,k=None):\n \n \n \n AHA=np.conj(A).T.dot(A)\n evals,evecs=la.eig(AHA)\n order=np.argsort(evals)\n\n evals=evals[order][::-1].copy()\n evecs=evecs.T[order][::-1].copy()\n m,n=AHA.shape\n \n tol=1e-12\n Vh=[]\n for i in xrange(0,m):\n\t\t if np.abs(evals[i])>=tol:\n\t \t\tVh+=[evecs[i]]\n \n Vh=np.array(Vh)\n s=np.sqrt(evals[:Vh.shape[0]])\n U=[]\n for i in xrange(0,len(s)):\n U+=[(1./s[i])*A.dot(Vh[i])]\n U=np.array(U).T\n \n return U,s,Vh", "def check_non_singular(self, Am):\r\n det = self.detf(Am)\r\n if det != 0:\r\n return det\r\n else:\r\n raise ArithmeticError(\"Singular Matrix!\")", "def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. 
We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr", "def project_Lnuc_ball(X: \"fasta.linalg.Matrix\", t: float) -> \"fasta.linalg.Matrix\":\n U, s, V = la.svd(X)\n\n # Construct the diagonal matrix of singular values, S, as a shrunken version of the original signal values\n S = np.zeros(X.shape)\n S[:len(s),:len(s)] = np.diag(shrink(s, t))\n return U @ S @ V", "def matI(a):\n shape=matShape(a)\n if shape[0]!=shape[1]: raise ValueError\n n=shape[0]\n ret=matZeros((n,n*2))\n for i in range(n):\n for j in range(n):\n matSet(ret,i,j,matGet(a,i,j))\n for i in range(n):\n matSet(ret,i,i+n,1)\n for row in range(n):\n rm=row\n ap=abs(matGet(ret,rm,row))\n for rint in range(row+1,n):\n p=abs(matGet(ret,rint,row))\n if ap<p:\n ap=p\n rm=rint\n if 0.000000001 > ap:\n return matCopy(a) # Not invertible\n di=matGet(ret,rm,row)\n if rm!=row:\n for i in range(n*2):\n t=matGet(ret,rm,i)\n matSet(ret,rm,i,matGet(ret,row,i))\n matSet(ret,row,i,t)\n idi=1.0/di\n for rint in range(row+1,n):\n f=idi*matGet(ret,rint,row)\n if f!=0:\n for co in range(row,n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-f*matGet(ret,row,co))\n row=n-1\n while row>=0:\n ic=1.0/matGet(ret,row,row)\n for rint in range(row):\n icx=ic*matGet(ret,rint,row)\n if icx!=0:\n for co in range(row, n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-icx*matGet(ret,row,co))\n matSet(ret,row,row,ic*matGet(ret,row,row))\n for co in range(n,n*2):\n matSet(ret,row,co,ic*matGet(ret,row,co))\n row-=1\n return matPart(ret,0,n,n,n*2)", "def initialize (A, S):\n ind = get_indicator_vector(A[S,:][:,S])\n x_v = np.zeros(shape=A.shape[0])\n x_v[S] = ind\n # Candidate set to contain only the edges on the periphery of As\n C = []\n for v in S:\n for u in A[v, :].nonzero()[1]:\n if (x_v[u] == 0):\n C.append((v, u))\n return x_v, C", "def fir(timeseries, design):\r\n X = np.matrix(design)\r\n y = np.matrix(timeseries)\r\n h = np.array(linalg.pinv(X.T * X) * X.T * y.T)\r\n return h", "def 
zsx_0(self):\n return self.glb[iz0]/self.glb[ix0]", "def smat(v):\n \n k = len(v)\n n = TriLengthToSquare(k)\n \n A = np.zeros([n,n])\n A[np.triu_indices(n)] = v\n A[np.triu_indices(n,1)] *= 2 / np.sqrt(2)\n return (A + A.T) / 2", "def _vect_matrix_inverse(A):\n identity = np.identity(A.shape[2], dtype=A.dtype)\n return np.array([np.linalg.solve(x, identity) for x in A])", "def initialize(self):\n self.U = range(self.K)\n self.H = np.identity(self.rank)\n temp = 0\n self.S = np.zeros([self.rank, self.rank, self.K])\n for k in range(self.K):\n self.S[:, :, k] = np.identity(self.rank)\n temp += self.X[k].T.dot(self.X[k])\n [eigval, eigvec] = np.linalg.eig(temp)\n self.V = eigvec[:, range(self.rank)]", "def _compute_terms_to_make_leading_submatrix_singular(hessian_info, k):\n hessian_plus_lambda = hessian_info.hessian_plus_lambda\n upper_triangular = hessian_info.upper_triangular\n n = len(hessian_plus_lambda)\n\n delta = (\n np.sum(upper_triangular[: k - 1, k - 1] ** 2)\n - hessian_plus_lambda[k - 1, k - 1]\n )\n\n v = np.zeros(n)\n v[k - 1] = 1\n\n if k != 1:\n v[: k - 1] = solve_triangular(\n upper_triangular[: k - 1, : k - 1], -upper_triangular[: k - 1, k - 1]\n )\n\n return delta, v", "def svd_S(T):\n try:\n S = splinalg.svd(T, full_matrices=False, compute_uv=False)\n except splinalg.LinAlgError:\n S = splinalg.svd(T, full_matrices=False, lapack_driver='gesvd', compute_uv=False)\n return S", "def Sinv(self):\n Wplus = np.matrix(np.diag(self.w**2 + 1.0))\n return self.priorSinvh * self.V.T * Wplus * self.V * self.priorSinvh", "def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # SVD Factorization\n U, s, Vt = scipy.linalg.svd(A, full_matrices=False)\n\n # Remove dimensions related with very small singular values\n U = U[:, s > tol]\n Vt = Vt[s > tol, :]\n s = s[s > tol]\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(z)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n z = U.dot(aux2)\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = V 1/s U.T x\n aux1 = U.T.dot(x)\n aux2 = 1/s*aux1\n z = Vt.T.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def svd(a, full_matrices=False, compute_uv=True):\n #-> gesvd\n a, cv, isMatrix = get_computation_matrix(a)\n t_dtype = TypeUtil.to_numpy_dtype(a.get_dtype())\n (u, s, vt, _) = gesvd(a, compute_uv=compute_uv, \\\n full_matrices=full_matrices, lwork=0,\\\n overwrite_a=1, dtype=t_dtype)\n if not compute_uv:\n if cv:\n return s.to_numpy_array() # ndarray\n else:\n return s # FrovedisVector\n else:\n if cv and isMatrix:\n return (u.to_numpy_matrix(), s.to_numpy_array(),\\\n vt.to_numpy_matrix())\n elif cv and not isMatrix:\n return (u.to_numpy_array(), s.to_numpy_array(),\\\n vt.to_numpy_array())\n else:\n return (u, s, vt)", "def pca(x):\n\t\n\tx = (x - x.mean(axis = 0)) # Subtract the mean of column i from column i, in order to center the matrix.\n\t\n\tnum_observations, num_dimensions = x.shape\n\t\n\t# Often, we have a large number of dimensions (say, 10,000) but a relatively small 
number of observations (say, 75). In this case, instead of directly computing the eigenvectors of x^T x (a 10,000 x 10,000 matrix), it's more efficient to compute the eigenvectors of x x^T and translate these into the eigenvectors of x^T x by using the transpose trick. \n\t# The transpose trick says that if v is an eigenvector of M^T M, then Mv is an eigenvector of MM^T.\n\t# We arbitrarily select \"100\" as the switching threshold. Another approach is to switch by comparing num_observations and num_dimensions.\n\tif num_dimensions > 100:\n\t\teigenvalues, eigenvectors = linalg.eigh(dot(x, x.T))\n\t\tv = (dot(x.T, eigenvectors).T)[::-1] # Unscaled, but the relative order is still correct.\n\t\ts = sqrt(eigenvalues)[::-1] # Unscaled, but the relative order is still correct.\n\telse:\n\t\tu, s, v = linalg.svd(x, full_matrices = False)\n\t\t\n\treturn v, s", "def project(self, new_expn):\n \"\"\"\n data = numpy.array(self.parent.serialisedArrayDataList)\n import sklearn\n skpca = sklearn.decomposition.PCA()\n X_r = skpca.fit(data).transform(data)\n \n self.__v = X_r\n \"\"\"\n # old martrisx\n matrix = numpy.array(self.parent.serialisedArrayDataList)\n U, S, V = numpy.linalg.svd(matrix.T, full_matrices=False)\n \n print(\"matrix\", matrix.shape)\n \n # set-ups\n self.parent = new_expn\n if self.rowwise:\n self.labels = new_expn[self.label_key]\n else:\n self.labels = new_expn.getConditionNames()\n \n matrix = numpy.array(self.parent.serialisedArrayDataList)\n S = numpy.diag(S)\n print(\"U\", U.shape)\n print(\"V\", V.shape)\n print(\"S\", S.shape)\n print(\"matrix\", matrix.shape)\n \n #data = np.dot(U, np.dot(S, V))\n #X_transformed = np.dot(X_transformed, self.V.T)\n print(numpy.dot(S, V).shape)\n\n pr = numpy.dot(matrix, S)\n print(\"pr\", pr.shape)\n #y = x*W;\n #y0 = Y(1,:);\n #sum(abs(y0 - y)) %\n \n # I want a new v. U and D are the same.\n \n self.__v = pr\n \n print(U)\n print()\n print(pr)\n \n print(numpy.allclose(U, pr)) \n print(numpy.allclose(matrix.T, numpy.dot(U, numpy.dot(S, V))))\n return(True)" ]
[ "0.66048837", "0.6466162", "0.6259937", "0.6250825", "0.62505597", "0.62274474", "0.6104567", "0.6089218", "0.6025379", "0.5982765", "0.597328", "0.590215", "0.58907986", "0.58582675", "0.58575904", "0.584388", "0.58408606", "0.58376825", "0.581499", "0.58008623", "0.5792866", "0.57560784", "0.5754681", "0.573956", "0.5721776", "0.5710557", "0.56707674", "0.5659692", "0.5650968", "0.5639219", "0.5630885", "0.5629088", "0.5626873", "0.5615836", "0.5613928", "0.55852765", "0.5566613", "0.5562814", "0.55594915", "0.5549646", "0.55289227", "0.55209863", "0.5490764", "0.54743916", "0.5464799", "0.54568446", "0.5453528", "0.5440958", "0.5440758", "0.54392815", "0.54341847", "0.5433504", "0.54313093", "0.54305494", "0.5421267", "0.54145354", "0.54045105", "0.53959453", "0.5387335", "0.53868663", "0.53835815", "0.5366867", "0.5357951", "0.53560305", "0.53364813", "0.5334518", "0.5326498", "0.53012824", "0.5294708", "0.5294111", "0.5280381", "0.52732414", "0.5269597", "0.52694637", "0.5258422", "0.52538425", "0.5249476", "0.52431375", "0.52352005", "0.52111375", "0.5202409", "0.51951665", "0.518155", "0.5176293", "0.5174969", "0.5166565", "0.51636213", "0.5161851", "0.5161104", "0.51589423", "0.5157191", "0.51358396", "0.51332694", "0.5129083", "0.51276344", "0.5126066", "0.51243526", "0.5123419", "0.5115548", "0.5110807" ]
0.68444854
0
Leverages the 8-point algorithm and implements the RANSAC algorithm to find the inliers and the best fundamental matrix
def getInlierRANSAC(pts1, pts2): # global finalFundamentalMatrix iterations = 50 threshold = 0.01 max_count = 0 n = len(pts1) finalFundamentalMatrix = np.zeros((3, 3)) for i in range(iterations): count = 0 idx = random.sample(range(n - 1), 8) left_pts = pts1[idx] right_pts = pts2[idx] F = computeFundamentalMatrix(left_pts, right_pts) left_feature_inlier = [] right_feature_inlier = [] # print("Sample index: ", len(idx)) for j in range(0, n): homogeneous_right = np.array([pts2[j, 0], pts2[j, 1], 1]) homogeneous_left = np.array([pts1[j, 0], pts1[j, 1], 1]) fit = np.dot(homogeneous_right.T, np.dot(F, homogeneous_left)) # print("Fit for iteration ", i," ", np.abs(fit)) if np.abs(fit) < threshold: left_feature_inlier.append(pts1[j]) right_feature_inlier.append(pts2[j]) count = count + 1 # print('Inlier count', count) inlier_Left = np.array(left_feature_inlier) inlier_Right = np.array(right_feature_inlier) if count > max_count: max_count = count finalFundamentalMatrix = F final_inlier_Left = inlier_Left final_inlier_Right = inlier_Right return finalFundamentalMatrix, final_inlier_Left, final_inlier_Right
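Note: getInlierRANSAC above calls a computeFundamentalMatrix helper that is not included in this record. A minimal sketch of such a helper is given below, assuming the plain (unnormalized) 8-point method with the rank-2 constraint enforced via SVD; the actual helper used by this snippet may differ.

import numpy as np

def computeFundamentalMatrix(pts1, pts2):
    # Stack one epipolar-constraint row per correspondence: x2^T F x1 = 0,
    # where pts1 and pts2 are N x 2 arrays of matched image points.
    A = np.array([[x2 * x1, x2 * y1, x2, y2 * x1, y2 * y1, y2, x1, y1, 1]
                  for (x1, y1), (x2, y2) in zip(pts1, pts2)])
    # F is the right singular vector of A associated with the smallest singular value.
    _, _, Vt = np.linalg.svd(A)
    F = Vt[-1].reshape(3, 3)
    # Enforce the rank-2 (singularity) constraint by zeroing the smallest singular value.
    U, D, Vt = np.linalg.svd(F)
    D[-1] = 0
    return U @ np.diag(D) @ Vt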
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ransac(data, hypothesis, metric, sample_size, num_iter, inlier_thresh):\n N,d = data.shape\n best_frac, best_hypothesis, best_mask = 0, None, None\n for i in range(num_iter):\n js = np.random.choice(N,size=sample_size,replace=False)\n hypothesis_elements = data[js,:]\n H = hypothesis(hypothesis_elements)\n badness = np.array([metric(row,H) for row in data])\n inlier_mask = (badness<inlier_thresh)\n inlier_frac = inlier_mask.mean()\n if inlier_frac>best_frac:\n best_frac, best_hypothesis, best_mask = inlier_frac,H,inlier_mask\n # print(H)\n # print(inlier_mask)\n return best_hypothesis, best_mask", "def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return rmse,mae,ce,r", "def estimate_F_ransac(corr, num_iter, inlier_thresh):\n _, inlier_mask = ransac(corr, estimate_F, sym_epipolar_dist, 8, num_iter, inlier_thresh)\n # inlier_mask = np.ones(9)\n # inlier_mask[0] = 0\n F = estimate_F(corr[inlier_mask.astype(np.bool)])\n return F", "def ransac(matches, kp1, kp2, s=4, threshold=3, maxIterations=2000, returnMatches=False, inlierRatio=0.05, ransacRatio=0.6):\n\n sizes_kp1 = [kp1[dt[0].queryIdx].size for dt in matches]\n sizes_kp2 = [kp1[dt[0].trainIdx].size for dt in matches]\n tup_matches_kp1 = [kp1[dt[0].queryIdx].pt for dt in matches]\n tup_matches_kp2 = [kp2[dt[0].trainIdx].pt for dt in matches]\n matches_kp1 = np.array([[h for h in kp] + [1] for kp in tup_matches_kp1])\n matches_kp2 = np.array([[h for h in kp] + [1] for kp in tup_matches_kp2])\n\n cnt_matches = len(matches)\n\n max_matches = []\n max_p1, max_p2 = [], []\n max_p1_sizes, max_p2_sizes = [], []\n max_total = 0\n\n for iter in range(maxIterations):\n # Find Homography based on random sample\n data = random.sample(matches, s)\n data_p1 = np.array([matches_kp1[dt[0].queryIdx] for dt in data])\n data_p2 = np.array([matches_kp2[dt[0].trainIdx] for dt in data])\n homography = homomat(data_p1[:, :2], data_p2[:, :2])\n\n # Find P1 projection from the homography matrix\n projected_p2 = np.dot(homography, matches_kp1.transpose())\n projected_p2 = projected_p2[0:3] / projected_p2[2] # make sure w' is 1\n projected_p2 = projected_p2.transpose()\n\n # Initialize Current Matches\n current_matches = []\n current_p1, current_p2 
= [], []\n current_p1_sizes, current_p2_sizes = [], []\n current_total = 0\n\n # Check for inliers and outliers for each matches\n for i, (match) in enumerate(matches):\n # normalize the error\n error = np.linalg.norm(matches_kp2[i] - projected_p2[i])\n\n # Check for inliers\n if error < threshold:\n current_matches.append([cv.DMatch(current_total, current_total, match[0].distance)])\n current_p1.append(matches_kp1[i][0:2])\n current_p2.append(matches_kp2[i][0:2])\n current_p1_sizes.append(sizes_kp1[i])\n current_p2_sizes.append(sizes_kp2[i])\n current_total += 1\n\n # If\n if current_total > max_total and current_total >= np.round(inlierRatio*cnt_matches):\n max_matches = current_matches\n max_p1 = current_p1\n max_p2 = current_p2\n max_p1_sizes = current_p1_sizes\n max_p2_sizes = current_p2_sizes\n max_total = current_total\n\n # # we are done in case we have enough inliers\n if current_total > cnt_matches * ransacRatio:\n break\n\n\n # Re-evaluate the Homography based on the best inliers\n max_homography = homomat(np.array(max_p1), np.array(max_p2))\n\n if returnMatches:\n max_kp1 = [cv.KeyPoint(d[0], d[1], max_p1_sizes[i]) for i, d in enumerate(max_p1)]\n max_kp2 = [cv.KeyPoint(d[0], d[1], max_p2_sizes[i]) for i, d in enumerate(max_p2)]\n return max_homography, max_matches, max_kp1, max_kp2\n\n return max_homography", "def create_cands(data):\n\n best = np.zeros(data.dim+1)\n best[0:data.dim] = data.xbest\n best[data.dim] = 1-np.sum(data.xbest)\n\n # Ncand times the best value\n cp_e = np.kron(np.ones((data.Ncand, 1)), np.asmatrix(best))\n # This generates random perturbations\n # need dim+1 to account for the \"missing\" value\n r = np.random.rand(data.Ncand, data.dim+1)\n a = r < data.pertP\n idx = np.where(np.sum(a, axis=1) == 0)\n for ii in range(len(idx[0])):\n f = np.random.permutation(data.dim+1)\n a[idx[0][ii], f[0]] = True\n randnums = np.random.randn(data.Ncand, data.dim+1)\n randnums[a == False] = 0\n pv = randnums*data.sigma_stdev\n # Create new points by adding random fluctucations to best point\n new_pts = cp_e+pv\n\n # Iterative, column wise procedure to force the randomly\n # sampled point to be in [0,1]\n for ii in range(data.dim+1):\n vec_ii = new_pts[:, ii]\n adj_l = np.where(vec_ii < data.xlow)\n vec_ii[adj_l[0]] = data.xlow + (data.xlow - vec_ii[adj_l[0]])\n adj_u = np.where(vec_ii > data.xup)\n vec_ii[adj_u[0]] = data.xup - (vec_ii[adj_u[0]]-data.xup)\n stillout_u = np.where(vec_ii > data.xup)\n vec_ii[stillout_u[0]] = data.xlow\n stillout_l = np.where(vec_ii < data.xlow)\n vec_ii[stillout_l[0]] = data.xup\n new_pts[:, ii] = copy.copy(vec_ii)\n\n new_pts = new_pts / np.sum(new_pts, axis=1)\n\n cp_e = copy.copy(new_pts)\n rand_pts = np.asmatrix(np.random.uniform(0, 1, [data.Ncand, data.dim + 1]))\n cp_r = rand_pts/np.sum(rand_pts, axis=1)\n\n CandPoint = np.concatenate((cp_e, cp_r), axis=0)\n # return only data.dim candidate points\n CandPoint_out = CandPoint[:, 0:data.dim]\n\n return CandPoint_out", "def ransac(cloud_s, cloud_t, \n depth_s, depth_t,\n A_prev, b_prev,\n n_iter, n_inlier_cutoff, d_cutoff):\n import random\n n_s = len(cloud_s)\n n_t = len(cloud_t)\n n_inliers = [0] * n_iter\n# Initialization\n A_init = A_prev\n b_init = b_prev\n pred_t = A_init.dot(cloud_s.T).T + b_init\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n max_inliers = sum(inliers)\n print(\"Have \" + str(n_s) + \" features that could be inliers\")\n print(\"Starting with \" 
+ str(max_inliers) + \" inliers\")\n for iter in range(n_iter):\n assert n_s == n_t, \"clouds not of equal size in ransac()\"\n # TODO: replace this random choice with 3 corresponding feature descriptors\n points_inds = random.sample(range(n_s), 3)\n x_vals = np.array([cloud_s[i] for i in points_inds])\n y_vals = np.array([cloud_t[i] for i in points_inds])\n\n # Using Horn 1987, Closed-form solution of absolute orientation\n # using unit quaternions.\n A_init_tmp, b_init_tmp = horn_adjust(x_vals, y_vals)\n\n # TODO: find inliers to the transformation T\n pred_t = A_init_tmp.dot(cloud_s.T).T + b_init_tmp\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n n_inliers = sum(inliers)\n\n # TODO: do we want to refit on the inliers?\n if n_inliers > max_inliers:\n A_init = A_init_tmp\n b_init = b_init_tmp\n max_inliers = n_inliers\n print(\"Adjusting A and b again!\")\n print(A_init)\n print(b_init)\n\n # TODO: are we using n_inlier_cutoff in this way? Check the paper!\n if max_inliers < n_inlier_cutoff:\n raise Exception('insufficient inliers! Want ' + str(n_inlier_cutoff) +\n ' but got ' + str(max_inliers))\n #max_index = n_inliers.index(max(n_inliers)) \n # Compute the best transformation T_star\n# TODO: actually optimize over the depth field!! using spatial.KDTree and spatial.KDTree.query\n# Need to shift depth1XYZ by our initial transformation first\n depth1XYZ = A_init.dot(depth_s.T).T + b_init\n depth2XYZ = depth_t\n tree = spatial.KDTree(depth2XYZ)\n tree_q = tree.query(depth1XYZ)\n# Keep only matches within the cutoff.\n# depth_pair_inds has indeces for depth1XYZ and depth2XYZ\n cutoff = 0.01\n depth_pair_inds = [(i,tree_q[1][i]) for i in range(len(tree_q[0]))\n if tree_q[0][i] < cutoff]\n #depth_cloud_s = np.array([depth1XYZ[k[0]] for k in depth_pair_inds])\n depth_cloud_s = np.array([depth_s[k[0]] for k in depth_pair_inds])\n depth_cloud_t = np.array([depth2XYZ[k[1]] for k in depth_pair_inds])\n\n# A_d = list(range(n_s))\n# A, b = find_argmin_T(cloud_s, cloud_t, A_d,\n# A_init, b_init)\n A_d = list(range(depth_cloud_s.shape[0]))\n A, b = find_argmin_T(depth_cloud_s, depth_cloud_t, A_d,\n A_init, b_init)\n print(\"A_init value:\")\n print(A_init)\n print(\"b_init value:\")\n print(b_init)\n \n print(\"Returning A, b\")\n print(\"A value:\")\n print(A)\n print(\"b value:\")\n print(b)\n print(\"inliers:\")\n print(max_inliers)\n return(A, b)", "def estimateFundamentalMatrix(x1, x2):\n A = correspondence_matrix(x1, x2)\n # compute linear least square solution\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # constrain F. 
Make rank 2 by zeroing out last singular value\n U, S, V = np.linalg.svd(F)\n S[-1] = 0\n \n F = np.dot(U, np.dot(np.diag(S), V))\n return F", "def pareto_frontier(cmrf,featlist) :\n\tQ = []\n\ttaboodict = {}\n\tnStates = len(featlist)\n\tfeat1,feat2 = featlist\n\tEaxa,Xa = cmrf.decode(feat1)\n\tEbxb,Xb = cmrf.decode(feat2)\n\tif Xa == Xb : \n\t\treturn [Xa],[(Eaxa,Ebxb)]\n\tEaxb = cmrf.score(Xb,feat1)\n\tEbxa = cmrf.score(Xa,feat2)\n\tQ.append((Xa,Xb))\n\tfrontier,frontier_energy = [],[]\n\tfrontier.extend([Xa,Xb])\n\tfrontier_energy.extend([(Eaxa,Ebxa),(Eaxb,Ebxb)])\n\ttaboodict[(Eaxa,Ebxa)] = 1;\n\ttaboodict[(Eaxb,Ebxb)] = 1;\n\twhile len(Q) > 0 :\n\t\t### Optimize \n\t\tXa,Xb = Q[0]\n\t\tQ = Q[1:] # Dequeue\n\t\tEaxb = cmrf.score(Xb,feat1)\n\t\tEbxa = cmrf.score(Xa,feat2)\t\n\t\tEaxa = cmrf.score(Xa,feat1)\n\t\tEbxb = cmrf.score(Xb,feat2)\t\n\t\tm = (Ebxa - Ebxb)/(Eaxa-Eaxb)\n\t\tif m > 0 : \n\t\t\t#stop()\n\t\t\tsys.stderr.write(\"### WARNING : Slope > 0. Cvxhull failed\")\n\t\t\treturn frontier,frontier_energy\n\t\tthetaa = -m/(1-m)\n\t\tthetab = 1/(1-m)\n\t\ttmrf = TMRF(cmrf,[thetaa,thetab],[feat1,feat2])\n\t\tXab = tmrf.decode()[1]\n\t\tEaxab = cmrf.score(Xab,feat1)\n\t\tEbxab = cmrf.score(Xab,feat2)\n\t\tif Xab != Xa and Xab != Xb and \\\n\t\t\tnot taboodict.has_key((Eaxab,Ebxab)) :\n\t\t\t# Check almost equal condition\n\t\t\tif any(map(lambda(x):almost_eq(Eaxab,x[0] or \\\n\t\t\t\talmost_eq(Ebxab,x[1])),taboodict.keys())) : \n\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\tfrontier.append(Xab)\n\t\t\tfrontier_energy.append((Eaxab,Ebxab))\n\t\t\ttaboodict[(Eaxab,Ebxab)]=1\n\t\t\tQ.extend([(Xa,Xab),(Xab,Xb)])\n\t# Calculate energy of frontier elements\t\n\treturn frontier,frontier_energy", "def MATSOL(N,A):\r\n\r\n X = np.zeros((N+1),dtype=float) # X.shape = N+1\r\n NROW = np.arange(0,N+1,dtype=int) # NROW.shape = N+1\r\n\r\n for i in np.arange(N): # loop through rows\r\n AMAX = np.max(np.abs(A[NROW[i:],i])) # max value for column, all later rows\r\n ip = np.argmax(np.abs(A[NROW[i:],i]))+i # index of above\r\n \r\n if(abs(AMAX) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n if(NROW[i] != NROW[ip]): # swap rows\r\n NC = NROW[i].copy()\r\n NROW[i] = NROW[ip].copy()\r\n NROW[ip] = NC.copy()\r\n \r\n \r\n COEF = A[NROW[i+1:],i]/A[NROW[i],i] # normalize column values by maximum magnitude value (AMAX > 0)\r\n A[NROW[i+1:],i+1:] = A[NROW[i+1:],i+1:] - np.dot(COEF[:,None],A[NROW[i],i+1:][None,:]) # normalize/reduce matrix\r\n \r\n \r\n if(abs(A[NROW[N],N]) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n X[N] = A[NROW[N],N+1]/A[NROW[N],N] # downstream edge\r\n i = N-1\r\n while (i >= 0):\r\n# SUMM = 0.0\r\n# j = i+1\r\n \r\n SUMM = np.sum(A[NROW[i],i+1:N+1]*X[i+1:N+1]) # do not include final column\r\n \r\n# while (j <= N-1):\r\n# SUMM = A[NROW[i],j]*X[j] + SUMM\r\n# j = j+1\r\n # print(SUMM,SUMM2)\r\n \r\n X[i] = (A[NROW[i],N+1] - SUMM)/A[NROW[i],i]\r\n i = i-1\r\n return X", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 
two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n 
np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def fit_plane_ransac(pts, neighbors=None,z_pos=None, dist_inlier=0.05, \n min_inlier_frac=0.60, nsample=3, max_iter=100):\n n,_ = pts.shape\n ninlier,models = [],[]\n for i in range(max_iter):\n if neighbors is None:\n p = pts[np.random.choice(pts.shape[0],nsample,replace=False),:]\n else:\n p = pts[neighbors[:,i],:]\n m = fit_plane(p,z_pos)\n ds = np.abs(pts.dot(m[:3])+m[3])\n nin = np.sum(ds < dist_inlier)\n if nin/pts.shape[0] >= min_inlier_frac:\n ninlier.append(nin)\n models.append(m)\n\n if models == []:\n print (\"RANSAC plane fitting failed!\")\n return #None\n else: #refit the model to inliers:\n ninlier = np.array(ninlier)\n best_model_idx = np.argsort(-ninlier)\n n_refit, m_refit, inliers = [],[],[]\n for idx in best_model_idx[:min(10,len(best_model_idx))]:\n # re-estimate the model based on inliers:\n dists = np.abs(pts.dot(models[idx][:3])+models[idx][3])\n inlier = dists < dist_inlier\n m = fit_plane(pts[inlier,:],z_pos)\n # compute new inliers:\n d = np.abs(pts.dot(m[:3])+m[3])\n inlier = d < dist_inlier/2 # heuristic\n n_refit.append(np.sum(inlier))\n m_refit.append(m)\n inliers.append(inlier)\n best_plane = np.argmax(n_refit)\n return m_refit[best_plane],inliers[best_plane]", "def als(matrix, n_factors=8,n_iterations=15, lambda_=10):\r\n\tm, n = matrix.shape\r\n\tQ = matrix\r\n\tW = Q > 0.5\r\n\tW = W.astype(int)\r\n\tprint('X and Y randomly initialzied.')\r\n\tX = 5 * np.random.rand(m, n_factors) \r\n\tY = 5 * np.random.rand(n_factors, n)\r\n\tfor ii in range(n_iterations):\r\n\t\tfor u, Wu in enumerate(W):\r\n\t\t\tX[u] = np.linalg.solve(np.dot(Y, np.dot(np.diag(Wu), Y.T)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(Y, np.dot(np.diag(Wu), Q[u].T))).T\r\n\t\tfor i, Wi in enumerate(W.T):\r\n\t\t\tY[:,i] = np.linalg.solve(np.dot(X.T, np.dot(np.diag(Wi), X)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(X.T, np.dot(np.diag(Wi), Q[:, i])))\r\n\t\tprint('{}th iteration is completed of {}'.format(ii + 1,n_iterations))\r\n\tprediction = np.dot(X,Y)\r\n\tprint('Done.')\r\n\treturn prediction, X, Y", "def question27():\n global conv_residuals\n def catch(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. 
Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n def iterate(rk):\n \"\"\" Preconditioner Function for GMRES.\"\"\"\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk\n\n\n N_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_N = np.zeros(N_search.size)\n\n fig271 = plt.figure(figsize=(13, 8))\n\n for i, n in enumerate(N_search):\n n2 = n**2\n A = construct_matrix_A(n)\n b = np.random.randn(n2)\n M, N = construct_M_N(n)\n mu_max = scipy.sparse.linalg.eigs(M, k=1, which='LM', return_eigenvectors=False)[0].real\n mu_min = scipy.sparse.linalg.eigs(M, k=1, which='SM', return_eigenvectors=False)[0].real\n gamma = np.sqrt(mu_max*mu_min)\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n2, n2), format=\"csr\")\n P1 = gammaI + M\n P2 = gammaI - N\n P3 = gammaI + N\n P4 = gammaI - M\n M = scipy.sparse.linalg.LinearOperator((n2, n2), matvec=iterate)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, M=M, callback=catch)\n steps_till_conv_N[i] += len(conv_residuals)\n n_steps = len(conv_residuals)\n plt.semilogy(range(n_steps), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Steps Required for Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 271 - GMRES + Preconditioner Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(f\"figures/figure271.png\")\n plt.show()\n\n\n fig270 = plt.figure(figsize=(13, 8))\n plt.plot(N_search, steps_till_conv_N)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps until convergence\")\n plt.title(\"Figure 270 - GMRES + Preconditioner Convergence Required for Varying N\", fontsize=13)\n plt.grid()\n plt.savefig(f\"figures/figure270.png\")\n plt.show()\n return", "def scoreCirc_PassiveFilter(circuit, gen, indi, makeRedundancyInMatrix):#TODO\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluatePassiveFilter_SUHAD(gen, indi)#TODO\n \n disfCount = 0\n\n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 0) if gain < 0 else 0\n\n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5)# if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping)# if damping < 60 else 0\n \n #THD = np.array(results['THD']['nominal'], dtype=float)\n #if np.isnan(THD):\n # disfCount = disfCount + 1\n # thd = 0\n #else:\n # thd = THD-1 if THD > 1 else 0\n \n score = 10*r + 
g + 10*d\n\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n\n score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n return score, matrixDensity, matrixQuaziID, results", "def ws06(adp1, adp2):\n # print sum(adp1[:3])/3. - sum(adp2[:3])/3.\n adp1 = get_matrix(adp1)\n adp2 = get_matrix(adp2)\n adp1i = np.linalg.inv(adp1)\n adp2i = np.linalg.inv(adp2)\n a = 2 ** 1.5\n b = np.dot(adp1i, adp2i)\n c = np.linalg.det(b)\n\n # if c <= 0:\n # c *= -1\n d = c ** 0.25\n up = a * d\n\n x = adp1i + adp2i\n y = np.linalg.det(x)\n # if y <= 0:\n # y *= -1\n z = y ** 0.5\n R = up / z\n return 100 * (1 - R)", "def _inexact_alm_l1(imgflt_stack,options):\n # Get basic image information and reshape input\n img_width = imgflt_stack.shape[0]\n img_height = imgflt_stack.shape[1]\n img_size = img_width* img_height\n img_3d = imgflt_stack.shape[2]\n imgflt_stack = np.reshape(imgflt_stack,(img_size, img_3d))\n options['weight'] = np.reshape(options['weight'],imgflt_stack.shape)\n\n # Matrix normalization factor\n temp = np.linalg.svd(imgflt_stack,full_matrices=False,compute_uv=False)\n norm_two = np.float64(temp[0])\n del temp\n\n # A is a low rank matrix that is being solved for\n A = np.zeros(imgflt_stack.shape,dtype=np.float64)\n A_coeff = np.ones((1, img_3d),dtype=np.float64) # per image scaling coefficient, accounts for things like photobleaching\n A_offset = np.zeros((img_size,1),dtype=np.float64) # offset per pixel across all images\n\n # E1 is the additive error. 
Since the goal is determining the background signal, this is the real signal at each pixel\n E1 = np.zeros(imgflt_stack.shape,dtype=np.float64)\n\n # Normalization factors\n ent1 = np.float64(1) # flatfield normalization\n ent2 = np.float64(10) # darkfield normalization\n\n # Weights\n weight_upd = _dct2(np.mean(np.reshape(A,(img_width, img_height, img_3d)),2))\n\n # Initialize gradient and weight normalization factors\n Y1 = np.float64(0)\n mu = np.float64(12.5)/norm_two\n mu_bar = mu * 10**7\n rho = np.float64(1.5)\n\n # Frobenius norm\n d_norm = np.linalg.norm(imgflt_stack,'fro')\n\n # Darkfield upper limit and offset\n B1_uplimit = np.min(imgflt_stack)\n B1_offset = np.float64(0)\n\n # Perform optimization\n iternum = 0\n converged = False\n while not converged:\n iternum += 1\n\n # Calculate the flatfield using existing weights, coefficients, and offsets\n W_idct_hat = _idct2(weight_upd)\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n temp_W = np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n\n # Update the weights\n temp_W = np.reshape(temp_W,(img_width, img_height, img_3d))\n temp_W = np.mean(temp_W,2)\n weight_upd = weight_upd + _dct2(temp_W)\n weight_upd = np.max(np.reshape(weight_upd - options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0) + np.min(np.reshape(weight_upd + options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0)\n W_idct_hat = _idct2(weight_upd)\n\n # Calculate the flatfield using updated weights\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n\n # Determine the error\n E1 = E1 + np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n E1 = np.max(np.reshape(E1 - options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0) + np.min(np.reshape(E1 + options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0)\n\n # Calculate the flatfield coefficients by subtracting the errors from the original data\n R1 = imgflt_stack-E1\n A_coeff = np.reshape(np.mean(R1,0)/np.mean(R1),(1, img_3d))\n A_coeff[A_coeff<0] = 0 # pixel values should never be negative\n\n # Calculate the darkfield component if specified by the user\n if options['darkfield']:\n # Get images with predominantly background pixels\n validA1coeff_idx = np.argwhere(A_coeff<1)[:,1]\n R1_upper = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1)).astype(np.float64)>(np.float64(np.mean(W_idct_hat))-np.float64(10**-5)))[:,0],:]\n R1_upper = np.mean(R1_upper[:,validA1coeff_idx],0)\n R1_lower = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1))<np.mean(W_idct_hat)+np.float64(10**-5))[:,0],:]\n R1_lower = np.mean(R1_lower[:,validA1coeff_idx],0)\n B1_coeff = (R1_upper-R1_lower)/np.mean(R1)\n k = validA1coeff_idx.size\n\n # Calculate the darkfield offset\n temp1 = np.sum(np.square(A_coeff[0,validA1coeff_idx]))\n temp2 = np.sum(A_coeff[0,validA1coeff_idx])\n temp3 = np.sum(B1_coeff)\n temp4 = np.sum(A_coeff[0,validA1coeff_idx]*B1_coeff)\n temp5 = temp2 * temp3 - k*temp4\n if temp5 == 0:\n B1_offset = np.float64(0)\n else:\n B1_offset = (temp1*temp3-temp2*temp4)/temp5\n B1_offset = np.max(B1_offset,initial=0)\n B1_offset = np.min(B1_offset,initial=B1_uplimit/(np.mean(W_idct_hat)+10**-7))\n B_offset = B1_offset * np.mean(W_idct_hat) - B1_offset*np.reshape(W_idct_hat,(-1,1))\n\n # Calculate darkfield\n A1_offset = np.reshape(np.mean(R1[:,validA1coeff_idx],1),(-1,1)) - np.mean(A_coeff[0,validA1coeff_idx]) * np.reshape(W_idct_hat,(-1,1))\n A1_offset = A1_offset - np.mean(A1_offset)\n A_offset = A1_offset - np.mean(A1_offset) - 
B_offset\n\n # Update darkfield weights\n W_offset = _dct2(np.reshape(A_offset,(img_width, img_height)))\n W_offset = np.max(np.reshape(W_offset - options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0) \\\n + np.min(np.reshape(W_offset + options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0)\n\n # Calculate darkfield based on updated weights\n A_offset = _idct2(W_offset)\n A_offset = np.reshape(A_offset,(-1,1))\n A_offset = np.max(np.reshape(A_offset - options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0) \\\n + np.min(np.reshape(A_offset + options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0)\n A_offset = A_offset + B_offset\n\n # Loss\n Z1 = imgflt_stack - A - E1\n\n # Update weight regularization term\n Y1 = Y1 + mu*Z1\n\n # Update learning rate\n mu = np.min(mu*rho,initial=mu_bar)\n\n # Stop if loss is below threshold\n stopCriterion = np.linalg.norm(Z1,ord='fro')/d_norm\n if stopCriterion < options['optimization_tol'] or iternum > options['max_iterations']:\n converged = True\n\n # Calculate final darkfield image\n A_offset = A_offset + B1_offset * np.reshape(W_idct_hat,(-1,1))\n\n return A,E1,A_offset", "def computeFundamentalMatrix(pts1, pts2):\n A = np.empty((8, 9))\n for i in range(len(pts1)-1):\n x1 = pts1[i][0]\n x2 = pts2[i][0]\n y1 = pts1[i][1]\n y2 = pts2[i][1]\n A[i] = np.array([x1 * x2, x2 * y1, x2,\n y2 * x1, y2 * y1, y2,\n x1, y1, 1])\n # Compute F matrix by evaluating SVD\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # Constrain the F matrix to rank 2\n U1, S1, V1 = np.linalg.svd(F)\n # print('Old S', S)\n # S[2] = 0\n S2 = np.array([[S1[0], 0, 0], [0, S1[1], 0], [0, 0, 0]])\n # print('New S', S)\n F = np.dot(np.dot(U1, S2), V1)\n\n return F", "def ransac(matches, kp1, kp2, sample_points=4, iterations=5, inlier_tolerance=3, inlier_ratio=0.45, check=True, return_max_x=False):\n\n best_inlier_count = 0\n best_h = None\n best_inlier_indices = None\n\n # Get all the corresponing matching pairs for both the images\n pts1 = np.array([kp1[m.queryIdx].pt for m in matches])\n pts2 = np.array([kp2[m.trainIdx].pt for m in matches])\n\n # Re-usable variables for all iterations\n homogeneous_pts1 = np.hstack((pts1, np.ones((pts1.shape[0], 1)))).T\n indices = np.arange(len(pts1))\n num_pts = pts1.shape[0]\n required_inliers = inlier_ratio * num_pts\n\n # For number of iterations\n for _ in range(iterations):\n\n # Sample a small set of points from the point match pairs\n indices_to_sample = np.random.choice(indices, sample_points)\n pts1_sample = pts1[indices_to_sample]\n pts2_sample = pts2[indices_to_sample]\n\n # Get the homography matrix\n h = get_homography_matrix(pts1_sample, pts2_sample)\n\n # Find the new points using the homography matrix\n transformed_points = np.dot(h, homogeneous_pts1).T\n\n # Convert it to world coordinates\n last_col = np.copy(transformed_points[:, -1])\n last_col = last_col[:, np.newaxis]\n transformed_points /= last_col\n transformed_points = transformed_points[:, :-1]\n\n # Find the distance between the actual and the mapped points\n distance = np.linalg.norm(pts2 - transformed_points, axis=1)\n inlier_indices = distance < inlier_tolerance\n inlier_count = inlier_indices.sum()\n\n # Update the best_h if the current h has more inliers\n if inlier_count > best_inlier_count:\n best_h = h\n best_inlier_indices = inlier_indices\n best_inlier_count = inlier_count\n\n # If required inliers is reached break\n if 
inlier_count > required_inliers:\n break\n\n # Verbose mode - Print the number of inliers\n if check:\n transformed_points = np.dot(best_h, homogeneous_pts1).T\n # Convert it to world coordinates\n last_col = np.copy(transformed_points[:, -1])\n last_col = last_col[:, np.newaxis]\n transformed_points /= last_col\n transformed_points = transformed_points[:, :-1]\n distance = np.linalg.norm(pts2 - transformed_points, axis=1)\n inlier_count = len(distance[distance < inlier_tolerance])\n print('%2.2f of the points are inliers' %\n (inlier_count / num_pts * 100))\n\n # If x coordinates are needed\n if return_max_x:\n max_x_inlier_1 = ceil(pts1[best_inlier_indices].max(axis=0)[0])\n max_x_inlier_2 = ceil(pts2[best_inlier_indices].max(axis=0)[0])\n return best_h, max_x_inlier_1, max_x_inlier_2\n return best_h", "def ransac(data, model, n, k, t, d, debug=False, return_all=False):\n iterations = 0\n bestfit = None\n # besterr = np.inf\n best_inlier_idxs = None\n while iterations < k:\n maybe_idxs, test_idxs = random_partition(n, data.shape[0])\n maybeinliers = data[maybe_idxs, :]\n test_points = data[test_idxs, :]\n maybemodel = model.fit(maybeinliers)\n test_err = model.get_error(test_points, maybemodel)\n # select indices of rows with accepted points\n also_idxs = test_idxs[test_err < t]\n alsoinliers = data[also_idxs, :]\n if len(alsoinliers) > d:\n betterdata = np.concatenate((maybeinliers, alsoinliers))\n bestfit = model.fit(betterdata)\n # better_errs = model.get_error(betterdata, bettermodel)\n # thiserr = np.mean(better_errs)\n # if thiserr < besterr:\n # bestfit = bettermodel\n # besterr = thiserr\n best_inlier_idxs = np.concatenate((maybe_idxs, also_idxs))\n break\n iterations += 1\n if bestfit is None:\n raise ValueError(\"did not meet fit acceptance criteria\")\n if return_all:\n return bestfit, {'inliers': best_inlier_idxs}\n else:\n return bestfit", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. 
A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def _cce(func, s, sf, bl, bu, mask, icall, maxn, alpha, beta, maxit, printit):\n\n \"\"\"\n List of local variables\n sb(.) = the best point of the simplex\n sw(.) = the worst point of the simplex\n w2(.) = the second worst point of the simplex\n fw = function value of the worst point\n ce(.) = the centroid of the simplex excluding wo\n snew(.) = new point generated from the simplex\n iviol = flag indicating if constraints are violated\n = 1 , yes\n = 0 , no\n \"\"\"\n # Assign the best and worst points:\n sb = s[0,:]\n fb = sf[0]\n sw = s[-1,:]\n fw = sf[-1]\n\n # Compute the centroid of the simplex excluding the worst point:\n ce = np.mean(s[:-1,:],axis=0)\n\n # Attempt a reflection point\n snew = ce + alpha*(ce-sw)\n snew = np.where(mask, snew, sb) # sb should have initial params at mask==False\n\n # Check if is outside the bounds:\n ibound = 0\n # s1 = snew-bl\n # idx = (s1<0).nonzero()\n # if idx[0].size != 0: ibound = 1\n if np.ma.any(np.ma.array(snew-bl, mask=~mask) < 0.): ibound = 1\n\n # s1 = bu-snew\n # idx = (s1<0).nonzero()\n # if idx[0].size != 0: ibound = 2\n if np.ma.any(np.ma.array(bu-snew, mask=~mask) < 0.): ibound = 2\n\n if ibound >= 1:\n snew = _SampleInputMatrix(1,bl,bu,distname='randomUniform')[0]\n snew = np.where(mask, snew, sb)\n\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # Reflection failed; now attempt a contraction point:\n if fnew > fw:\n snew = sw + beta*(ce-sw)\n snew = np.where(mask, snew, sb)\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # Both reflection and contraction have failed, attempt a random point;\n if fnew > fw:\n snew = _SampleInputMatrix(1,bl,bu,distname='randomUniform')[0]\n snew = np.where(mask, snew, sb)\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # end of _cce\n return snew, fnew, icall", "def scoreCirc_ActiveFilter_3(circuit, gen, indi, makeRedundancyInMatrix):\n\n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = 
float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n score = 0\n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateActiveFilter_2(gen, indi)\n\n disfCount = 0\n \n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5) if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(20 - damping) if damping < 20 else 0\n \n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 0)# if gain < 10 else 0.01\n \n THD_Lf = np.array(results['THD_Lf']['nominal'], dtype=float)\n if np.isnan(THD_Lf):\n disfCount = disfCount + 1\n thd_lf = 0\n else:\n thd_lf = THD_Lf-1 if THD_Lf > 1 else 0\n \n THD_Hf = np.array(results['THD_Hf']['nominal'], dtype=float)\n if np.isnan(THD_Hf):\n disfCount = disfCount + 1\n thd_hf = 0\n else:\n thd_hf = THD_Hf-1 if THD_Hf > 1 else 0\n \n #RIN = np.array(results['rin_meas']['nominal'], dtype=float) #--------not in use\n #if np.isnan(RIN):\n # disfCount = disfCount + 1\n # rin = 0\n #else:\n # rin = 1/RIN*1e6 if RIN < 1e7 else 0\n\n isLP = np.array(results['is_LP']['nominal'], dtype=float)\n if np.isnan(isLP):\n disfCount = disfCount + 1\n islp = 0\n else:\n islp = 0 if isLP>0 else 100# np.abs(isLP)\n \n #slope = np.array(results['maxDampingSlope']['nominal'], dtype=float)\n #print slope\n #if np.isnan(slope):\n # disfCount = disfCount + 1\n # slo = 0\n #else:\n # slo = 0 if slope>60 else 60-slope\n \n maxSlope = results['maxDampingSlope']['nominal']\n if type(np.nan) == type(maxSlope) or type(None) == type(maxSlope):\n disfCount = disfCount + 2\n slo = 0\n slof = 0 \n else:\n if len(maxSlope)==2:\n\tslo = 0 if maxSlope[0]>60 else 60-maxSlope[0]\n\tslof = np.log10(abs(maxSlope[1]-1000))\n else:\n\tslo = 0\n\tslof = 0\n\tdisfCount = disfCount + 1 \n \n \n bandwidth = np.array(results['bw']['nominal'], dtype=float)\n if np.isnan(bandwidth):\n #disfCount = disfCount + 1\n bandwidth = 0\n bw = abs(bandwidth-1000)\n \n StaticOut = not results['isOutVNonStationary']['nominal']\n score = 10*slo + 10*r + (100*StaticOut + 10*(thd_lf + thd_hf) + 1*islp + g)#rin!\n\n #print disfCount\n if disfCount > 0:\n score = 0 + np.exp(disfCount) * 1e3\n #print \"disfCount was there\"\n\n #score = score + (IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n \n print \"\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n #print \".\",\n #circuit.objectivesScore = copy(score)\t#id does not work with mpirun since mpirun works with copies\n #circuit.matrixDensity = matrixDensity\n return score, matrixDensity, matrixQuaziID, results", "def question26():\n n = 10\n n2 = n**2\n A = construct_matrix_A(n)\n x0 = np.random.randn(n2)\n b = np.random.randn(n2)\n\n # Compute optimal gamma:\n M, N = construct_M_N(n)\n\n # 
Eigenvalues of M and N are the same, so just use M for this now\n mu_max = scipy.sparse.linalg.eigsh(M, k=1, which='LM', return_eigenvectors=False)[0]\n mu_min = scipy.sparse.linalg.eigsh(M, k=1, which='SM', return_eigenvectors=False)[0]\n\n optimal_gamma_theoretical = np.sqrt(mu_min * mu_max)\n\n # We now verify this using our code:\n gamma_search = np.linspace(0.1, 4, 500)\n iters_array = np.zeros(500, dtype=int)\n\n for i, g in enumerate(gamma_search):\n iters_array[i] = alternative_iterative_method(x0, n, g, b)[1]\n\n min_graph = np.argmin(iters_array)\n min_iter = np.min(iters_array)\n min_gamma = gamma_search[min_graph]\n\n fig260 = plt.figure(figsize=(13, 8))\n plt.plot(gamma_search, iters_array)\n plt.plot(min_gamma, min_iter, 'ro',\n label=f\"Theoretical Gamma = {optimal_gamma_theoretical:.3f}\\n\" \\\n f\"Min Iterations at (Gamma={min_gamma:.3f}, Iters={min_iter})\")\n plt.axvline(x=optimal_gamma_theoretical)\n plt.legend()\n plt.grid()\n plt.xlabel(\"Gamma\")\n plt.ylabel(\"Iterations til Convergence\")\n plt.title(\"Figure 260 - Convergence Steps for Varying Gamma (N=10)\")\n plt.savefig(\"figures/figure260.png\")\n plt.show()\n return", "def scoreCirc_ActiveFilter(circuit, gen, indi, makeRedundancyInMatrix):#TODO\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n #FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateActiveFilter_SUHAD(gen, indi)#TODO\n \n \n disfCount = 0\n \n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5) if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping) if damping < 40 else 0\n \n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 10) if gain < 10 else 0\n \n THD = np.array(results['THD']['nominal'], dtype=float)\n if np.isnan(THD):\n disfCount = disfCount + 1\n thd = 0\n else:\n thd = THD-1 if THD > 1 else 0\n\t \n StaticOut = not results['isOutVNonStationary']['nominal']\n \n score = 5*r + 4*d + 2*g + (100*StaticOut + 10*thd)\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n \n ##add a little salt!\n #score = score + random.uniform(0.0, 1)\n\n score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n \n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n #print \".\",\n return 
score, matrixDensity, matrixQuaziID, results", "def find_inlier(self):\n len_of_matches = len(self.match)\n # The last line of W stores the whole number of consistency of this match\n self.W = np.zeros((len_of_matches+1, len_of_matches))\n for i in np.arange(len_of_matches):\n for j in np.arange(len_of_matches):\n if i >= j:\n continue\n\n # ASSUMPTION : the index of descriptor is the same with the index of image\n wa = self.featureFrameA[self.match[i].queryIdx].pt[0]-self.featureFrameA[self.match[j].queryIdx].pt[0]\n wb = self.featureFrameA[self.match[i].queryIdx].pt[1]-self.featureFrameA[self.match[j].queryIdx].pt[1]\n wa_ = self.featureFrameB[self.match[i].trainIdx].pt[0]-self.featureFrameB[self.match[j].trainIdx].pt[0]\n wb_ = self.featureFrameB[self.match[i].trainIdx].pt[1]-self.featureFrameB[self.match[j].trainIdx].pt[1]\n\n # Compare and complete the matrix W\n if abs(wa-wa_) + abs(wb-wb_) <= INLIER_DIST_THRE:\n self.W[i, j] = 1\n self.W[j, i] = 1\n self.W[len_of_matches, j] += 1\n\n # Choose the best inlier features\n self.best_matches = []\n candidate = np.arange(len_of_matches)\n while True:\n best_matchIdx = self.find_most_compatible_match(candidate)\n if not best_matchIdx or best_matchIdx == -1: # in case no best match is found\n break\n else:\n self.best_matches.append(self.match[best_matchIdx])\n candidate = np.delete(candidate, np.where(candidate == best_matchIdx), axis=0)", "def getRowHeuristics(matrix):\n row, col = matrix.shape\n rHeuristic = np.zeros((row,2)) # Dos columnas. La primera para indicar la columna la segunda para la Heuristica\n for i in range(0,row):\n rHeuristic[i,0] = int(i)\n #print (i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i])))\n rHeuristic[i,1] = 1/sum(matrix[i,:])\n return rHeuristic[rHeuristic[:,1].argsort()]", "def __finalize(self,final_data):\n\t\tcopy_input_data = copy.deepcopy(self.matrix)\n\t\tbest_matrix = self.__set_format_info(copy_input_data,0)\n\t\tbest_matrix = self.__fill_data(best_matrix,final_data,0)\n\t\tmin_penalty = lost_point(best_matrix)\n\t\tbest_mask_pattern = 0\n\t\tfor i in range(1,8):\n\t\t\tcopy_input_data = copy.deepcopy(self.matrix)\n\t\t\ttemp_matrix = self.__set_format_info(copy_input_data,i)\n\t\t\ttemp_matrix = self.__fill_data(temp_matrix,final_data,i)\n\t\t\tpenalty = lost_point(temp_matrix)\n\n\t\t\tif penalty < min_penalty:\n\t\t\t\tbest_matrix = copy.deepcopy(temp_matrix)\n\t\t\t\tbest_mask_pattern = i\n\t\t\t\tmin_penalty = penalty\n\n\t\treturn best_matrix,best_mask_pattern", "def ransac(cloud, sacmodel):\n # Create the segmentation object\n seg = cloud.make_segmenter()\n\n # Set the model you wish to fit \n seg.set_model_type(sacmodel)\n seg.set_method_type(pcl.SAC_RANSAC)\n\n # Max distance for a point to be considered fitting the model\n # Experiment with different values for max_distance \n # for segmenting the table\n max_distance = 0.01\n seg.set_distance_threshold(max_distance)\n\n # Call the segment function to obtain set of inlier indices and model coefficients\n inliers, coefficients = seg.segment()\n return inliers, coefficients", "def scoreCirc_PassiveFilter_2(circuit, gen, indi, makeRedundancyInMatrix):\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = 
float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluatePassiveFilter_2(gen, indi)\n \n disfCount = 0\n\n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 0) if gain < 0 else 0\n\n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5) if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping) if damping < 40 else 0\n\n slope = np.array(results['dumpingSlope']['nominal'], dtype=float)\n if np.isnan(slope):\n disfCount = disfCount + 1\n slo = 0\n else:\n slo = 0 if slope>60 else 60-slope\n \n bandwidth = np.array(results['bw']['nominal'], dtype=float)\n if np.isnan(bandwidth):\n disfCount = disfCount + 1\n bw = 0\n else:\n bw = abs(bandwidth-1000)/100\n \n #THD = np.array(results['THD']['nominal'], dtype=float)\n #if np.isnan(THD):\n # disfCount = disfCount + 1\n # thd = 0\n #else:\n # thd = THD-1 if THD > 1 else 0\n #print 10*r, g, d, slo, bw\n score = 10*r + g + d + slo + bw\n\n if disfCount > 0:\n score += np.exp(disfCount) * 1e3\n\n #score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n return score, matrixDensity, matrixQuaziID, results", "def calcIntrasec(self, gui):\n count = len(self.points)\n numpoints = (self.chessSize[0]*self.chessSize[1])\n\n #create matrices that are needed to compute calibration\n mat = cv.CreateMat(3,3,cv.CV_32FC1)\n distCoeffs = cv.CreateMat(4,1,cv.CV_32FC1)\n p3d = cv.CreateMat(count,3,cv.CV_32FC1) #compute 3D points\n p2d = cv.CreateMat(count,2,cv.CV_32FC1) #compute 2D points\n pointCounts = cv.CreateMat( self.nframe ,1,cv.CV_32SC1) #give numpoints per images\n cv.Set(pointCounts,numpoints)\n rvecs = cv.CreateMat(self.nframe,3,cv.CV_32FC1)\n tvecs = cv.CreateMat(self.nframe,3,cv.CV_32FC1)\n\n i = 0\n row = 0\n col = 0\n cv.Set(p3d,0.0) #to set every values to 0.0... 
and not set Z value\n\n #this compute points in row and cols...\n for p in self.points:\n p2d[i,0] = p[0]\n p2d[i,1] = p[1]\n \n p3d[i,0] = col\n p3d[i,1] = row\n col+=1\n if col >= self.chessSize[0]: \n row+=1\n col=0\n if row >= self.chessSize[1]:\n row = 0\n i+=1\n\n #and now, calibrate...\n cv.CalibrateCamera2(p3d, p2d, pointCounts, self.framesize, mat, distCoeffs, rvecs, tvecs, flags=0)\n gui.setMessage(\"Intrasinc camera parameters checked\")\n\n return (mat, distCoeffs)", "def RANSAC(kp1, kp2, iterations):\n k_max = 0\n m_foe = (0, 0)\n m_inliers = []\n m_outliers = []\n for k in range(iterations):\n # random select 2 different points as sample\n sample = np.random.randint(0, len(kp1), 2)\n if sample[0] == sample[1]:\n continue\n\n # calculate the line through the 2 points\n p1 = kp1[sample[0]], kp2[sample[0]]\n p2 = kp1[sample[1]], kp2[sample[1]]\n\n # the intersection\n foe = get_intersect(p1, p2)\n if foe == (np.inf, np.inf):\n continue\n\n # calculate the inliers and outliers\n inliers, outliers = get_inliers(kp1, kp2, foe, 5)\n\n # update the best feo\n if len(inliers) > k_max:\n k_max = len(inliers)\n m_foe = foe\n m_inliers = inliers\n m_outliers = outliers\n\n return k_max, m_foe, m_inliers, m_outliers", "def scoreCirc_CmosVoltageReference(circuit, gen, indi, makeRedundancyInMatrix): #TODO 6.9.2016 napisi cost function ki se sklada z evaluateCmosVoltageRef\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateCmosVoltageRef(gen, indi)\n disfCount = 0\n \n \n #Vdd sweeps on 3 temperatures - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # -20 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t1 = np.array(results['vout_vdd_temp1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t1)):\n disfCount = disfCount + 1\n vdd_s_t1 = 0\n vdd_s_t1_d = 0\n else:\n x = np.median(vdd_sweep_t1)\n vdd_s_t1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t1_d = np.max(vdd_sweep_t1) - np.min(vdd_sweep_t1)\n \n \n # 25 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t2 = np.array(results['vout_vdd_temp2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t2)):\n disfCount = disfCount + 1\n vdd_s_t2 = 0\n vdd_s_t2_d = 0\n else:\n x = np.median(vdd_sweep_t2)\n vdd_s_t2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t2_d = np.max(vdd_sweep_t2) - np.min(vdd_sweep_t2) \n \n # 120 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t3 = np.array(results['vout_vdd_temp3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t3)):\n disfCount = disfCount + 1\n vdd_s_t3 = 0\n vdd_s_t3_d = 0\n else:\n x = np.median(vdd_sweep_t3)\n vdd_s_t3 = abs(x - VREF) #if x > VREF else 
0\n vdd_s_t3_d = np.max(vdd_sweep_t3) - np.min(vdd_sweep_t3) \n \n #Vdd sweeps on 3 loads - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # 10e6 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r1 = np.array(results['vout_vdd_res1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r1)):\n disfCount = disfCount + 1\n vdd_s_r1 = 0\n vdd_s_r1_d = 0\n else:\n x = np.median(vdd_sweep_r1)\n vdd_s_r1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r1_d = np.max(vdd_sweep_r1) - np.min(vdd_sweep_r1)\n \n # 10e4 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r2 = np.array(results['vout_vdd_res2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r2)):\n disfCount = disfCount + 1\n vdd_s_r2 = 0\n vdd_s_r2_d = 0\n else:\n x = np.median(vdd_sweep_r2)\n vdd_s_r2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r2_d = np.max(vdd_sweep_r2) - np.min(vdd_sweep_r2) \n \n # 10e2 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r3 = np.array(results['vout_vdd_res3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r3)):\n disfCount = disfCount + 1\n vdd_s_r3 = 0\n vdd_s_r3_d = 0\n else:\n x = np.median(vdd_sweep_r3)\n vdd_s_r3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r3_d = np.max(vdd_sweep_r3) - np.min(vdd_sweep_r3) \n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n #---COST FUNCTION DEFINITION---#\n score = vdd_s_t1 + vdd_s_t1_d + \\\n\t vdd_s_t2 + vdd_s_t2_d + \\\n\t vdd_s_t3 + vdd_s_t3_d + \\\n\t vdd_s_r1 + vdd_s_r1_d + \\\n\t vdd_s_r2 + vdd_s_r2_d + \\\n\t vdd_s_r3 + vdd_s_r3_d + \\\n\t (100*powe)\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n if np.isnan(score):\n score = 2e4\n score = score + (IcNc+1) #add small punishment if not all nodes connected\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n \n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n\n return score, matrixDensity, matrixQuaziID, results", "def ani1_feature(Z, R):\n assert len(Z) == R.shape[1]\n assert R.shape[0] == 3\n atom_Z_arr = np.asarray(Z)\n atom_R_arr = R.T\n '''constants'''\n rad_cutoff = 4.6\n ang_cutoff = 3.1\n rad_shifts = np.asarray([\n 5.0000000e-01, 7.5625000e-01, 1.0125000e+00, 1.2687500e+00,\n 1.5250000e+00, 1.7812500e+00, 2.0375000e+00, 2.2937500e+00,\n 2.5500000e+00, 2.8062500e+00, 3.0625000e+00, 3.3187500e+00,\n 3.5750000e+00, 3.8312500e+00, 4.0875000e+00, 4.3437500e+00])\n rad_eta = 1.6000000e+01\n zeta = 8.0000000e+00\n ang_theta_shifts = np.asarray([\n 0.0000000e+00, 7.8539816e-01, 1.5707963e+00, 2.3561945e+00,\n 3.1415927e+00, 3.9269908e+00, 4.7123890e+00, 5.4977871e+00])\n ang_eta = 6.0000000e+00\n ang_shifts = np.asarray([\n 5.0000000e-01, 1.1500000e+00, 1.8000000e+00, 2.4500000e+00])\n\n\n '''compute'''\n ref_Z_list = [1, 6, 7, 8]\n ref_Z_indices_list = [atom_Z_arr == ref_Z for ref_Z in ref_Z_list]\n atom_feat_list = []\n for atom_i, (atom_Z, atom_R) in enumerate(zip(atom_Z_arr, atom_R_arr)):\n atom_feat = []\n for ref_Z_i, ref_Z_indices1 in enumerate(ref_Z_indices_list):\n env_indices1 = 
ref_Z_indices1.copy()\n env_indices1[atom_i] = False\n env_R_arr1 = atom_R_arr[env_indices1]\n dist1 = calc_dist(atom_R, env_R_arr1)\n rad_func = piece_wise_cutoff(dist1, rad_cutoff)\n rad_exp = np.exp(-rad_eta * (dist1 - rad_shifts[:, np.newaxis])) # num_shifts by num_env\n rad_vec = rad_exp.dot(rad_func)\n atom_feat.append(rad_vec)\n for ref_Z_indices2 in ref_Z_indices_list[ref_Z_i:]:\n env_indices2 = ref_Z_indices2.copy()\n env_indices2[atom_i] = False\n env_R_arr2 = atom_R_arr[env_indices2]\n dist2 = calc_dist(atom_R, env_R_arr2)\n ang_func1 = piece_wise_cutoff(dist1, ang_cutoff)\n ang_func2 = piece_wise_cutoff(dist2, ang_cutoff)\n prod_func12 = ang_func1[:, np.newaxis] * ang_func2\n angle = calc_angle(atom_R, env_R_arr1, env_R_arr2)\n\n cos_factor = (1.0 + np.cos(angle[np.newaxis, :, :] - ang_theta_shifts[:, np.newaxis, np.newaxis]))**zeta # num_theta_shifts by num_env1 by num_env2\n ang_exp = np.exp(-ang_eta * (((dist1[:, np.newaxis] + dist2) / 2)[np.newaxis, :, :] - ang_shifts[:, np.newaxis, np.newaxis])**2) # num_shifts by num_env1 by num_env2\n cos_exp = cos_factor[:, None, :, :] * ang_exp[None, :, :, :]\n\n ang_vec = (cos_exp * prod_func12[np.newaxis, np.newaxis, :, :]).sum(axis=-1).sum(axis=-1).ravel()\n ang_vec *= 2**(1.0 - zeta)\n atom_feat.append(ang_vec)\n atom_feat_list.append(np.concatenate(atom_feat))\n return np.stack(atom_feat_list)", "def scoreCirc_VoltageReference(circuit, gen, indi, makeRedundancyInMatrix):\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n \n results = None\n badSweep = 0\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateVoltageRef(gen, indi)\n disfCount = 0\n \n vdd_sweep = np.array(results['vout_vdd']['nominal'], dtype=float) #This line changes Nones to np.nans\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep)):\n disfCount = disfCount + 1\n vdd_s = 0\n vdd_s_d = 0\n #print \"tukej!\", vdd_sweep_scale\n else:\n x = np.median(vdd_sweep)\n vdd_s = abs(x - VREF) #if x > VREF else 0\n vdd_s_d = np.max(vdd_sweep) - np.min(vdd_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n #print \"tukiii\", vdd_sweep_scale\n if (vdd_sweep_scale[-1]<20): #20V\n\tbadSweep = badSweep + 1\n \n rload_sweep = np.array(results['vout_rload']['nominal'], dtype=float)\n rload_sweep_scale = np.array(results['vout_rload_scale']['nominal'], dtype=float)\n # if measurement is empty\n if np.any(np.isnan(rload_sweep)):\n disfCount = disfCount + 1\n rload_s = 0\n rload_s_d = 0\n else:\n x = np.median(rload_sweep)\n rload_s = abs(x - VREF) #if x > VREF else 0\n rload_s_d = np.max(rload_sweep) - np.min(rload_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n if (rload_sweep_scale[-1]<100e3): #100kOhm\n\tbadSweep = badSweep + 1\n \n temp_sweep = np.array(results['vout_temp']['nominal'], dtype=float)\n temp_sweep_scale = np.array(results['vout_temp_scale']['nominal'], dtype=float)\n # if 
measurement is empty OR sweep did not finish completely - check last scale value in runme2!!\n if np.any(np.isnan(temp_sweep)):\n disfCount = disfCount + 1\n temp_s = 0\n temp_s_d = 0\n else:\n x = np.median(temp_sweep)\n temp_s = abs(x - VREF) #if x > VREF else 0\n temp_s_d = np.max(temp_sweep) - np.min(temp_sweep)\n if (temp_sweep_scale[-1]<120): #120 deg celsius\n\tbadSweep = badSweep + 1\n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n #---COST FUNCTION DEFINITION---#\n score = (vdd_s) + (vdd_s_d) + 5*(rload_s) + 5*(rload_s_d) + (100*temp_s) + (100*temp_s_d) + (100*powe) + badSweep*100\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n if np.isnan(score):\n score = 2e4\n score = score + (IcNc+1) #add small punishment if not all nodes connected\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, temp_s, temp_s_d, powe\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, 100*temp_s, 100*temp_s_d, 100*powe\n \n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n\n return score, matrixDensity, matrixQuaziID, results", "def question5():\n \n null_dist = {38: 1, 39: 1, 40: 6, 41: 8, 42: 17, 43: 31, 44: 41, 45: 47, 46: 65, 47: 76,\n 48: 66, 49: 63, 50: 62, 51: 69, 52: 69, 53: 57, 54: 49, 55: 37, 56: 26, 57: 30,\n 58: 33, 59: 23, 60: 25, 61: 20, 62: 5, 63: 11, 64: 11, 65: 7, 66: 5, 67: 4,\n 68: 5, 69: 4, 70: 5, 71: 3, 72: 3, 73: 4, 74: 1, 75: 2, 80: 3, 81: 1, 82: 1,\n 85: 1, 87: 1, 92: 1}\n \n # find mean\n mean = 0\n for key in null_dist.keys():\n mean += key / float(1000) * null_dist[key]\n \n print mean\n \n # find standard deviation\n variance = 0\n for key in null_dist.keys():\n variance += (key - mean) ** 2 / float(1000) * null_dist[key]\n stdev = variance ** 0.5\n \n print stdev\n \n # find z-score\n score = 875\n z_score = (score - mean) / stdev\n \n print z_score", "def get_stain_matrix(I):", "def fit_greedy(data, nnbr=10, threshold=0.05, refit=refit_pll):\n n,m = data.shape;\n L = np.zeros((n,n)) # initialize parameters\n scores = np.zeros(n) \n data = data.astype(int)\n for i in range(n):\n Ni = []\n while (len(Ni)<nnbr):\n Vi = (0*data[i,:] + sum(data[j,:]*(2**jj) for jj,j in enumerate(Ni))).astype(int)\n Vsz = int(Vi.max()+1)\n for j in range(n):\n if j==i or j in Ni: scores[j]=0.; continue\n pIJV = Factor( [Var(0,2),Var(1,2),Var(2,Vsz)] , 0.)\n # pIJV[data[i,:],data[j,:],Vi] += 1. 
# Test??\n for k in range(m): pIJV[data[i,k],data[j,k],Vi[k]] += 1.\n pV = pIJV.marginal([2]); pV /= (pV.sum()+1e-20);\n pIJV /= (pIJV.sum([0])+1e-20)\n scores[j] = ((pIJV.condition({0:1,1:1})-pIJV.condition({0:1,1:0})).abs()*pV).sum()\n jmax = int(np.argmax(scores))\n if scores[jmax] < threshold: break\n Ni.append(jmax)\n # TODO: prune back each list?\n #print(i,\" : \",Ni)\n L[i,Ni] = 1.\n L = L*L.T # \"and\" connectivity: keep only if edges (i,j) and (j,i) present?\n model = Ising(L);\n refit(model,data)\n return model", "def _calc_corrections(self): \n searchIter= self._niter-1\n while searchIter > 0:\n trySavefilename= self._createSavefilename(searchIter)\n if os.path.exists(trySavefilename):\n trySavefile= open(trySavefilename,'rb')\n corrections= sc.array(pickle.load(trySavefile))\n trySavefile.close()\n break\n else:\n searchIter-= 1\n if searchIter == 0:\n corrections= sc.ones((self._npoints,2))\n for ii in range(searchIter,self._niter):\n if ii == 0:\n currentDF= self._dftype(surfaceSigma=self._surfaceSigmaProfile,\n beta=self._beta)\n else:\n currentDF= self._dftype(surfaceSigma=self._surfaceSigmaProfile,\n beta=self._beta,\n corrections=corrections,\n npoints=self._npoints,\n rmax=self._rmax,\n savedir=self._savedir,\n interp_k=self._interp_k)\n newcorrections= sc.zeros((self._npoints,2))\n for jj in range(self._npoints):\n thisSurface= currentDF.surfacemass(self._rs[jj],\n use_physical=False)\n newcorrections[jj,0]= currentDF.targetSurfacemass(self._rs[jj],use_physical=False)/thisSurface\n newcorrections[jj,1]= currentDF.targetSigma2(self._rs[jj],use_physical=False)*thisSurface\\\n /currentDF.sigma2surfacemass(self._rs[jj],\n use_physical=False)\n #print(jj, newcorrections[jj,:])\n corrections*= newcorrections\n #Save\n picklethis= []\n for arr in list(corrections):\n picklethis.append([float(a) for a in arr])\n save_pickles(self._savefilename,picklethis) #We pickle a list for platform-independence)\n return corrections", "def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # QRFactorization\n Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')\n\n if np.linalg.norm(R[-1, :], np.inf) < tol:\n warn('Singular Jacobian matrix. 
Using SVD decomposition to ' +\n 'perform the factorizations.')\n return svd_factorization_projections(A, m, n,\n orth_tol,\n max_refin,\n tol)\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v = np.zeros(m)\n v[P] = aux2\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(z)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v[P] = aux2\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n z = np.zeros(m)\n z[P] = aux2\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = Q inv(R.T) P.T x\n aux1 = x[P]\n aux2 = scipy.linalg.solve_triangular(R, aux1,\n lower=False,\n trans='T')\n z = Q.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def solve_least_squares(train_feat, res_feat):\n\n training = train_feat\n\n result_training = res_feat\n\n trans = get_transpose(training)\n\n mat_mul_trans = matrix_mul(trans, training)\n\n mat_mul_trans = get_inverse(mat_mul_trans)\n\n second_prod = matrix_mul(mat_mul_trans, trans)\n\n return matrix_mul(second_prod, result_training)", "def leastsquares(A,b,qr=qrfact.qri_mgs_piv,alpha=0.5):\n \n\n A = numpy.array(A, dtype=float)\n m,n = A.shape\n z = numpy.zeros( n )\n a = numpy.zeros( n )\n x = numpy.zeros( n )\n b = numpy.transpose(b)[0]\n\n # do the QR factorization\n try:\n Q,R = qr(A)[:2] # Some QR routines return a third permutation P solving AP=QR.\n PA = A\n except TypeError:\n Q,R,P = qr(A,alpha)[:3] # Some QR routines return a third permutation P solving AP=QR.\n AP = numpy.dot( A, P )\n\n # Step 1'': orthogonalization of b against Q\n u = b\n for j in range( 0, n ) :\n # print \"Qj = \", Q[:,j]\n # print \"u = \", u\n # print \"dot = \", numpy.dot( Q[:,j], u )\n z[j] = numpy.dot( Q[:,j], u )\n u = u - z[j] * Q[:,j]\n\n # Step 2'': iterative orthogonalization of u\n ul2norm = numpy.linalg.norm( u )\n ii = 0\n while True : # iterate\n for j in range( 0, n ) :\n a[j] = numpy.dot( Q[:,j], u )\n z[j] = z[j] + a[j]\n u = u - a[j] * Q[:,j]\n\n ii = ii + 1\n ulnorm = ul2norm\n ul2norm = numpy.linalg.norm( u )\n\n #print ul2norm, ulnorm\n \n if (ul2norm > alpha * ulnorm) or ul2norm == 0 :\n # print \"used\", ii, \"orthogonalizations\"\n break\n\n #print z\n #print R\n\n # Step 3'': use back substitution to solve Rx = z\n for i in range( n-1, -1, -1 ) :\n x[i] = z[i]\n for j in range( i+1, n ) :\n x[i] = x[i] - R[i,j] * x[j]\n x[i] = x[i] / R[i,i]\n #print x\n\n #need to permute x according to permutation matrix P\n \n return numpy.dot( P, x )", "def ransac(keypoints1, keypoints2, matches, sampling_ratio=0.5, n_iters=500, threshold=20):\n N = matches.shape[0]\n n_samples = int(N * sampling_ratio)\n\n # Please note that coordinates are in the format (y, x)\n matched1 = pad(keypoints1[matches[:,0]])\n matched2 = pad(keypoints2[matches[:,1]])\n matched1_unpad = keypoints1[matches[:,0]]\n matched2_unpad = keypoints2[matches[:,1]]\n\n max_inliers = np.zeros(N)\n n_inliers = 0\n\n # RANSAC iteration start\n ### YOUR CODE HERE\n raise NotImplementedError() # Delete this line\n ### END YOUR CODE\n return H, matches[max_inliers]", "def estimate_lsq(fp, tp):\n\n fp = 
asarray(fp, float)\n    tp = asarray(tp, float)\n\n    if fp.shape[0] != 3:\n        raise RuntimeError('number of rows in fp must be 3 (there were %d)' % fp.shape[0])\n\n    if tp.shape[0] != 3:\n        raise RuntimeError('number of rows in tp must be 3 (there were %d)' % tp.shape[0])\n\n    if fp.shape[1] != tp.shape[1]:\n        raise RuntimeError('number of points do not match')\n\n    #condition points (important for numerical reasons)\n    #--from points--\n    m = mean(fp[:2], axis=1)\n    maxstd = max(std(fp[:2], axis=1))\n    if abs(maxstd) < 1e-8:\n        # This is a degenerate configuration\n        raise linalg.LinAlgError\n\n    C1 = diag([1/maxstd, 1/maxstd, 1]) \n    C1[0][2] = -m[0]/maxstd\n    C1[1][2] = -m[1]/maxstd\n    fp = dot(C1,fp)\n\n    #--to points--\n    m = mean(tp[:2], axis=1)\n    #C2 = C1.copy() #must use same scaling for both point sets\n    maxstd = max(std(tp[:2], axis=1))\n    if abs(maxstd) < 1e-8:\n        # This is a degenerate configuration\n        raise linalg.LinAlgError\n\n    C2 = diag([1/maxstd, 1/maxstd, 1])\n    C2[0][2] = -m[0]/maxstd\n    C2[1][2] = -m[1]/maxstd\n    tp = dot(C2,tp)\n\n    #create matrix for linear method, 2 rows for each correspondence pair\n    nbr_correspondences = fp.shape[1]\n    A = zeros((2*nbr_correspondences,9))\n    for i in range(nbr_correspondences): \n        A[2*i] = [-fp[0][i],-fp[1][i],-1,0,0,0,tp[0][i]*fp[0][i],tp[0][i]*fp[1][i],tp[0][i]]\n        A[2*i+1] = [0,0,0,-fp[0][i],-fp[1][i],-1,tp[1][i]*fp[0][i],tp[1][i]*fp[1][i],tp[1][i]]\n\n    U,S,V = linalg.svd(A)\n\n    H = V[8].reshape((3,3)) \n\n    #decondition and return\n    return dot(linalg.inv(C2),dot(H,C1))", "def SCF(N, R, Zeta1, Zeta2, Za, Zb, G):\n    Crit = 1e-11  # Convergence criteria\n    Maxit = 250  # Maximum number of iterations\n    Iter = 0\n\n    ######## STEP 1. Guess an initial density matrix ########\n    # Use core hamiltonian for initial guess of F, I.E. (P=0)\n    P = np.zeros([2, 2])\n\n    Energy = 0.0\n\n    while (Iter < Maxit):\n        Iter += 1\n        print(Iter)\n\n        ######## STEP 2. calculate the Fock matrix ########\n        # Form two electron part of Fock matrix from P\n        G = np.zeros([2, 2])  # This is the two electron contribution in the equations above\n        for i in range(2):\n            for j in range(2):\n                for k in range(2):\n                    for l in range(2):\n                        # Coulomb term minus one-half exchange term (note the permuted indices on the exchange integral)\n                        G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, l, k, j])\n\n        # Add core hamiltonian H^CORE to get fock matrix\n        F = H + G\n\n        # Calculate the electronic energy\n        Energy = np.sum(0.5 * P * (H + F))\n\n        print('Electronic energy = ', Energy)\n\n        ######## STEP 3. Calculate F' (remember S^-1/2 is X and S^1/2 is X.T) ########\n        G = np.matmul(F, X)\n        Fprime = np.matmul(X.T, G)\n\n        ######## STEP 4. Solve the eigenvalue problem ########\n        # Diagonalise transformed Fock matrix\n        Diag(Fprime, Cprime, E)\n\n        ######## STEP 5. Calculate the molecular orbitals coefficients ########\n        # Transform eigen vectors to get matrix C\n        C = np.matmul(X, Cprime)\n\n        ######## STEP 6. Calculate the new density matrix from the old P ########\n        Oldp = np.array(P)\n        P = np.zeros([2, 2])\n\n        # Form new density matrix\n        for i in range(2):\n            for j in range(2):\n                # Save present density matrix before creating a new one\n                for k in range(1):\n                    P[i, j] += 2.0 * C[i, k] * C[j, k]\n\n        ######## STEP 7. 
Check to see if the energy has converged ########\n Delta = 0.0\n # Calculate delta the difference between the old density matrix Old P and the new P\n Delta = (P - Oldp)\n Delta = np.sqrt(np.sum(Delta ** 2) / 4.0)\n print(\"Delta\", Delta)\n\n # Check for convergence\n if (Delta < Crit):\n # Add nuclear repulsion to get the total energy\n Energytot = Energy + Za * Zb / R\n print(\"Calculation converged with electronic energy:\", Energy)\n print(\"Calculation converged with total energy:\", Energytot)\n print(\"Density matrix\", P)\n print(\"Mulliken populations\", np.matmul(P, S))\n print(\"Coeffients\", C)\n\n break", "def screening_graph_estimate(S, lambdaL, p, maxdf, idx_scr, threshold=1e-4, max_iter=10000):\n nlambda = lambdaL.shape[0]\n nscr = idx_scr.shape[0]\n x = np.zeros(p * maxdf * nlambda)\n col_cnz = np.zeros(p + 1).astype(int)\n row_idx = np.zeros(p * maxdf * nlambda).astype(int)\n idx_a = np.zeros(nscr).astype(int)\n w1 = np.zeros(p)\n\n cnz = 0\n for m in range(p):\n idx_i = np.copy(idx_scr[:, m])\n w0 = np.zeros(p)\n size_a = 0\n\n for i in range(nlambda):\n ilambda = lambdaL[i]\n gap_ext = 1\n iter_ext = 0\n while gap_ext > 0 and iter_ext < max_iter:\n size_a_prev = size_a\n for j in range(nscr):\n w_idx = idx_i[j]\n if w_idx != -1:\n r = S[m, w_idx]\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[w_idx, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[w_idx] = r - ilambda\n else:\n w1[w_idx] = r + ilambda\n idx_a[size_a] = w_idx\n size_a += 1\n idx_i[j] = -1\n else:\n w1[w_idx] = 0\n w0[w_idx] = w1[w_idx]\n\n gap_ext = size_a - size_a_prev\n\n gap_int = 1\n iter_int = 0\n while gap_int > threshold and iter_int < max_iter:\n tmp1 = 0\n tmp2 = 1e-4\n for j in range(size_a):\n w_idx = idx_a[j]\n r = S[m, w_idx] + w0[w_idx]\n\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[w_idx, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[w_idx] = r - ilambda\n else:\n w1[w_idx] = r + ilambda\n tmp2 += abs(w1[w_idx])\n else:\n w1[w_idx] = 0\n tmp1 += abs(w1[w_idx] - w0[w_idx])\n w0[w_idx] = w1[w_idx]\n gap_int = tmp1 / tmp2\n iter_int += 1\n iter_ext += 1\n\n for j in range(size_a):\n w_idx = idx_a[j]\n x[cnz] = w1[w_idx]\n row_idx[cnz] = i * p + w_idx\n cnz += 1\n col_cnz[m + 1] = cnz\n\n return col_cnz, row_idx, x", "def test_main_interior_sparse():\n\n bound = None\n Name, obj_Netlib = benchmark()\n Name_work = benchmark_work()\n name_benchmark = {}\n\n # Dict name\n j = 0\n for name in Name:\n name_benchmark[name] = obj_Netlib[j]\n j += 1\n\n line = open(\"conclusion1.txt\", \"w\")\n line.write(\n \"{0:17s} {2:>17s} {3:>20s} {1:>20s} {4:>20s} {5:>20s}\\r\\n\".format(\n \"Name\", \"Obj fun\", \"Interi time\", \"Scipy time\", \"Interi\", \"Scipy\"\n )\n )\n line.close()\n\n for i in Name_work[1:12]:\n # REMARK usually use Name_work[1:12]\n print(\"\\n\\nProblem name: {}\".format(i))\n c, Aineq, bineq, Aeq, beq, lb, ub = create_problem_from_mps_matlab(i)\n\n # Scipy\n start_time1 = time.time()\n bounds = create_bound_for_scipy(lb, ub)\n res = linprog(\n c=c,\n A_ub=Aineq,\n b_ub=bineq,\n A_eq=Aeq,\n b_eq=beq,\n bounds=bounds,\n method=\"interior-point\",\n options={\"disp\": True},\n )\n # res = np.nan\n end_time1 = time.time()\n\n # Interior\n start_time2 = time.time()\n # obj_fun = interior_sparse(A=A, b=b, c=c, cTlb=cTb, tol=1e-8)\n obj_fun = new_interior_sparse(\n c=c, Aineq=Aineq, bineq=bineq, Aeq=Aeq, beq=beq, lb=lb, ub=ub, tol=1e-6\n )\n end_time2 = time.time()\n\n # information\n print(\"File name : {}\".format(i))\n 
print(\"obj fun Netlib: {0}\".format(name_benchmark[i]))\n print(\"obj fun interi: {0}\".format(obj_fun))\n print(\"obj fun scipy : {0}\".format(res.fun))\n # print(\"obj fun scipy : {0}\".format(np.nan))\n print(\"interior time : {}\".format(end_time2 - start_time2))\n print(\"scipy time : {}\".format(end_time1 - start_time1))\n\n # save to text file\n line = open(\"conclusion1.txt\", \"a\")\n line.write(\n \"{0:17s} {2:17.2f} {3:>20.2f} {1:20.2f} {4:20.2f} {5:20.2f}\\r\\n\".format(\n i,\n name_benchmark[i],\n end_time2 - start_time2,\n end_time1 - start_time1,\n obj_fun,\n res.fun,\n # np.nan\n )\n )\n line.close()", "def second_heuristic(self):\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n # aceasta matrice indica valoarea pe care o are mutarea unei piese pe o celula aleasa\r\n # se va aduna la media ponderilor adunate in lista weights\r\n\r\n # mijlocul tablei este punctul cel mai vulnerabil\r\n # in timp ce lateralele sunt sigure,iar linia bazei transforma piesa in rege\r\n\r\n points = [[0, 4, 0, 4, 0, 4, 0, 4],\r\n [4, 0, 3, 0, 3, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 1, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 4, 0, 4, 0, 4, 0]]\r\n\r\n weights = [0 for i in range(4)]\r\n whites, blacks = 0, 0\r\n for i in range(8):\r\n for j in range(8):\r\n\r\n # numaram discurile de fiecare culoarea\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n\r\n if self.matrix[i][j] in [self.current_player, self.current_player.upper()]:\r\n\r\n # daca e piesa normala\r\n if self.matrix[i][j] == self.current_player:\r\n weights[0] += 4\r\n\r\n # cat de aproape este piesa de a deveni rege ( nr de linii din tabla - cate mai are pana ajunge pe ultima linie)\r\n\r\n # cu cat se apropie piesa mai multe de a deveni rege, scorul creste( negru - rege pentru i=0, alb -rege pentru i =7)\r\n if self.matrix[i][j] == 'n':\r\n weights[1] += (7 - i)\r\n elif self.matrix[i][j] == 'a':\r\n weights[1] += i\r\n else:\r\n # daca e piesa rege\r\n weights[0] += 8\r\n\r\n # cat de aproape este piesa rege de celelalte piese\r\n for d in directions:\r\n if self.matrix[i][j] == self.current_player.upper():\r\n # gaseste pe diagonala in directia d, o piesa adversara,daca exista\r\n x, y = self.find_piesa(i, j, d)\r\n if x and y:\r\n weights[2] += (x - i) * (x - i) + (y - j) * (y - j)\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n # piesele pe care le poate captura jucatorul, daca e piesa rege are un scor mai mare\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n if self.matrix[next_x][next_y] == self.opponent().upper():\r\n weights[3] += 7\r\n else:\r\n weights[3] += 4\r\n # piese care pot fi capturate; la fel daca este piesa rege atunci se scade mai mult scorul\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[3] -= 6\r\n else:\r\n weights[3] -= 3\r\n # adunam piesa la media sumei date pentru a face AI-ul in caz de egalitate a scorului\r\n # sa imi aleaga piesa care ma pozitioneaza mai bine\r\n if self.move:\r\n return sum(weights) / 4 + 
points[self.move[0]][self.move[1]]\r\n return sum(weights) / 4\r\n\r\n def __str__(self):\r\n s = ' '\r\n for i in range(8):\r\n s += str(i) + ' '\r\n s += '\\n'\r\n for index, line in enumerate(self.matrix):\r\n s += str(chr(index + ord('a'))) + ' '\r\n for el in line:\r\n s += str(el) + ' '\r\n s += '\\n'\r\n\r\n return s", "def one_against_rest_all(expi,X,Y):\n pp=expi.pp\n Ntot=Y.size\n numfeat=X.shape[1]\n numclass=len(expi.ddtr)\n W=np.zeros((numfeat,numclass))\n II=range(Ntot)\n # Initial stepsize\n td=1\n # Check stepsize factor isn't too large\n eps=.01\n locltp=pp.pltp\n if pp.stoch<0:\n locltp=min(pp.pltp,abs(1./pp.stoch)-eps)\n print('Time Step', pp.pltp, locltp)\n up=0\n dup=0\n # Iterations.\n for it in range(pp.numit):\n np.random.shuffle(II)\n up=0\n dup=0\n fac1=0\n # Loop over data\n for i in II:\n # Penalized weights\n fac2=locltp/td\n if pp.stoch<0:\n fac1=(1.+pp.stoch*fac2)\n W*=fac1\n\n tempx=X[i,:]\n tempy=Y[i]\n h=np.dot(tempx,W)\n # We are using binary features so updates in the direction of the example occurs only when the features are nonzero\n # This is NOT the general case.\n pi=tempx>0\n # Update perceptrons for each class.\n for c in range(numclass):\n # A different weight constrain version -Max weight contraint\n if pp.stoch==0:\n if tempy==c:\n tempw=W[:,c]<=pp.Jmax-fac2\n if np.sum(tempw)<len(tempw):\n print('hit upper bound')\n tempw.shape=pi.shape\n pi=np.logical_and(pi, tempw)\n else:\n tempw=W[:,c]>=-pp.Jmax+fac2\n if np.sum(tempw)<len(tempw):\n print('hit lower bound')\n tempw.shape=pi.shape\n pi=np.logical_and(pi,tempw)\n # Update weights on class\n if (tempy==c and h[c]<=pp.deltaP):\n dup+=1\n # Count number of updates.\n up+=np.count_nonzero(pi)\n W[pi,c]=W[pi,c]+fac2\n # Update weight for off class examples.\n elif (tempy!=c and h[c]>=-pp.deltaD):\n dup+=1\n # Count number of updates.\n up+=np.count_nonzero(pi)\n W[pi,c]=W[pi,c]-fac2\n # Reduce time step after a sweep through all data.\n if pp.stoch<0:\n td=td+1\n # Show energy value.\n if (np.mod(it,pp.showing)==0):\n DD=0\n for c in range(numclass):\n Yc=2*(Y.T==c)-1\n DD+=np.sum(np.maximum(np.zeros(Ntot),pp.deltaP-np.dot(X,W[:,c])*Yc));\n PP=-.5*pp.stoch*np.sum(W*W)\n EE=DD+PP\n print('td ', td, 'it ', it, 'Number of syn changes ', up, ' at ', dup, ' Data term ', DD, 'Prior term ', PP, 'Total ', EE)\n # Nothing is changing - stop the algorithm.\n if up==0:\n break\n DD=0\n # Final energy.\n for c in range(numclass):\n DD+=np.sum(np.maximum(np.zeros(Ntot),pp.deltaP-np.dot(X,W[:,c])*(2*(Y.T==c)-1)));\n PP=-.5*pp.stoch*np.sum(W*W)\n EE=DD+PP\n print('td ', td, 'it ', it, 'Number of syn changes ', up, ' at ', dup, ' Data term ', DD, 'Prior term ', PP, 'Total ', EE)\n return W", "def compute_cost_matrix(self):\n\n if rank == 0:\n #do random sampling of a parameters\n if self.sampling == \"LHS\":\n lhs = Lhs(lhs_type=\"classic\", criterion=None)\n param_samples = lhs.generate(self.sample_space, self.niters)\n elif self.sampling == \"rsampling\":\n param_samples = self.sample_space.rvs(self.niters)\n elif self.sampling == \"Sobol\":\n sobol = Sobol()\n param_samples = sobol.generate(self.sample_space.dimensions, self.niters)\n \n # generate param samples split\n niters_rank0 = self.niters//size + self.niters % size\n niters_rank = self.niters//size\n count_scatter = [niters_rank0]\n count_scatter.extend((size-2)*[niters_rank])\n count_scatter = np.cumsum(count_scatter)\n\n param_samples_split = np.split(param_samples,count_scatter)\n else:\n param_samples_split = None\n \n #scatter parameter samples 
data\n param_samps = comm.scatter(param_samples_split,root=0)\n\n # initialize data\n param_samples_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n jac_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n qoi_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n\n \n\n # evaluate QoI at random sampling\n for sample in param_samps: \n qoi_sample, jac_sample = self.jac(sample).values()\n # store output\n for qoi_name in self.funcnames:\n if not (jac_sample[qoi_name] is None):\n param_samples_dict_rank[qoi_name].append(jac_sample[qoi_name])\n jac_dict_rank[qoi_name].append(jac_sample[qoi_name])\n qoi_dict_rank[qoi_name].append(qoi_sample[qoi_name])\n else:\n param_samples_diff_dict_rank[qoi_name].append(sample)\n\n # gather data\n param_samples = None\n param_samples_diff_int = None\n jac_dict = None\n qoi_dict= None\n\n param_samples_dict = comm.gather(param_samples_dict_rank, root=0)\n params_samples_diff_dict = comm.gather(param_samples_diff_dict_rank, root=0)\n jac_dict = comm.gather(jac_dict_rank, root=0)\n qoi_dict = comm.gather(qoi_dict_rank, root=0)\n\n # format gathered data\n if rank == 0:\n #flatten data\n param_samples_dict_flattened = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n jac_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n qoi_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n\n for cpurank in range(size):\n for qoi_name in self.funcnames:\n param_samples_dict_flattened[qoi_name].extend(param_samples_dict[cpurank][qoi_name]) \n param_samples_diff_dict_flattened[qoi_name].extend(params_samples_diff_dict[cpurank][qoi_name])\n jac_dict_flattened[qoi_name].extend(jac_dict[cpurank][qoi_name])\n qoi_dict_flattened[qoi_name].extend(qoi_dict[cpurank][qoi_name])\n\n #compute outer product\n jac_outer_dict = {qoi_name: [] for qoi_name in self.funcnames}\n nfuncs_dict = {qoi_name: 0 for qoi_name in self.funcnames}\n\n for qoi_name in self.funcnames:\n for i in range(len(jac_dict_flattened[qoi_name])):\n jac_sample = jac_dict_flattened[qoi_name][i]\n jac_outer_dict[qoi_name].append(np.outer(jac_sample,jac_sample))\n nfuncs_dict[qoi_name] += 1\n\n # compute cost matrix and norm convergence\n cost_matrix_dict = {}\n cost_matrix_cumul_dict = {}\n norm_convergence_dict = {}\n\n for qoi_name in self.funcnames:\n cost_cumsum = np.cumsum(jac_outer_dict[qoi_name],axis=0)/np.arange(1,nfuncs_dict[qoi_name]+1)[:,None,None]\n cost_matrix_cumul_dict[qoi_name] = cost_cumsum\n cost_matrix_dict[qoi_name] = cost_cumsum[-1,:,:]\n norm_convergence_dict[qoi_name] = np.linalg.norm(cost_cumsum,ord='fro',axis=(1,2))\n\n # compute variance matrix\n variance_matrix_dict = {}\n for qoi_name in self.funcnames:\n variance_mat = np.sum((jac_outer_dict[qoi_name]-cost_matrix_dict[qoi_name])**2/(nfuncs_dict[qoi_name]-1),axis=0) \n variance_matrix_dict[qoi_name] = variance_mat\n\n param_results = {\"PARAM_SAMPLES\": param_samples_dict_flattened,\n \"DIFFICULT_PARAM_SAMPLES\": param_samples_diff_dict_flattened}\n\n fun_results = {\"NUMBER_OF_FUNCTION_SUCCESS\": nfuncs_dict,\n \"NORM_OF_SEQ_OF_CUMUL_SUMS\": norm_convergence_dict,\n \"SEQ_OF_CUMUL_SUMS\": cost_matrix_cumul_dict, \n \"VARIANCE_OF_ENTRIES\": variance_matrix_dict,\n \"FINAL_COST_MATRIX\":cost_matrix_dict}\n\n return {'PARAMETER_RESULTS': param_results, 'FUNCTION_RESULTS': fun_results}", "def 
reassignedSmethod(fx,nh=2**7-1,tstep=2**4,nfbins=2**9,df=1.0,alpha=4,\r\n thresh=.01,L=5): \r\n \r\n# if type(fx) is list:\r\n# fx=np.array(fx)\r\n# try:\r\n# fn,fm=fx.shape\r\n# if fm>fn:\r\n# fm,fn=fx.shape\r\n# except ValueError:\r\n# fn=len(fx)\r\n# fm=1\r\n# if fm>1:\r\n# print 'computing cross spectra'\r\n# #compute the analytic signal of function f and dctrend\r\n# #fa=sps.hilbert(dctrend(fx[0]))\r\n# #fb=sps.hilbert(dctrend(fx[1]))\r\n# fa=fx[0]\r\n# fb=fx[1]\r\n# fa=fa.reshape(fn)\r\n# fb=fb.reshape(fn)\r\n# else:\r\n# fa=fx\r\n# fa=fa.reshape(fn)\r\n# fb=fa.copy()\r\n\r\n \r\n nx=len(fx) \r\n \r\n #compute gaussian window\r\n h=gausswin(nh,alpha=alpha)\r\n #h=np.hanning(nh)\r\n lh=(nh-1)/2\r\n \r\n #compute ramp window\r\n th=h*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n #compute derivative of window\r\n dh=dwindow(h)\r\n \r\n #make a time list of indexes\r\n tlst=np.arange(start=0,stop=nx,step=tstep)\r\n nt=len(tlst)\r\n \r\n #make frequency list for plotting\r\n flst=np.fft.fftfreq(nfbins,1./df)[:nfbins/2]\r\n \r\n #initialize some time-frequency arrays\r\n tfh=np.zeros((nfbins,nt),dtype='complex128')\r\n tfth=np.zeros((nfbins,nt),dtype='complex128')\r\n tfdh=np.zeros((nfbins,nt),dtype='complex128')\r\n \r\n #compute components for reassignment\r\n for ii,tt in enumerate(tlst):\r\n #create a time shift list\r\n tau=np.arange(start=-min([np.round(nx/2.),lh,tt-1]),\r\n stop=min([np.round(nx/2.),lh,nx-tt-1])+1)\r\n #compute the frequency spots to be calculated\r\n ff=np.remainder(nfbins+tau,nfbins)\r\n #make lists of data points for each window calculation\r\n xlst=tt+tau\r\n hlst=lh+tau\r\n normh=np.sqrt(np.sum(abs(h[hlst])**2))\r\n tfh[ff,ii]=fx[xlst]*h[hlst].conj()/normh\r\n tfth[ff,ii]=fx[xlst]*th[hlst].conj()/normh\r\n tfdh[ff,ii]=fx[xlst]*dh[hlst].conj()/normh\r\n \r\n #compute Fourier Transform\r\n spech=np.fft.fft(tfh,axis=0)\r\n specth=np.fft.fft(tfth,axis=0)\r\n specdh=np.fft.fft(tfdh,axis=0)\r\n \r\n #get only positive frequencies\r\n spech=spech[nfbins/2:,:]\r\n specth=specth[nfbins/2:,:]\r\n specdh=specdh[nfbins/2:,:]\r\n \r\n #check to make sure no spurious zeros floating around\r\n szf=np.where(abs(spech)<1.E-6)\r\n spech[szf]=0.0+0.0j\r\n zerofind=np.nonzero(abs(spech))\r\n twspec=np.zeros((nfbins/2,nt),dtype='float')\r\n dwspec=np.zeros((nfbins/2,nt),dtype='float')\r\n twspec[zerofind]=np.round(np.real(specth[zerofind]/spech[zerofind]))\r\n dwspec[zerofind]=np.round(np.imag((nfbins/2.)*specdh[zerofind]/\r\n spech[zerofind])/(np.pi))\r\n \r\n #get shape of spectrogram\r\n nf,nt=spech.shape\r\n \r\n #-----calculate s-method-----\r\n Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')\r\n\r\n #make and empty array of zeros\r\n sm=np.zeros_like(spech)\r\n \r\n #put values where L cannot be value of L, near top and bottom\r\n sm[0:L/2,:]=abs(spech[0:L/2,:])**2\r\n sm[-L/2:,:]=abs(spech[-L/2:,:])**2\r\n\r\n #calculate s-method\r\n for ff in range(L/2,nf-L/2-1):\r\n sm[ff,:]=2*np.real(np.sum(spech[ff+Llst,:]*spech[ff-Llst,:].conj(),\r\n axis=0))/L\r\n \r\n #------compute reassignment----- \r\n\r\n \r\n rtfarray=np.zeros((nfbins/2,nt))\r\n \r\n threshold=thresh*np.max(abs(sm))\r\n \r\n for nn in range(nt):\r\n for kk in range(nf):\r\n if abs(spech[kk,nn])>threshold:\r\n #get center of gravity index in time direction from spectrogram \r\n nhat=int(nn+twspec[kk,nn])\r\n nhat=int(min([max([nhat,1]),nt-1]))\r\n #get center of gravity index in frequency direction from spec\r\n khat=int(kk-dwspec[kk,nn])\r\n 
khat=int(np.remainder(np.remainder(khat-1,nfbins/2)+nfbins/2,\r\n nfbins/2))\r\n rtfarray[khat,nhat]=rtfarray[khat,nhat]+abs(sm[kk,nn])\r\n else:\r\n rtfarray[kk,nn]=rtfarray[kk,nn]+sm[kk,nn]\r\n\r\n #place values where L cannot be L \r\n rtfarray[:L/2,:]=abs(sm[:L/2,:])\r\n rtfarray[-L/2:,:]=abs(sm[-L/2:,:])\r\n \r\n tz=np.where(rtfarray==0)\r\n rtfarray[tz]=1.0\r\n \r\n tz=np.where(sm==0.0)\r\n sm[tz]=1.0 \r\n \r\n #scale\r\n rtfarray=abs(rtfarray)\r\n \r\n return rtfarray,tlst,flst,sm", "def _matrix_store_smooth_downhill(self):\n \n import time\n from scipy import sparse as sparse\n from scipy.sparse import linalg as linalgs \n \n\n t = time.clock()\n\n\n size = 0\n for nl in self.neighbour_array_lo_hi:\n size += 3 # len(nl)\n\n row_array = np.empty(size, dtype = int)\n col_array = np.empty(size, dtype = int)\n slope_array = np.zeros(size)\n local_slope_array = np.zeros(64)\n\n\n idx=0 \n for row in range(0, len(self.neighbour_array_lo_hi)): \n neighbours = self.neighbour_array_lo_hi[row] \n npoints = self.tri.points[neighbours]\n\n ## work out (downhill) gradient to (max of three) nearby neighbours\n \n\n for col, column in enumerate(neighbours[0:3]): \n \n delta_h = self.height[column] - self.height[row] \n\n\n if delta_h < 0.0:\n delta_s2 = (self.x[column] - self.x[row])**2 + (self.y[column] - self.y[row])**2\n local_slope_array[col] = ( delta_h**2 / delta_s2 )**5\n\n elif delta_h == 0.0 and self.bmask[row] == False:\n local_slope_array[col] = 1.0e-20\n\n else:\n local_slope_array[col] = 1.0e-20 \n \n # Normalise this so that it conserves mass (note - low points will have no contributions here !) \n \n norm = local_slope_array[0:len(neighbours)].sum()\n if norm != 0.0:\n norm = 1.0 / norm\n\n for col, column in enumerate(neighbours[0:3]): \n row_array[idx] = row\n col_array[idx] = column \n slope_array[idx] = local_slope_array[col] * norm\n\n idx += 1\n\n # We can re-pack this array into a sparse matrix for v. 
fast computation of downhill operator \n\n slopeCOO = sparse.coo_matrix( (slope_array, (row_array, col_array)) ).T\n slopeMat = slopeCOO.tocsr() \n \n print \"SlopeMat.shape \", slopeMat.shape, size\n\n # slopeNormVec = np.array(slopeMat.sum(axis=1)).T[0]\n # slopeNormVec[slopeNormVec != 0.0] = 1.0 / slopeNormVec[slopeNormVec != 0.0]\n # slopeNormMat = sparse.eye(self.tri.npoints)\n # slopeNormMat.setdiag(slopeNormVec)\n # slopeMat = slopeNormMat.dot(slopeMat)\n\n slopeMat.eliminate_zeros()\n self.smoothDownhillMat = slopeMat\n\n return", "def cp_apr(X, Y1, R, Minit=None, tol=1e-4, maxiters=1000, maxinner=50,\n epsilon=1e-10, kappatol=1e-10, kappa=1e-2):\n N = X.ndims()\n \n ## Random initialization\n if Minit == None:\n F = tensorTools.randomInit(X.shape, R)\n Minit = ktensor.ktensor(np.ones(R), F);\n nInnerIters = np.zeros(maxiters);\n\n ## Initialize M and Phi for iterations\n M = Minit\n M.normalize(1)\n Phi = [[] for i in range(N)]\n kktModeViolations = np.zeros(N)\n kktViolations = -np.ones(maxiters)\n nViolations = np.zeros(maxiters)\n\n lambda2=0.1\n lambda3=0.1\n sita=np.random.rand(R+1,1);\n ## statistics\n cpStats = np.zeros(7)\n '''\n print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'\n print M.U[0][1,:]\n print M.U[0].shape\n print Demog[1]\n print DemoU[1]\n print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'\n '''\n for iteration in range(maxiters):\n startIter = time.time()\n isConverged = True;\n for n in range(N):\n startMode = time.time()\n ## Make adjustments to M[n] entries that violate complementary slackness\n if iteration > 0:\n V = np.logical_and(Phi[n] > 1, M.U[n] < kappatol)\n if np.count_nonzero(V) > 0:\n nViolations[iteration] = nViolations[iteration] + 1\n #print 'V:',V.shape,V.dtype\n #print 'M.U[n]',M.U[n].shape,M.U[n].dtype\n M.U[n][V > 0] = M.U[n][V > 0] + kappa\n if n==0:\n sita=__solveLinear(M.U[n],Y1,lambda3)\n # lr=LogisticRegression()\n #sita=lr.fit(M.U[n],Y1).coef_\n #print 'sita'\n #print sita\n #print 'demoU'\n #print DemoU[0]\n M, Phi[n], inner, kktModeViolations[n], isConverged = __solveSubproblem1(X, M, n, maxinner, isConverged, epsilon, tol,sita,Y1, lambda2)\n else:\n M, Phi[n], inner, kktModeViolations[n], isConverged = __solveSubproblem0(X, M, n, maxinner, isConverged, epsilon, tol)\n elapsed = time.time() - startMode\n # only write the outer iterations for now\n #cpStats = np.vstack((cpStats, np.array([iteration, n, inner, tensorTools.lsqrFit(X,M), tensorTools.loglikelihood(X,[M]), kktModeViolations[n], elapsed])))\n\n kktViolations[iteration] = np.max(kktModeViolations)\n elapsed = time.time()-startIter\n #cpStats = np.vstack((cpStats, np.array([iter, -1, -1, kktViolations[iter], __loglikelihood(X,M), elapsed])))\n print(\"Iteration {0}: Inner Its={1} with KKT violation={2}, nViolations={3}, and elapsed time={4}\".format(iteration, nInnerIters[iteration], kktViolations[iteration], nViolations[iteration], elapsed))\n if isConverged:\n break\n\n cpStats = np.delete(cpStats, (0), axis=0) # delete the first row which was superfluous\n ### Print the statistics\n #fit = tensorTools.lsqrFit(X,M)\n #ll = tensorTools.loglikelihood(X,[M])\n print(\"Number of iterations = {0}\".format(iteration))\n #print(\"Final least squares fit = {0}\".format(fit))\n #print(\"Final log-likelihood = {0}\".format(ll))\n print(\"Final KKT Violation = {0}\".format(kktViolations[iteration]))\n print(\"Total inner iterations = {0}\".format(np.sum(nInnerIters)))\n \n #modelStats = {\"Iters\" : iter, \"LS\" : fit, \"LL\" : ll, \"KKT\" : kktViolations[iteration]}\n return M, 
cpStats", "def determine_analytic_solution(self):\n\n self._Janalytic = np.where(self.xr <= self.xint, self.S, 0.5 * self.S)\n self._Hanalytic = np.where(self.xr <= self.xint, 0, 0.25 * self.S)\n self._Kanalytic = np.where(self.xr <= self.xint, 1./3. * self.S,\n 1./6. * self.S)", "def calc_F1_cv(SOM_data_cv, GTD_cv, best_cluster_set, persis_thresh, nodes_arr, seas):\r\n blocked_days = GTD_cv.values\r\n SOM_data_node_list_cv = [(SOM_data_cv[:,:,i]*nodes_arr).sum(axis=0).sum(axis=0) for i in range(SOM_data_cv.shape[2])]\r\n SOM_nodenum_cv = xr.concat(SOM_data_node_list_cv, dim = \"node_num\") \r\n \r\n blocked_days_clus = calc_blocked_days_clus(blocked_days, persis_thresh, SOM_nodenum_cv, best_cluster_set)\r\n blocked_days_clus_xr = xr.DataArray(blocked_days_clus, name = \"blocking\", dims = {\"time\": GTD_cv['time']})\r\n blocked_days_clus_xr['time'] = GTD_cv['time']\r\n blocked_days_clus_sel = blocked_days_clus_xr.sel(time = np.isin(blocked_days_clus_xr['time.season'], seas))\r\n GTD_cv_seas = GTD_cv.sel(time = np.isin(blocked_days_clus_xr['time.season'], seas))\r\n prec, recall, F1 = calc_pr_rc_F1(GTD_cv_seas, blocked_days_clus_sel) \r\n if F1 == np.nan:\r\n F1 = 0\r\n return F1, prec, recall", "def get_score_matrix(self) -> int:", "def GramSchmidt(A):\r\n n = len(A)\r\n # Finds the number of lists in the list, which is also the number of rows\r\n m = len(A[0])\r\n # Finds the number of elements in list one, which is also the number of columns\r\n V = A\r\n R = [[0]*n for i in range(n)]\r\n # creates an empty list R with dimensions of n rows and n columns\r\n Q = [[0]*m for i in range(n)]\r\n # creates an empty list Q with dimensions of n rows and m columns\r\n inputStatus = True\r\n # inputStatus is true at this point until proven otherwise\r\n for i in range(n):\r\n for j in range(m):\r\n if ((type(A[i][j]) != int) and (type(A[i][j]) != float) and (type(A[i][j]) != complex)):\r\n inputStatus = False\r\n print(\"Invalid Input\")\r\n # this checks each value in the matrix A to make sure it is some time of number, if it isnt a number then the input status will be false \r\n # if the input status is false then an error message will be displayed stating that this is an invalid input\r\n if inputStatus == True:\r\n # if the given list does not fall under the previous if statement then the input status will continue to be true and we can continue to find the QR factorization \r\n for i in range(n):\r\n # for loop which continues as long as there are still lists in A \r\n R[i][i] = norm(V[i])\r\n # Creates the border for the upper triangle matrix R, where each value in the diagonal is the 2 norm of the corresponding vector in the original matrix A \r\n Q[i] = unit(V[i])\r\n # Each vector in Q is the unit vector of the corresponding vector in A \r\n for j in range(i+1,n):\r\n # the position j will be 1 more than the position i \r\n R[j][i] = dot(Q[i],V[j])\r\n # The element in R[i+1][i] is the dot product of Q[i] and V[i+1] \r\n temp = scalarmul(R[j][i],Q[i])\r\n # This is the scalar multiplication of R[i+1][i] and Q[i] which will be labeled as temp \r\n V[j] = subtract(V[j],temp)\r\n # V[j] is the difference between the original V[j] and temp \r\n return[Q,R]", "def __init__(self, M, rat):\n self.M = M\n xc0, _ = np.polynomial.chebyshev.chebgauss(M-0)\n xc1, _ = np.polynomial.chebyshev.chebgauss(M-1)\n xc2, _ = np.polynomial.chebyshev.chebgauss(M-2)\n # vandermonde and inverse vandermonde matrices\n self.V0 = np.polynomial.chebyshev.chebvander(xc0, M-1)\n self.V1 = 
np.polynomial.chebyshev.chebvander(xc1, M-2)\n self.V2 = np.polynomial.chebyshev.chebvander(xc2, M-3)\n self.VI0 = np.linalg.inv(self.V0)\n self.VI1 = np.linalg.inv(self.V1)\n self.VI2 = np.linalg.inv(self.V2)\n # differentiation matrices\n DC01 = np.polynomial.chebyshev.chebder(np.eye(M-0)) / rat\n DC12 = np.polynomial.chebyshev.chebder(np.eye(M-1)) / rat\n DC00 = np.row_stack([DC01, np.zeros(M)])\n self.D00 = self.V0.dot(DC00.dot(self.VI0))\n self.D01 = self.V1.dot(DC01.dot(self.VI0))\n self.D12 = self.V2.dot(DC12.dot(self.VI1))\n # boundary condition operators\n self.ibc_dirichlet = np.polynomial.chebyshev.chebvander(1, M-1).dot(self.VI0)\n self.obc_dirichlet = np.polynomial.chebyshev.chebvander(-1, M-1).dot(self.VI0)\n self.ibc_neumann = self.ibc_dirichlet.dot(self.D00)\n self.obc_neumann = self.obc_dirichlet.dot(self.D00)\n # rank reduction operators\n temp = np.zeros([M-1, M-0], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.R01 = self.V1.dot(temp.dot(self.VI0))\n temp = np.zeros([M-2, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.R12 = self.V2.dot(temp.dot(self.VI1))\n self.R02 = self.R12.dot(self.R01)\n # get poof operator from M-1 --> M\n temp = np.zeros([M, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.P10 = self.V0.dot(temp.dot(self.VI1))", "def solve(self):", "def kkt_check_redund(c, A, x, basis, i, tol=1e-8, threshold=1e-3, max_iter=100000, verbose=True):\n improvement = False\n init_actives = [i]\n ab = np.arange(A.shape[0])\n a = np.arange(A.shape[1])\n\n maxupdate = 10\n B = BGLU(A, basis, maxupdate, False)\n iteration = 0\n while True:\n bl = np.zeros(len(a), dtype=bool)\n bl[basis] = 1\n xb = x[basis]\n\n try:\n l = B.solve(c[basis], transposed=True) # similar to v = linalg.solve(B.T, c[basis])\n except LinAlgError:\n np.set_printoptions(threshold=np.inf)\n mp_print('This matrix seems to be singular:', PRINT_IF_RANK_NONZERO=True)\n mp_print(B.B, PRINT_IF_RANK_NONZERO=True)\n mp_print('Iteration:' + str(iteration), PRINT_IF_RANK_NONZERO=True)\n mp_print('u:', PRINT_IF_RANK_NONZERO=True)\n mp_print(u, PRINT_IF_RANK_NONZERO=True)\n print(\"LinAlgError in B.solve\")\n np.set_printoptions(threshold=1000)\n return True, 1\n\n sn = c - l.dot(A) # reduced cost\n sn = sn[~bl]\n\n if np.all(sn >= -tol): # in this case x is an optimal solution\n return True, 0\n\n entering = a[~bl][np.argmin(sn)]\n u = B.solve(A[:, entering])\n\n i = u > tol # if none of the u are positive, unbounded\n if not np.any(i):\n mp_print(\"Warning: unbounded problem in KKT_check\")\n return True, 0\n\n th = xb[i] / u[i]\n l = np.argmin(th) # implicitly selects smallest subscript\n if basis[i][l] in init_actives: # if either plus or minus leaves basis, LP has made significant improvement\n improvement = True\n\n step_size = th[l] # step size\n\n # Do pivot\n x[basis] = x[basis] - step_size * u\n x[entering] = step_size\n x[abs(x) < 10e-20] = 0\n B.update(ab[i][l], entering) # modify basis\n basis = B.b\n\n # if np.dot(c, x) < -threshold: # found a better solution, so not adjacent\n if improvement:\n if not np.dot(c, x) < -threshold:\n mp_print('Original way of finding non-adjacents does not say these are non-adjacent', True)\n # if verbose:\n # mp_print(\"Did %d steps in kkt_check, found False - c*x %.8f\" % (iteration, np.dot(c, x)))\n return False, 0\n\n iteration += 1\n if iteration % 10000 == 0:\n print(\"Warning: reached %d iterations\" % iteration)\n if iteration % max_iter == 0:\n mp_print(\"Cycling? 
Starting again with new perturbation.\")\n return True, 2\n\n return True, 1", "def rawsolve(self,):\n m = self.m\n n = self.n\n z = self.z\n mark = self.mark\n kAAt = self.kAAt\n iAAt = self.iAAt\n AAt = self.AAt\n diag = self.diag\n consistent = True\n eps = 0.0\n m2 = m+n\n\n if self.ndep:\n eps = self.epssol * np.abs(z).max()\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- L z |\n #| */\n\n for i in range(m2):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n row = iAAt[k]\n z[row] -= AAt[k]*beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- D z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n z[i] = z[i]/diag[i]\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| t -1 |\n #| z <- (L ) z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n beta -= AAt[k]*z[iAAt[k]]\n z[i] = beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n return consistent", "def _fit_ridge_alpha(trn_fs,trn_data,val_fs,val_data,alphas=DEFAULT_ALPHAS,\n chunk_sz=5000,is_efficient=True,dtype=np.single, is_verbose=False, pthr=0.005,\n square_alpha=False,return_resids=False): \n n_tps,n_voxels = trn_data.shape\n n_chunks = np.ceil(n_voxels/np.float(chunk_sz)).astype(np.int32)\n cc = np.zeros((n_voxels,len(alphas)),dtype=dtype)\n if return_resids:\n resids = np.zeros((n_tps,n_voxels,len(alphas)),dtype=dtype)\n pred_A = []\n if is_efficient:\n # Efficient Ridge regression from A. Huth, Part (1):\n # Full multiplication for validation (here, random split of\n # training data) prediction is: \n # pred = (Xval*Vx) * Dx * (pinv(Ux)*Ychunk) # NOTE: pinv(Ux) = Ux'\n # We will pre-compute the first and third terms in parentheses:\n # pred = XvalVx * Dx * UxYchunk\n if is_verbose: \n print('->Doing SVD of stimulus design matrix')\n t0 = time.time()\n #time.sleep(.01); # To ensure printing?\n m,n = trn_fs.shape\n if m>n:\n Ux,Sx,Vx = _utils._svd(trn_fs,full_matrices=False)\n else:\n Vx,Sx,Ux = _utils._svd(trn_fs.T,full_matrices=False)\n # Switcheroo of Vx and Ux due to transpose of input matrix\n Ux = Ux.T\n Vx = Vx.T\n\n if is_verbose:\n t1 = time.time()\n print('->Done with SVD in %0.2f sec'%(t0-t1))\n # For more efficient computation:\n #k = len(Sx) \n ## OR: \n ## singcutoff = (XX);\n ## k = sum(sx > singcutoff);\n ## sx = sx(1:k);\n XvalVx = val_fs.dot(Vx.T) # NOTE: IN MATLAB, No Vx', because Matlab leaves V in transposed form!\n else:\n raise NotImplementedError(\"Sorry, not done yet!\")\n\n for iChunk in range(n_chunks):\n print('Running chunk %d of %d...\\n'%(iChunk+1,n_chunks))\n ChIdx = np.arange(chunk_sz) + chunk_sz*iChunk\n ChIdx = ChIdx[ChIdx<n_voxels] # clip extra voxels in last run.\n Ychunk = trn_data[:,ChIdx]\n\n # Fit model with all lambdas (for subset of voxels)\n if not is_efficient:\n raise Exception('LAME! no slow reliable ridge implemented.')\n #[Wt L] = ridgemulti(X,Ychunk,params.lambdas);\n else:\n # Efficient Ridge regression from A. Huth, part (2)\n # NOTE: weights are never explicitly computed!\n UxYchunk = Ux.T.dot(Ychunk)\n \n if is_verbose:\n print('Checking model predictions...')\n for iA,A in enumerate(alphas):\n if not is_efficient:\n pred = np.cast(np.single)[Xval.dot(Wt[:,:,iA])]\n else:\n # Efficient Ridge regression from A. 
Huth, part (3)\n # Normalize lambda by Frobenius norm for stim matrix\n aX = A # * norm(X,'fro'); # ... or not\n # Need to decide for final whether aX**2 or not\n if square_alpha:\n Dx = Sx/(Sx**2 + aX**2) \n else:\n Dx = Sx/(Sx**2 + aX) \n # Compute predicitons (XvalVx and UxYchunk computed above)\n # (mult diag is slightly faster than matrix multiplication in timing tests)\n pred = _utils.mult_diag(Dx, XvalVx, left=False).dot(UxYchunk) \n # Compute prediction accuracy (correlations)\n cc[ChIdx,iA]=_sutils.column_corr(pred,val_data[:,ChIdx])\n if return_resids:\n resids[:,ChIdx,iA] = val_data[:,ChIdx]-pred\n if return_resids:\n return cc,resids\n else:\n return cc", "def test_SIS():\r\n def is_symmetric_mode(beta, k0, g, a_over_d, h):\r\n \"\"\"\r\n Eq (7i) of paper\r\n beta is what I call kx\r\n k0 is vacuum angular wavenumber\r\n g is thickness of air layer\r\n h is thickness of corrugated layer\r\n a_over_d is the fraction of corrugated layer which is air\r\n \"\"\"\r\n lhs = ((cmath.sqrt(beta**2 - k0**2) / k0)\r\n * cmath.tanh(g/2 * cmath.sqrt(beta**2 - k0**2)))\r\n rhs = a_over_d * cmath.tan(k0 * h)\r\n return floats_are_equal(lhs, rhs, tol=1e-4)\r\n\r\n def is_antisymmetric_mode(beta, k0, g, a_over_d, h):\r\n \"\"\"\r\n Eq (7ii) of paper\r\n \"\"\"\r\n lhs = ((cmath.sqrt(beta**2 - k0**2) / k0)\r\n / cmath.tanh(g/2 * cmath.sqrt(beta**2 - k0**2)))\r\n rhs = a_over_d * cmath.tan(k0 * h)\r\n return floats_are_equal(lhs, rhs, tol=1e-4)\r\n # Choose some parameters (can be anything, these are from Fig. 3 caption)\r\n w = 2 * pi * (4 * nu.THz)\r\n h = 50 * nu.um\r\n g = 50 * nu.um\r\n a_over_d = 0.1\r\n \r\n # Now run analysis\r\n k0 = w / nu.c0\r\n d_over_a = a_over_d**-1\r\n # epsilon of a PEC (perfect electric conductor) is -infinity, but code\r\n # doesn't allow that. Use big value instead...\r\n PEC_eps = -1e11\r\n params = {'d_list': [inf, h, g, h, inf],\r\n 'ex_list': [PEC_eps, d_over_a, 1, d_over_a, PEC_eps],\r\n 'ez_list': [PEC_eps, PEC_eps, 1, PEC_eps, PEC_eps],\r\n 'mu_list': [1, a_over_d, 1, a_over_d, 1],\r\n 'w': w}\r\n \r\n kx_list = find_kx(params, grid_points=30, iterations=11, reduction_factor=14,\r\n plot_full_region=True,\r\n search_domain=[-1e5 * nu.m**-1, 1e5 * nu.m**-1, 0, 1e5 * nu.m**-1])\r\n \r\n print('kx_list -- ' + str(len(kx_list)) + ' entries...')\r\n print(['(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n # Here, I'm only interested in solutions on the positive real axis\r\n kx_list = [kx for kx in kx_list if abs(kx.real) > 1e5 * abs(kx.imag)]\r\n kx_list = [-kx if kx.real < 0 else kx for kx in kx_list]\r\n # Delete repeats with tolerance 1e-4\r\n kx_list_norepeat = []\r\n for kx in kx_list:\r\n if not any(floats_are_equal(kx, kx2, tol=1e-4) for kx2 in kx_list_norepeat):\r\n kx_list_norepeat.append(kx)\r\n kx_list = kx_list_norepeat\r\n print('kx_list (cleaned up) -- ' + str(len(kx_list)) + ' entries...')\r\n print(['(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n found_sym_mode = False\r\n for kx in kx_list:\r\n if is_symmetric_mode(kx, k0, g, a_over_d, h):\r\n found_sym_mode = True\r\n print('Found symmetric mode! ',\r\n '(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1))\r\n params2 = deepcopy(params)\r\n params2['kx'] = kx\r\n params2 = find_all_params_from_kx(params2)\r\n if check_mode(params2) is not True:\r\n print('Not a real mode? ... 
Error code:')\r\n print(check_mode(params2))\r\n else:\r\n plot_mode(params2)\r\n assert found_sym_mode\r\n found_anti_mode = False\r\n for kx in kx_list:\r\n if is_antisymmetric_mode(kx, k0, g, a_over_d, h):\r\n found_anti_mode = True\r\n print('Found antisymmetric mode! ',\r\n '(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1))\r\n params2 = deepcopy(params)\r\n params2['kx'] = kx\r\n params2 = find_all_params_from_kx(params2)\r\n if check_mode(params2) is not True:\r\n print('Not a real mode? ... Error code:')\r\n print(check_mode(params2))\r\n else:\r\n plot_mode(params2)\r\n assert found_anti_mode\r\n \r\n print('Congratulations, the solver found the correct kx for both the')\r\n print('symmetric and antisymmetric mode of the structure, consistent')\r\n print('with the analytical formula in the literature.')", "def find_optimal_low_rank_matrix( self, orig_similarity_matrix, orig_rank, u, s, v, singular_reduction ):\n '''rank_list = list()\n sum_singular_values = list()\n for rank in range( 0, orig_rank ):\n compute_result = self.compute_low_rank_matrix( u, s, v, rank + 1 )\n rank_list.append( ( rank + 1 ) / float( orig_rank ) )\n sum_singular_values.append( compute_result[ 1 ] )\n utils._plot_singular_values_rank( rank_list, sum_singular_values )'''\n return self.compute_low_rank_matrix( u, s, v, int( singular_reduction * orig_rank ) )", "def _rdm12_lowfilling(self, bradata: Optional['FqeData'] = None\n ) -> Tuple['Nparray', 'Nparray']:\n norb = self.norb()\n nalpha = self.nalpha()\n nbeta = self.nbeta()\n lena = self.lena()\n lenb = self.lenb()\n nlt = norb * (norb + 1) // 2\n\n outpack = numpy.zeros((nlt, nlt), dtype=self.coeff.dtype)\n outunpack = numpy.zeros((norb, norb, norb, norb),\n dtype=self.coeff.dtype)\n if nalpha - 2 >= 0:\n alpha_map, _ = self._core.find_mapping(-2, 0)\n alpha_array = self._to_array1(alpha_map, norb)\n\n def compute_intermediate0(coeff):\n tmp = numpy.zeros((nlt, int(binom(norb, nalpha - 2)), lenb),\n dtype=self.coeff.dtype)\n _apply_array12_lowfillingaa(self.coeff, alpha_array, tmp)\n return tmp\n\n inter = compute_intermediate0(self.coeff)\n inter2 = inter if bradata is None else compute_intermediate0(\n bradata.coeff)\n outpack += numpy.tensordot(inter2.conj(),\n inter,\n axes=((1, 2), (1, 2)))\n\n if self.nalpha() - 1 >= 0 and self.nbeta() - 1 >= 0:\n alpha_map, beta_map = self._core.find_mapping(-1, -1)\n inter = numpy.zeros((norb, norb, int(binom(\n norb, nalpha - 1)), int(binom(norb, nbeta - 1))),\n dtype=self._dtype)\n\n alpha_array = self._to_array2(alpha_map, norb)\n beta_array = self._to_array2(beta_map, norb)\n\n alpha_map, beta_map = self._core.find_mapping(-1, -1)\n _apply_array12_lowfillingab(self.coeff, alpha_array, beta_array,\n nalpha, nbeta, inter)\n\n if bradata is None:\n inter2 = inter\n else:\n inter2 = numpy.zeros((norb, norb, int(binom(\n norb, nalpha - 1)), int(binom(norb, nbeta - 1))),\n dtype=self._dtype)\n _apply_array12_lowfillingab(bradata.coeff, alpha_array, beta_array, \\\n nalpha, nbeta, inter2)\n\n # 0.25 needed since _apply_array12_lowfillingab adds a factor 2\n outunpack += numpy.tensordot(\n inter2.conj(), inter, axes=((2, 3), (2, 3))) * 0.25\n\n if self.nbeta() - 2 >= 0:\n _, beta_map = self._core.find_mapping(0, -2)\n beta_array = self._to_array1(beta_map, norb)\n\n def compute_intermediate2(coeff):\n tmp = numpy.zeros((nlt, lena, int(binom(norb, nbeta - 2))),\n dtype=self.coeff.dtype)\n _apply_array12_lowfillingaa(self.coeff,\n beta_array,\n tmp,\n alpha=False)\n\n return tmp\n\n inter = 
compute_intermediate2(self.coeff)\n inter2 = inter if bradata is None else compute_intermediate2(\n bradata.coeff)\n outpack += numpy.tensordot(inter2.conj(),\n inter,\n axes=((1, 2), (1, 2)))\n\n out = numpy.zeros_like(outunpack)\n for i in range(norb):\n for j in range(norb):\n ij = min(i, j) + max(i, j) * (max(i, j) + 1) // 2\n parityij = 1.0 if i < j else -1.0\n for k in range(norb):\n for l in range(norb):\n parity = parityij * (1.0 if k < l else -1.0)\n out[i, j, k,\n l] -= outunpack[i, j, k, l] + outunpack[j, i, l, k]\n mnkl, mxkl = min(k, l), max(k, l)\n work = outpack[ij, mnkl + mxkl * (mxkl + 1) // 2]\n out[i, j, k, l] -= work * parity\n\n return self.rdm1(bradata)[0], out", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def SwissRollWithConstrain(nei = [5,25,50]):\n n_samples = 4000\n n_neighbor = 60\n noise = 0\n X, _ = make_swiss_roll(n_samples, noise=noise, random_state=42)\n X = X*2 #scaling ths Swiss\n\n neigh = NearestNeighbors(n_neighbors=n_neighbor).fit(X)\n _, indxes = neigh.kneighbors(X)\n\n SwissConstrain = np.delete(X,indxes[1500,:], axis=0)\n SwissConstrainNoisy = SwissConstrain + np.random.normal(0,1,[n_samples-n_neighbor,3])\n\n elevation = 10\n azimoth = 60\n fig = plt.figure(figsize=(21,7))\n ax1 = fig.add_subplot(131, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(X[:, 0], X[:, 1], X[:, 2], c=np.linalg.norm((X[:, 0], X[:, 1]), axis=0))\n ax1.set_title('Swiss Roll')\n ax1.view_init(elev=elevation, azim=azimoth)\n ax1 = fig.add_subplot(132, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(SwissConstrain[:, 0], SwissConstrain[:, 1], SwissConstrain[:, 2],\n c=np.linalg.norm((SwissConstrain[:, 0], SwissConstrain[:, 1]), axis=0))\n ax1.set_title('Swiss Roll with constrain')\n ax1.view_init(elev=elevation, azim=azimoth)\n ax1 = fig.add_subplot(133, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(SwissConstrainNoisy[:, 0], SwissConstrainNoisy[:, 1], SwissConstrainNoisy[:, 2],\n c=np.linalg.norm((SwissConstrainNoisy[:, 0], SwissConstrainNoisy[:, 1]), axis=0))\n ax1.set_title('Noisy Swiss Roll with constrain')\n ax1.view_init(elev=elevation, azim=azimoth)\n plt.savefig('Swiss Roll with different petubations')\n\n DataToPlot = [X,SwissConstrain,SwissConstrainNoisy]\n DataName = ['Swiss ISOMAP','Swiss with constrain ISOMAP', 'Swiss with constrain and noise ISOMAP']\n\n # Ploting Swiss Isomapping\n for neighbors in nei:\n fig = plt.figure(figsize=(30, 10))\n for i, j in enumerate(DataToPlot):\n Swiss_isomap = Isomap(j, 2, neighbors)\n method = DataName[i]\n ax = fig.add_subplot(1, len(DataToPlot), i + 1)\n ax.scatter(Swiss_isomap[:, 0], Swiss_isomap[:, 1],\n c=np.linalg.norm((Swiss_isomap[:, 0], Swiss_isomap[:, 1]), axis=0), cmap=plt.cm.Spectral)\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Swiss_isomap, pallete=Swiss_isomap[:, 0:1], neighbors=neighbors, method=method) #An option to plot single graphs\n plt.savefig('Swiss ISOMAP embbeding for {} neighbour'.format(neighbors))\n\n DataName = ['Swiss LLE', 'Swiss with constrain LLE', 'Swiss with constrain and noise LLE']\n # Ploting Swiss LLE\n for neighbors in nei:\n fig = plt.figure(figsize=(30, 10))\n for i, j in enumerate(DataToPlot):\n Swiss_LLE = LLE(j, 2, neighbors)\n method = DataName[i]\n ax = 
fig.add_subplot(1, len(DataToPlot), i + 1)\n ax.scatter(Swiss_LLE[:, 0], Swiss_LLE[:, 1],\n c=np.linalg.norm((Swiss_LLE[:, 0], Swiss_LLE[:, 1]), axis=0), cmap=plt.cm.Spectral)\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Swiss_LLE, pallete=Swiss_LLE[:, 0:1], neighbors=neighbors, method=method) #An option to plot single graphs\n plt.savefig('Swiss LLE embbeding for {} neighbour'.format(neighbors))\n return", "def perf_adaptive(R2_true,converted_adaptive_file,epsilon):\n assert(len(converted_adaptive_file)==2)\n pop = converted_adaptive_file[1][0]\n region = converted_adaptive_file[1][1]\n eps_adapt = converted_adaptive_file[1][2]\n n_adapt = converted_adaptive_file[1][3]\n blen_adapt = converted_adaptive_file[1][4]\n assert((epsilon > 0) & (epsilon <= 1.0))\n n = R2_true.shape[0]\n mat = converted_adaptive_file[0][:n,:n]\n m = mat.shape[0]\n assert(n==m)\n R2_x = np.tril(R2_true,k=-1)\n nonzero_r2_true = R2_x[R2_x > 0.0]\n nonzero_r2_adaptive = mat[mat > 0.0]\n corrcoef = corrcoef_PxP(R2_true,mat)\n return (pop,region,eps_adapt,n_adapt,blen_adapt,epsilon,np.sum(nonzero_r2_adaptive > epsilon),np.sum(nonzero_r2_true > epsilon),\n np.count_nonzero(nonzero_r2_adaptive),np.count_nonzero(nonzero_r2_true),corrcoef)", "def find_results(data,weight_matrix,params):\r\n \r\n data = data.astype(np.float32)\r\n weight_matrix = weight_matrix.astype(np.float32)\r\n \r\n rank = params['rank']\r\n lamb = params['lambda']\r\n lr = params['lr']\r\n hidden_pairs = params['hidden_pairs']\r\n cost_functions.lamb = lamb\r\n\r\n f = cost_functions.frobenius \r\n V_masked = create_mask(data,hidden_pairs)\r\n bool_mask = V_masked.notnull().values\r\n tf_mask = tf.Variable(bool_mask)\r\n \r\n V = tf.constant(V_masked.values)\r\n laplacian_matrix = laplacian(weight_matrix).astype(np.float32)\r\n W, H = init_W_H(V.shape, rank=rank)\r\n WH = tf.matmul(W, H)\r\n L = tf.constant(laplacian_matrix)\r\n WTLW = tf.matmul(tf.matmul(tf.transpose(W), L), W)\r\n\r\n cost = f(V, tf_mask, WH, WTLW)\r\n train_step = tf.train.ProximalGradientDescentOptimizer(lr).minimize(cost)\r\n init = tf.global_variables_initializer()\r\n clip = get_clip(W, H)\r\n\r\n sess = tf.Session()\r\n sess.run(init)\r\n\r\n previous_cost = sess.run(cost)\r\n sess.run(train_step)\r\n sess.run(clip)\r\n initial_difference = previous_cost - sess.run(cost)\r\n\r\n matrix_errors = []\r\n graph_errors = []\r\n imputation_error = []\r\n\r\n learnt_W = sess.run(W).astype(np.float32)\r\n learnt_H = sess.run(H).astype(np.float32)\r\n imputation_norm = np.linalg.norm((data - learnt_W.dot(learnt_H))[~bool_mask])\r\n \r\n i = 0\r\n while np.isfinite(sess.run(cost)) and previous_cost-sess.run(cost) > TARGET_DIFFERENCE * initial_difference and i<=max_iterations:\r\n previous_cost = sess.run(cost)\r\n sess.run(train_step)\r\n sess.run(clip)\r\n matrix_errors.append(sess.run(cost_functions.matrix_cost))\r\n graph_errors.append(sess.run(cost_functions.graph_cost))\r\n i+=1\r\n\r\n learnt_W = sess.run(W).astype(np.float32)\r\n learnt_H = sess.run(H).astype(np.float32)\r\n\r\n imputation_norm = np.linalg.norm((data - learnt_W.dot(learnt_H))[~bool_mask])\r\n imputation_error.append(imputation_norm)\r\n\r\n return {'imputation_error':imputation_norm,'W':sess.run(W),'H':sess.run(H),\r\n 'graph_error':graph_errors,'matrix_error':matrix_errors,'imputation_error_list':imputation_error}", "def fAVM(RHOB,Dw,Ds,Df,Dc1,PHIc1,Ck,Dk,PHIk,RSK):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# 
--------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,Dw,Ds,Df,Dc1,PHIc1,Ck,Dk,PHIk,RSK):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n\tVk=0.000 # Initially assumme no kerogen\n\tDh=Df\n#\n#\t5.1.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.5.3.1 Organic and Inorganic Component Density Values:\n# -------------------------------------------------------\n\t\tDBI=(1-PHIc1)*Dc1+(PHIc1*Dw) # Bulk Density of Inorganic Component\n\t\tDBO=(1-PHIk)*Dk+(PHIk*Dh)# Bulk Density of Organic Component\n#\n# 5.1.3.2 Compute Volume of Organic and Inorganic Component:\n# ----------------------------------------------------------\n\t\tVOR=(DBI-RHOB)/(DBI-DBO)\n\t\tVOR=ImposeLimits(VOR,0,1)\n\t\tVIN=(1-VOR)\n#\n# 5.1.3.3 Compute Volumetrics, Total & Effective Porosity and Total & Effective Water Saturation:\n# ---------------------------------------\t-------------------------------------------------------\n\t\tVc1=VIN*(1-PHIc1)\n\t\tVc2=0.000\n\t\tVc3=0.000\n\t\tVk=VOR*(1-PHIk)\n\t\tPHIt=VIN*PHIc1+VOR*PHIk\n\t\tPHIe=VOR*PHIk\n\t\tSwt=1-((VOR*PHIk)/PHIt)\n\t\tSwt=ImposeLimits(Swt,0,1)\n\t\tSwe=0.000\n\t\tSxot=Swt\n\t\tSxoe=Swe\n#\n# 5.1.3.4 Compute Bulk Volume of Water, Hydrocarbon Pore Volume and Pore Space Fluid Properties:\n# ---------------------------------------\t------------------------------------------------------\n\t\tBVW=PHIe*Swe\n\t\tHCPV=PHIe*(1-Swe)\n\t\tVs=RSK*Vk # Estimate volume of adsorbed (sorbed) hydrocarbon\n\t\tVs=ImposeLimits(Vs,0,HCPV)\n\t\tVf=(HCPV-Vs)\n\t\tVf=ImposeLimits(Vf,0,(HCPV-Vs))\n#\n# 5.1.3.5 Recompute hydrocarbon properties in the pore space:\n# -----------------------------------------------------------\n\t\tSum=Vs+Vf\n\t\tif(Sum<=0.000):\n\t\t\tDh=Df\n\t\telse:\n\t\t\tDh=(Ds*Vs+Df*Vf)/(Vs+Vf)\n#\n# 5.1.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Vs,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Vs,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.1.6 Preoutput computations:\n# ------------------------------\n\tQc=MissingValue\n\tDc2=0.00\n\tDc3=0.00\n\tCBW=PHIt-PHIe # The assumption is that all microporosity can be considered to be clay bound water.\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw)\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. 
Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw)\n#\n# 5.5.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def principal_strain(strain_tensor_data, k, sample_ID, initial_step, ch_list):\n\n\n k = str(k)\n it = int(initial_step)\n dir = [\"xx\",\"yy\",\"zz\",\"xy\",\"yz\",\"zx\"]\n ch = ch_list.loc[\"ch\",:]\n\n\n\n \"\"\" ~~~~~~~~~~input from data file~~~~~~~~~~~~~~~~~ \"\"\"\n\n sdata = strain_tensor_data\n time_p = sdata.loc[:,\"Elapsed Time\"] \n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n time_n = time_p.values\n t = len(sdata.index)\n\n\n \"\"\" ~~~~~~~~~~Create strain tensor ~~~~~~~~~~~~~~~~~ \"\"\"\n\n stensor = np.empty((t,3,3))\n for i in range(0,t):\n strain = sdata.loc[i+1, dir]\n\n s1 = strain.at[\"xx\"]\n s2 = strain.at[\"xy\"]\n s3 = strain.at[\"zx\"]\n s4 = strain.at[\"yy\"]\n s5 = strain.at[\"yz\"]\n s6 = strain.at[\"zz\"]\n\n stensor[i,:,:] = np.array([[s1,s2,s3],\n [s2,s4,s5],\n [s3,s5,s6]])\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n w,v = LA.eigh(stensor) #calculate eigen vectors & eigenvalues\n\n\n \"\"\" ~~~~~~~~~~ Output data ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n time = time_n[it:]\n\n w = w[it:,:]\n v = v[it:,:,:]\n\n\n v1 = v[:,:,2]\n v2 = v[:,:,1]\n v3 = v[:,:,0]\n\n\n w_ave = np.mean(w, axis=0)\n v_ave = np.mean(v, axis=0)\n\n v1_ave = v_ave[:,2]\n v2_ave = v_ave[:,1]\n v3_ave = v_ave[:,0]\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n def plunge_trend(n):\n \n norm = np.linalg.norm(n)\n n = n/norm\n \n x = n[0]\n y = n[1]\n z = n[2]\n \n plunge = np.arcsin(z) \n \n if x == 0 and y > 0:\n trend = pi*0.5\n elif x == 0 and y < 0:\n trend = pi*1.5\n elif x > 0 and y == 0:\n trend = 0\n elif x < 0 and y == 0:\n trend = pi\n elif x == 0 and y == 0:\n trend = 0\n else:\n trend = np.arctan(abs(y/x))\n \n if x > 0 and y>0:\n trend = trend \n elif x > 0 and y< 0:\n trend = 2*pi - trend\n elif x <0 and y <0:\n trend = 1.5*pi - trend\n elif x <0 and y >0:\n trend = trend + 0.5*pi\n \n plunge = np.rad2deg(plunge)\n trend = np.rad2deg(trend)\n return plunge, trend\n\n\n def plot_schmidt(ax, plunge, trend, style, label = \"\", markersize = 30, alpha = 1):\n if plunge >= 0:\n ax.line(plunge, trend, style,label = label, markersize = markersize, alpha = alpha)\n elif plunge < 0:\n ax.line(-plunge, trend, style,label = label, markerfacecolor = \"#ffffff\", markersize = markersize, alpha = alpha)\n\n\n fig = plt.figure(figsize=(30,30))\n ax = fig.add_subplot(3,1,1,projection=\"stereonet\")\n ax.set_azimuth_ticklabels([\"N\",\"\",\"E\",\"\",\"S\",\"\",\"W\"])\n ax.grid(which=\"both\")\n \"\"\" ~~~~~~~~~~ Lower-himisphere Schmidt net plot of principal strain directions ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n for i in range(1, len(time)):\n plunge111, trend111 = plunge_trend(v1[i,:])\n plot_schmidt(ax,plunge111,trend111, \"ro\", markersize=5)\n\n plunge112, trend112 = plunge_trend(v2[i,:])\n plot_schmidt(ax,plunge112,trend112, \"go\", markersize=5)\n\n plunge113, trend113 = plunge_trend(v3[i,:])\n plot_schmidt(ax,plunge113,trend113, \"bo\", markersize=5)\n\n\n plunge1, trend1 = plunge_trend(v1[0,:])\n plot_schmidt(ax,plunge1,trend1, \"r^\",markersize =20)\n\n plunge2, trend2 = plunge_trend(v2[0,:])\n plot_schmidt(ax,plunge2,trend2, \"g^\",markersize =20)\n\n plunge3, trend3 = plunge_trend(v3[0,:])\n plot_schmidt(ax,plunge3,trend3, \"b^\",markersize =20)\n\n\n plunge1, trend1 = plunge_trend(v1[-1,:])\n plot_schmidt(ax,plunge1,trend1, 
\"ro\",markersize =20)\n\n plunge2, trend2 = plunge_trend(v2[-1,:])\n plot_schmidt(ax,plunge2,trend2, \"go\",markersize =20)\n\n plunge3, trend3 = plunge_trend(v3[-1,:])\n plot_schmidt(ax,plunge3,trend3, \"bo\",markersize =20)\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\" ~~~~~~~~~~ Lower-himisphere Schmidt net plot of averaged principal strain directions ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n plunge1, trend1 = plunge_trend(v1_ave)\n plot_schmidt(ax,plunge1,trend1, \"r*\",markersize =20, label = \"$\\sigma_1$\")\n\n plunge2, trend2 = plunge_trend(v2_ave)\n plot_schmidt(ax,plunge2,trend2, \"g*\",markersize =20,label = \"$\\sigma_2$\")\n\n plunge3, trend3 = plunge_trend(v3_ave)\n plot_schmidt(ax,plunge3,trend3, \"b*\", markersize =20,label = \"$\\sigma_3$\")\n\n ax.legend(bbox_to_anchor = (1.2, 1), loc=\"upper left\")\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n \n fig.text(0.15,0.7,ch)\n\n\n \"\"\" ~~~~~~~~~~ Plot of max & min horizontal strain directions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n \n zr = np.empty((360,1))\n for i in range(0,360):\n th_deg = i\n th = th_deg*pi*180**(-1) \n\n vector = np.array([[np.cos(th)],[np.sin(th)],[0]])\n sstensor = stensor[-1,:,:]\n z = sstensor.dot(vector)\n zz = vector.T.dot(z)\n zr[i] = zz\n\n th_max = zr.argmax()\n th_min = zr.argmin()\n\n #th_max = th_max*pi*180**(-1) \n #th_min = th_min*pi*180**(-1) \n\n #n_max_1 = np.array([[np.cos(th_max)],[np.sin(th_max)],[0]])\n #n_max_2 = np.array([[np.cos(th_max+pi)],[np.sin(th_max+pi)],[0]])\n\n #n_min_1 = np.array([[np.cos(th_min)],[np.sin(th_min)],[0]])\n #n_min_2 = np.array([[np.cos(th_min+pi)],[np.sin(th_min+pi)],[0]])\n\n plunge11, trend11 = 0, th_max\n plunge12, trend12 = 0, th_max+180\n #plunge11, trend11 = plunge_trend(n_max_1)\n #plunge12, trend12 = plunge_trend(n_max_2)\n plot_schmidt(ax,plunge11,trend11, \"rD\",markersize =30)\n plot_schmidt(ax,plunge12,trend12, \"rD\",markersize =30)\n\n plunge22, trend22 = 0, th_min\n plunge23, trend23 = 0, th_min + 180\n #plunge22, trend22 = plunge_trend(n_min_1)\n #plunge23, trend23 = plunge_trend(n_min_2)\n plot_schmidt(ax,plunge22,trend22, \"bD\",markersize =30)\n plot_schmidt(ax,plunge23,trend23, \"bD\",markersize =30)\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n \"\"\" ~~~~~~~~~~ Plot of time change of principal strain magnitudes ~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n ax1 = fig.add_subplot(3,1,2)\n w1 = w[:,2]-w[0,2]\n w2 = w[:,1]-w[0,1]\n w3 = w[:,0]-w[0,0]\n time = time[:]-time[0]\n\n\n ax1.plot(time,w1,label=\"$\\epsilon_1$\")\n ax1.plot(time,w2,label=\"$\\epsilon_2$\")\n ax1.plot(time,w3,label=\"$\\epsilon_3$\")\n ax1.set(xlabel=\"Elapsed Time[h]\",ylabel=\"Strain[$\\mu$strain]\")\n ax1.legend()\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n \"\"\" ~~~~~~~~~~ Plot of time change of principal strain magnitudes ratios ~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n ax2 = fig.add_subplot(3,1,3)\n w1 = w1[1:]\n w2 = w2[1:]\n w3 = w3[1:]\n time1 = time[1:]\n \n w21 = w2/w1\n w31 = w3/w1\n\n ax2.plot(time1,w21,label=\"$\\epsilon_2$/$\\epsilon_1$\")\n ax2.plot(time1,w31,label=\"$\\epsilon_3$/$\\epsilon_1$\")\n ax2.set(xlabel=\"Elapsed Time[h]\")\n ax2.legend()\n\n \"\"\" 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n fig.suptitle(sample_ID+\"_\"+k,fontsize=\"large\", fontweight=\"bold\")\n fig.savefig(\"result_\"+sample_ID+\"_\"+k+\".png\")\n plt.close(fig)\n\n return w, v", "def estimate_fundamental_matrix(points_a, points_b):\n mean_a = np.mean(points_a, axis=0)\n mean_b = np.mean(points_b, axis=0)\n std_a = np.std(points_a, axis=0)\n std_b = np.std(points_b, axis=0)\n T_a = np.asarray([[1.0/std_a[0], 0, -mean_a[0]/std_a[0]],\n [0, 1.0/std_a[1], -mean_a[1]/std_a[1]],\n [0, 0, 1]])\n T_b = np.asarray([[1.0/std_b[0], 0, -mean_b[0]/std_b[0]],\n [0, 1.0/std_b[1], -mean_b[1]/std_b[1]],\n [0, 0, 1]])\n points_a = np.hstack((points_a, np.ones((len(points_a), 1)))).T\n points_b = np.hstack((points_b, np.ones((len(points_b), 1)))).T\n points_a = np.dot(T_a, points_a)[:2].T\n points_b = np.dot(T_b, points_b)[:2].T\n\n A = []\n for pa, pb in zip(points_a, points_b):\n ua, va = pa\n ub, vb = pb\n A.append([ua*ub, va*ub, ub, ua*vb, va*vb, vb, ua, va, 1])\n A = np.vstack(A)\n _, _, Vt = np.linalg.svd(A)\n F = Vt[-1, :].reshape((3, 3))\n\n # enforce the singularity constraint\n U, D, Vt = np.linalg.svd(F)\n D[-1] = 0\n F = np.dot(np.dot(U, np.diag(D)), Vt)\n\n F = np.dot(np.dot(T_b.T, F), T_a)\n\n return F", "def explanation(self, instance):\r\n \"\"\"\r\n Args:\r\n instance: [numpy.array or sparse matrix] instance on which \r\n to explain the model prediction\r\n \r\n Returns:\r\n A tuple (explanation_set[0:self.max_explained], number_active_elements, \r\n number_explanations, minimum_size_explanation, time_elapsed, where:\r\n \r\n explanation_set: explanation(s) ranked from high to low change in predicted score or probability.\r\n The number of explanations shown depends on the argument max_explained.\r\n \r\n number_active_elements: number of active elements of the instance of interest.\r\n \r\n number_explanations: number of explanations found by algorithm.\r\n \r\n minimum_size_explanation: number of features in the smallest explanation.\r\n \r\n time_elapsed: number of seconds passed to generate explanation(s).\r\n \r\n explanations_score_change: change in predicted score/probability when removing\r\n the features in the explanation, ranked from high to low change.\r\n \"\"\"\r\n \r\n# *** INITIALIZATION ***\r\n \r\n time_max=0\r\n tic=time.time()\r\n instance=lil_matrix(instance)\r\n iteration=0\r\n nb_explanations=0\r\n minimum_size_explanation=np.nan\r\n explanations=[]\r\n explanations_sets=[]\r\n explanations_score_change=[]\r\n \r\n class_index = np.argmax(self.classifier_fn_multiclass(instance))\r\n score_predicted = self.classifier_fn_multiclass(instance)[class_index] \r\n #a tuple of predicted scores of one vs rest\r\n #get predicted score for the class that is predicted\r\n \r\n indices_active_elements=np.nonzero(instance)[1]\r\n number_active_elements=len(indices_active_elements)\r\n indices_active_elements=indices_active_elements.reshape((number_active_elements,1))\r\n threshold=-1\r\n stop=0\r\n expanded_combis=[]\r\n \r\n #use orderedset() \r\n combinations_to_expand=[]\r\n for features in indices_active_elements:\r\n combinations_to_expand.append(OrderedSet(features))\r\n #in the first iteration, the new combinations to explore\r\n #whether it are explanations are the combinations_to_expand\r\n new_combinations=combinations_to_expand.copy() \r\n \r\n #indices of active features are the feature set to explore\r\n feature_set=[]\r\n for features in indices_active_elements:\r\n 
feature_set.append(frozenset(features))\r\n \r\n time_max += (time.time()-tic)\r\n \r\n print('Initialization complete.')\r\n print('\\n Elapsed time %d \\n' %(time.time()-tic))\r\n\r\n while (iteration < self.max_iter) and (nb_explanations < self.max_explained) and (len(combinations_to_expand)!=0) and (len(new_combinations)!=0) and (time_max<(self.time_maximum)): \r\n \r\n time_extra=time.time()\r\n \r\n iteration+=1\r\n print('\\n Iteration %d \\n' %iteration)\r\n \r\n new_combinations_to_expand=[]\r\n scores_new_combinations_to_expand=[]\r\n for combination in new_combinations: #verify each set in new_combinations if it is an explanation or not\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination: \r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] != np.max(self.classifier_fn_multiclass(perturbed_instance))): #if class_index has no longer the top predicted score, an explanation is found.\r\n explanations.append(combination)\r\n explanations_sets.append(set(combination))\r\n explanations_score_change.append(score_predicted - score_new)\r\n nb_explanations+=1\r\n else:\r\n new_combinations_to_expand.append(combination)\r\n scores_new_combinations_to_expand.append(score_new)\r\n \r\n if (len(new_combinations[0]) == number_active_elements): \r\n stop=1\r\n else:\r\n stop=0 \r\n \r\n if (self.BB==True): #branch-and-bound\r\n if (len(explanations)!=0):\r\n lengths=[]\r\n for explanation in explanations:\r\n lengths.append(len(explanation))\r\n lengths=np.array(lengths)\r\n max_length=lengths.min() \r\n else: \r\n max_length=number_active_elements \r\n else: \r\n max_length=number_active_elements\r\n \r\n if (len(scores_new_combinations_to_expand) != 0):\r\n index_combi_max = np.argmax(score_predicted - scores_new_combinations_to_expand) #best-first combination or feature is chosen.\r\n new_score = scores_new_combinations_to_expand[index_combi_max]\r\n difference = score_predicted - new_score\r\n if difference[0] >= threshold:\r\n expand = 1\r\n else:\r\n expand = 0\r\n else:\r\n expand = 0\r\n\r\n if ((len(new_combinations[0]) < max_length) and (expand == 1) and (stop==0) and (nb_explanations < self.max_explained) and (len(new_combinations[0]) < self.max_features)): \r\n \r\n print('length of new_combinations is %d features.' 
%len(new_combinations[0]))\r\n print('new combinations can be expanded')\r\n \r\n comb=new_combinations_to_expand[index_combi_max]\r\n func=fn_1(comb, expanded_combis, feature_set, combinations_to_expand, explanations_sets)\r\n new_combinations=func[0]\r\n combinations_to_expand=func[1]\r\n expanded_combis=func[2]\r\n \r\n #Calculate new threshold\r\n scores_combinations_to_expand=[]\r\n for combination in combinations_to_expand:\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination:\r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] == np.max(self.classifier_fn_multiclass(perturbed_instance))):\r\n scores_combinations_to_expand.append(score_new)\r\n \r\n index_combi_max = np.argmax(score_predicted - scores_combinations_to_expand)\r\n new_score = scores_combinations_to_expand[index_combi_max]\r\n threshold = score_predicted - new_score\r\n \r\n time_extra2=time.time()\r\n time_max+=(time_extra2-time_extra)\r\n print('\\n Elapsed time %d \\n' %time_max)\r\n size_COMBIS=len(combinations_to_expand)\r\n print('\\n size combis to expand %d \\n' %size_COMBIS)\r\n \r\n else:\r\n \r\n print('length of new_combinations is %d features.' %len(new_combinations[0]))\r\n print('new combination cannot be expanded')\r\n \r\n combinations=[]\r\n for combination in combinations_to_expand:\r\n if ((len(combination) < number_active_elements) and (len(combination) < (max_length)) and (len(combination) < self.max_features)):\r\n combinations.append(combination)\r\n \r\n if (len(combinations) == 0) or (nb_explanations >= self.max_explained) or (len(combinations_to_expand) == len(new_combinations)):\r\n new_combinations=[]\r\n \r\n elif (len(combinations) != 0):\r\n \r\n new_combinations=[]\r\n it=0\r\n indices=[]\r\n new_score=0\r\n combinations_to_expand_copy = combinations.copy()\r\n \r\n scores_combinations_to_expand2=[]\r\n for combination in combinations_to_expand_copy:\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination:\r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] != np.max(self.classifier_fn_multiclass(perturbed_instance))):\r\n scores_combinations_to_expand2.append(2 * score_predicted)\r\n else:\r\n scores_combinations_to_expand2.append(score_new)\r\n \r\n while ((len(new_combinations) == 0) and (it<len(scores_combinations_to_expand2)) and ((time_max+(time.time() - time_extra))<self.time_maximum)):\r\n \r\n print('while loop %d' %it)\r\n \r\n if (it!=0):\r\n for index in indices:\r\n scores_combinations_to_expand2[index]= 2 * score_predicted\r\n #to make sure this index is never chosen again\r\n \r\n index_combi_max=np.argmax(score_predicted - scores_combinations_to_expand2) #best-first combi\r\n indices.append(index_combi_max)\r\n \r\n comb=combinations_to_expand_copy[index_combi_max]\r\n func=fn_1(comb, expanded_combis, feature_set, combinations_to_expand_copy, explanations_sets)\r\n new_combinations=func[0]\r\n combinations_to_expand=func[1]\r\n expanded_combis=func[2]\r\n \r\n #Calculate new threshold\r\n scores_combinations_to_expand=[]\r\n for combination in combinations_to_expand:\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination:\r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] == 
np.max(self.classifier_fn_multiclass(perturbed_instance))):\r\n scores_combinations_to_expand.append(score_new) \r\n \r\n if (len(scores_combinations_to_expand)!=0): \r\n index_combi_max=np.argmax(score_predicted - scores_combinations_to_expand) #best-first combi\r\n new_score=scores_combinations_to_expand[index_combi_max]\r\n threshold=score_predicted - new_score\r\n it+=1 \r\n print('length of new_combinations is %d features.' %len(new_combinations))\r\n print('score_predicted minus new_score is %f.' %(score_predicted - new_score))\r\n \r\n time_max += (time.time()-time_extra)\r\n print('\\n Elapsed time %d \\n' %time_max)\r\n print('\\n size combis to expand %d \\n' %len(combinations_to_expand))\r\n\r\n print(\"iterations are done\") \r\n explanation_set=[]\r\n explanation_feature_names=[]\r\n for i in range(len(explanations)):\r\n explanation_feature_names=[]\r\n for features in explanations[i]:\r\n explanation_feature_names.append(self.feature_names[features])\r\n explanation_set.append(explanation_feature_names)\r\n \r\n if (len(explanations)!=0):\r\n lengths_explanation=[]\r\n for explanation in explanations:\r\n l=len(explanation)\r\n lengths_explanation.append(l)\r\n minimum_size_explanation=np.min(lengths_explanation)\r\n \r\n number_explanations=len(explanations)\r\n #show explanation in explanation set which is minimum in size and highest score change (delta)\r\n if (np.size(explanations_score_change)>1):\r\n inds=np.argsort(explanations_score_change, axis=0)\r\n inds = np.fliplr([inds])[0]\r\n inds_2=[]\r\n for i in range(np.size(inds)):\r\n inds_2.append(inds[i][0])\r\n explanation_set_adjusted=[]\r\n for i in range(np.size(inds)):\r\n j=inds_2[i]\r\n explanation_set_adjusted.append(explanation_set[j])\r\n explanations_score_change_adjusted=[]\r\n for i in range(np.size(inds)):\r\n j=inds_2[i]\r\n explanations_score_change_adjusted.append(explanations_score_change[j])\r\n explanation_set=explanation_set_adjusted\r\n explanations_score_change=explanations_score_change_adjusted\r\n \r\n toc=time.time()\r\n time_elapsed=toc-tic\r\n print('\\n Elapsed time %d \\n' %time_elapsed)\r\n\r\n return (explanation_set[0:self.max_explained], number_active_elements, number_explanations, minimum_size_explanation, time_elapsed, explanations_score_change[0:self.max_explained], iteration)", "def SolveSCP(self):\n\n t0 = time()\n\n # Some predicates\n Lu_min = 0.\n niters_max = self._maxiters\n maxfracchange = self._maxfracchange\n\n # initialization, resetting ...\n self.reset_all() # including _u_naught(), first application\n scp_min = self.greedy()\n\n # re-initialization iteration; col fixing ignored for the moment\n niters = 0\n f_change = _largenumber\n while (f_change>maxfracchange) and (niters<niters_max):\n # re-initialize u\n if (np.mod(niters, 2)==0): \n self.reset_u(random=True)\n else:\n self.reset_u()\n u_tmp, Lu_tmp = self.subgradient() # find a near-optimal solution \n u, Lu = self.subgradient() # rerun subgradient to get a set of Lagrangian multipliers\n\n scp_all = np.zeros(self._subg_nsteps)\n for i in np.arange(self._subg_nsteps):\n #self.reset_s()\n self.s = np.copy(self.f)\n scp_all[i] = self.greedy(u=u[:,i])\n\n # check if the solution is gettting better\n imin_tmp = (np.where(scp_all==np.amin(scp_all)))[0]\n imin = imin_tmp[np.argmax(Lu[imin_tmp])]\n imax = np.argmax(Lu)\n if (np.mod(niters, 5)==0):\n print(\"This Best solution: UB={0}, LB={1}, UB1={2}, LB1={3}\".format(scp_all[imin], Lu[imin], scp_all[imax], Lu[imax]))\n if (niters==0) or ((scp_all[imin]<=scp_min) and 
((Lu[imin]-Lu_min)>-(np.fabs(Lu_min)*self._LB_maxfracchange))):\n scp_min = scp_all[imin]\n u_min = np.copy(u[:,imin])\n Lu_min = Lu[imin]\n self.stepsize = _stepsize\n\n LB = Lu_min\n\n # final step, needs to get u_min back\n self.u = np.copy(u_min)\n self.s = np.copy(self.f)\n UB = self.greedy()\n\n # Which is better? absolute change or fractional change? \n # Both are fine, but cost should be normalized over the mean/median.\n GAP = (UB-LB)/np.fabs(UB)\n f_change = GAP\n if (np.mod(niters, 5)==0):\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n niters = niters + 1\n if (niters == niters_max): \n #warnings.warn(\"Iteration reaches maximum = {0}\".format(niters))\n print(\"Iteration in re-initialization reaches maximum number = {0}\".format(niters))\n\n # Need to remove redundant columns\n # self.remove_redundant() # this itself is NP-hard ...\n\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n print(\"Final Best solution: {0}\".format(UB))\n time_used = (time()-t0)/60.\n print(\"Took {0:.3f} minutes to reach current solution.\".format(time_used))\n\n return (UB,time_used)", "def evaluate_all_points():\n start_time = timeit.default_timer()\n mua, vra = pgen.get_pdf()\n slack = ptsl.D\n\n all_alloc = list(itertools.product(range(1,ptsl.M+1),repeat=ptsl.NPH))\n riska = []\n f2 = open(\"risk-file-D216-NPH5.csv\",\"w\")\n f2.write(\"alloc1,alloc2,alloc3,alloc4,alloc5,risk,util\\n\")\n count = 0\n for a in all_alloc :\n a1, a2, a3, a4, a5 = a\n r = compute_risk(mua, vra, a, slack)\n \n if r > 0.00001 and r < 1 - 0.00001 :\n riska.append(r)\n util = a1 * mua[a1-1] + a2 * mua[a2-1] + a3 * mua[a3-1] + a4 * mua[a4-1] + a5 * mua[a5-1]\n f2.write(\"%d,%d,%d,%d,%d,%f,%f\\n\"%(a1,a2,a3,a4,a5,r,util))\n count = count + 1\n f2.close()\n np.save(\"stored_risk\",riska)\n elapsed = timeit.default_timer() - start_time\n print(\"Brute Force Evaluation Time for %d points : %fs\"%(count,elapsed))", "def method3(self):\n cres=0.\n Ux_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.ALDM[ix ,iy, : , : ]\n mat2=self.ALDM[(ix%self.kS.Nx)+1, iy, : , : ]\n mat3=self.ALDM[ix ,(iy%self.kS.Ny)+1, : , : ]\n \n Ux_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[self.NL-1:,self.NL-1:])\n Uy_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[self.NL-1:,self.NL-1:])\n\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_aloc[ix,iy]*Uy_aloc[ix+1,iy]/Ux_aloc[ix,iy+1]/Uy_aloc[ix,iy])\n cres+=ftemp/2./pi/1j\n \n return cres.real\n #End of method3", "def test_rastrigin(self):\n rastrigin = get_problem('rastrigin', dimension=self.dimension)\n self.assertEqual(rastrigin(self.array), 0.0)", "def test_equivalence():\n\t\n\tfrom . 
import spectra as sp\n\t\n\t#analytic\n\tp_dict = {'Bfield':15000,'rb85frac':1,'Btheta':0*np.pi/180,'Bphi':0*np.pi/180,'lcell':1e-3,'T':84,'Dline':'D2','Elem':'Rb'}\n\tchiL1,chiR1,chiZ1 = sp.calc_chi([-18400],p_dict)\n\tRotMat1, n11, n21 = solve_diel(chiL1,chiR1,chiZ1,0,150,force_numeric=False)\n\t\n\t#numeric\n\tchiL2, chiR2, chiZ2 = chiL1, chiR1, chiZ1\n\t#chiL2,chiR2,chiZ2 = sp.calc_chi([-18400],p_dict)\n\tRotMat2, n12, n22 = solve_diel(chiL2,chiR2,chiZ2,0,150,force_numeric=True)\n\t\n\tprint('RM 1')\n\tprint(RotMat1)\n\n\tprint('RM 2')\n\tprint(RotMat2)\t\n\t\n\tprint('n1_1 (analytic)')\n\tprint(n11)\n\tprint('n1_2')\n\tprint(n12)\n\tprint('n2_1 (analytic)')\n\tprint(n21)\n\tprint('n2_2')\n\tprint(n22)\n\t\n\tprint('chi1')\n\tprint((chiL1, chiR1, chiZ1))\n\n\tprint('chi2')\n\tprint((chiL2, chiR2, chiZ2))", "def evaltest(x_solution,ntest,pred):\n \n large = 10.0**30\n e0 = 0.0\n y=0.0\n for i in range(ntest): # Computation of correct piece\n e0 += cfg.a_unscaled[cfg.ntrain+i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[cfg.ntrain+i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[cfg.ntrain+i][j1]\n y += pred[i]\n \n y = y/ntest \n e0 = e0/ntest\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(ntest):\n rmse += (pred[i]-cfg.a_unscaled[cfg.ntrain+i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[cfg.ntrain+i][-1]) \n e1 += (cfg.a_unscaled[cfg.ntrain+i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/ntest)\n mae = mae/ntest\n\n if ntest > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(ntest):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[cfg.ntrain+i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[cfg.ntrain+i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return pred,rmse,mae,ce,r", "def eight_point(points_lst):\r\n\r\n # get H for normalization and produce normalized points\r\n points_lst = np.array(points_lst)\r\n h_l = get_h(points_lst[:, 0])\r\n h_r = get_h(points_lst[:, 1])\r\n p_l_norm = [h_l @ np.array([p[0], p[1], 1]) for p in points_lst[:, 0]]\r\n p_r_norm = [h_r @ np.array([p[0], p[1], 1]) for p in points_lst[:, 1]]\r\n\r\n # create A using normalized points\r\n a = []\r\n for p_l, p_r in zip(p_l_norm, p_r_norm):\r\n x_l, y_l = p_l[0], p_l[1]\r\n x_r, y_r = p_r[0], p_r[1]\r\n a.append([x_r * x_l, x_r * y_l, x_r, y_r * x_l, y_r * y_l, y_r, x_l, y_l, 1])\r\n a = np.array(a)\r\n\r\n u, s, vh = np.linalg.svd(a)\r\n f_mat = np.reshape(vh[-1, :], (3, 3))\r\n\r\n # enforce singularity constraint\r\n u, s, vh = np.linalg.svd(f_mat)\r\n s[-1] = 0\r\n f_unscaled = (u * s) @ vh\r\n\r\n # rescale F\r\n return np.linalg.inv(h_r) @ f_unscaled @ np.linalg.inv(h_l)", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. 
for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def ransac_plane_estimation (numpy_cloud, threshold, fixed_point=None, w = .9, z = 0.95 ):\r\n\r\n # variables\r\n current_consensus = 0 # keeps track of how many points match the current plane\r\n best_consensus = 0 # shows how many points matched the best plane yet\r\n consensus_points = np.array([]) # np.ndarray of points matching the cloud\r\n best_normal_vector = np.array ([]) # current best normal vector\r\n\r\n # determine probabilities and number of draws\r\n b = np.float_power(w, 3 ) # probability that all three observations belong to the model\r\n k = ceil(np.log(1-z ) / np.log(1-b )) # estimated number of draws\r\n\r\n # copy cloud\r\n numpy_cloud = numpy_cloud[:, 0:3].copy ()\r\n\r\n # estimate k * 3 random planes, defined through one normal vector and one plane parameter d, respectively\r\n normal_vectors, plane_parameters_d = random_plane_estimation (numpy_cloud, k * 3, fixed_point )\r\n\r\n # iterate through all planes found to see 
which one performs best\r\n for (normal_vector, d) in zip (normal_vectors, plane_parameters_d ):\r\n\r\n # count all points that consent with the plane\r\n current_consensus, current_consensus_points = plane_consensus (numpy_cloud, normal_vector, d, threshold )\r\n\r\n # is the current consensus match higher than the previous ones?\r\n if (current_consensus > best_consensus ):\r\n\r\n # keep best consensus set\r\n consensus_points = current_consensus_points\r\n best_normal_vector = normal_vector\r\n best_consensus = current_consensus\r\n\r\n return best_normal_vector, consensus_points", "def lowest_rank_approx(A,e):\n \n \n U,s,Vh=la.svd(A,full_matrices=False)\n t=s.copy()\n t[t>e]=0\n i=t.nonzero()[0][0]\n \n return U[:,:i].dot(np.diag(s[:i])).dot(Vh[:i,:])", "def solve(m):\n\t\n #with the assumption that at least one terminal state is given:\n if(len(m)==2 or len(m)==1): return [1,1]\n \n #Normalizing the in. matrix and identifying the trans./abs. states:\n m = normalizeProbabilityMatrix(m)\n t = getTransientStates(m)\n a = getAbsorbingStates(m)\n\t\n if len(a) >0:\n print( str(len(a)) + \" absorbing state\" + (\"\" if len(a)<=1 else \"s\" ))\n else:\n print(\"No absorbing state detected\")\n return\n \n #Getting the matrices Q and R as in the canonical form:\n Q = getQ(m,t)\n R = getR(m,t,a)\n I = getIdentity(len(Q))\n I_Q = subtractMatrices(I, Q)\n \n #Getting the fundamental matrix\n N = invertMatrix(I_Q)\n F = multiplyMatrices(N,R)\n \n #packing the result with a common denominator:\n gcd = getGCD(F[0]).denominator\n res=[]\n sum = 0\n for r in F[0]:\n val = int(r.numerator*(gcd/r.denominator))\n sum+=val\n res.append(val)\n res.append(sum) \n return res", "def SK(all_black,all_white,all_other):\n real_zone_1=[]\n real_zone_2=[]\n real_zone_3=[]\n global p\n #FIRST defining the zone value since the more center you are, the\n #more value you will have.\n \n #Zone 1: the gratest value zone\n zone_1=[]\n zone_1_val=0.3\n for i in all_other:\n if 125<=int(i[0])<=1100 and 125<=int(i[1])<=825:\n zone_1.append(i)\n\n #zone 2: second greatest value zone\n zone_2=[]\n zone_2_val=0.2\n for i in all_other:\n if 0<=int(i[0])<=125 and 125<=int(i[1])<=825:\n zone_2.append(i)\n if 1100<=int(i[0])<=1225 and 125<=int(i[1])<=825:\n zone_2.append(i)\n if 125<=int(i[0])<=1100 and 0<=int(i[1])<=125:\n zone_2.append(i)\n if 125<=int(i[0])<=1100 and 825<=int(i[1])<=950:\n zone_2.append(i)\n\n #zone 3: smallest value zone\n zone_3=[]\n zone_3_val=0.1\n for i in all_other:\n if 0<=int(i[0])<=125 and 0<=int(i[1])<=125:\n zone_3.append(i)\n if 0<=int(i[0])<=125 and 825<=int(i[1])<=950:\n zone_3.append(i)\n if 1100<=int(i[0])<=1225 and 0<=int(i[1])<=125:\n zone_3.append(i)\n if 1100<=int(i[0])<=1225 and 825<=int(i[1])<=950:\n zone_3.append(i)\n\n if all_black==[] and all_white==[]:\n p=0 #First hand Black\n #all_black.append([25*25,19*25])\n return[25*25,19*25]\n\n\n \n\n #Calculation of the values\n val=0\n value_list=[] #[[coordinate],val]\n if p == 0: #First hand Black\n for i in all_black:\n x=i[0]\n y=i[1]\n #right down↘️\n if [x+25 ,y+25] in all_other:\n val=1\n value_list.append([[x+25,y+25],val])\n #print('右下 if',value_list)\n #print('Right D if',val)\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y+25*a] in all_black:\n val+=1\n elif [x+25*a,y+25*a] in all_other:\n value_list.append([[x+25*a,y+25*a],val])\n #print('Right D',val)\n #print('右下',value_list)\n elif [x+25*a,y+25*a] in all_white:\n break\n \n #left up↖️\n if [x-25,y-25] in all_other:\n val=1\n value_list.append([[x-25,y-25],val])\n 
#print('Left U if')\n else:\n val=1\n for a in range(1,4):\n if [x-25*a,y-25*a] in all_black:\n val+=1\n elif [x-25*a,y-25*a] in all_other:\n value_list.append([[x-25*a,y-25*a],val])\n #print('Left U')\n elif [x-25*a,y-25*a] in all_white:\n break\n \n #right up↗️ \n if [x+25,y-25] in all_other:\n val=1\n value_list.append([[x+25,y-25],val])\n #print('RU if')\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y-25*a] in all_black:\n val+=1\n elif [x+25*a,y-25*a] in all_other:\n value_list.append([[x+25*a,y-25*a],val])\n #print('右上')\n elif [x+25*a,y-25*a] in all_white:\n break\n\n #left down↙️\n if [x-25,y+25] in all_other:\n val=1\n value_list.append([[x-25,y+25],val])\n #print('左下 if') \n else:\n val=1\n for a in range(1,4):\n if [x-25*a,y+25*a] in all_black:\n val+=1\n elif [x-25*a,y+25*a] in all_other:\n value_list.append([[x-25*a,y+25*a],val])\n #print('左下')\n elif [x-25*a,y+25*a] in all_white:\n break\n\n #right➡️\n if [x+25,y] in all_other:\n val=1\n value_list.append([[x+25,y],val])\n #print('右',value_list)\n #print('右 if')\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y] in all_black:\n val+=1\n elif [x+25*a,y] in all_other:\n value_list.append([[x+25*a,y],val])\n #print('右')\n elif [x+25*a,y] in all_white:\n break\n\n #left⬅️ \n if [i[0]-25,i[1]] in all_other:\n val=1\n value_list.append([[i[0]-25,i[1]],val])\n #print('左', value_list)\n #print('左 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0]-25*a,i[1]] in all_black:\n val+=1\n elif [i[0]-25*a,i[1]] in all_other:\n value_list.append([[i[0]-25*a,i[1]],val])\n #print('左')\n elif [i[0]-25*a,i[1]] in all_white:\n break\n\n #down⬇️ \n if [i[0],i[1]+25] in all_other:\n val=1\n value_list.append([[i[0],i[1]+25],val])\n #print('下', value_list)\n #print('下 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0],i[1]+25*a] in all_black:\n val+=1\n elif [i[0],i[1]+25*a] in all_other:\n value_list.append([[i[0],i[1]+25*a],val])\n #print('下')\n elif [i[0],i[1]+25*a] in all_white:\n break\n \n #up⬆️\n if [i[0],i[1]-25] in all_other:\n val=1\n value_list.append([[i[0],i[1]-25],val])\n #print('上',value_list)\n #print('上 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0],i[1]-25*a] in all_black:\n val+=1\n elif [i[0],i[1]-25*a] in all_other:\n value_list.append([[i[0],i[1]-25*a],val])\n #print('上')\n elif [i[0],i[1]-25*a] in all_white:\n break\n\n\n\n all_val=[]\n #print(value_list,'这是value_list')\n\n \n sum_value=[]\n coord=[]\n for a in value_list:\n if a[0] not in coord:\n coord.append(a[0])\n #print(coord)\n for b in coord:\n he=[]\n for c in value_list:\n if b == c[0]:\n he.append(c[1])\n #print(he,'这是和')\n sum_value.append([b,sum(he)])\n\n\n\n #print(sum_value,'同样坐标下val相加')\n for i in sum_value:\n all_val.append(i[1])\n #print(all_val,'所有的相加之后的val')\n numb=-1\n all_max=[]\n for v in all_val:\n numb+=1\n if v == max(all_val):\n max_val_list = value_list[numb][0] #max (x,y)\n if value_list[numb][0] in all_other:\n all_max.append(value_list[numb])\n \n \n #print(max(all_val),'max val')\n for u in all_max:\n if u[0] in zone_1:\n real_zone_1.append(u[0])\n if u[0] in zone_2:\n real_zone_2.append(u[0])\n if u[0] in zone_3:\n real_zone_3.append(u[0])\n if real_zone_1 != []:\n print('real_1')\n return real_zone_1[0]\n elif real_zone_2 != []:\n print('Its zone 2')\n return real_zone_2[0]\n elif real_zone_3 != []:\n print('Its zone 3')\n return real_zone_3[0]\n else:\n return \"mistake\"", "def search_optimal_capacities(network, step_size, tolerance, filename):\r\n ## Initialization\r\n # Initialize the value of total flow over the 
network\r\n totalflow = max(network.lb_totalflow, step_size)\r\n \r\n # An auxiliary threshold of the total flow computed based on the capacity upper bounds, used in Line 4 of Algorithm 3.\r\n aux_bound = 1 - np.exp(network.beta - network.b + network.phi/network.u)\r\n \r\n \r\n # Initialize the bounds for flow over each route\r\n ub_flow = np.zeros(network.num_routes)\r\n lb_flow = np.zeros(network.num_routes)\r\n \r\n # Initialize the optimal solution over the network\r\n opt_socialwelfare = np.array([])\r\n opt_totalflow = 0\r\n opt_flows = np.array([])\r\n opt_capacity = np.zeros(network.num_routes)\r\n \r\n\r\n# # For debugging only\r\n# lower_bound = np.zeros(network.num_routes)\r\n# upper_bound = np.zeros(network.num_routes)\r\n# count = 0\r\n \r\n # Try to plot out the (totalflow, social_welfare) scatter plot\r\n z = []\r\n hz = []\r\n# # End of debugging\r\n\r\n ## Start the search\r\n while totalflow < 1 - tolerance:\r\n flag_nofeasibleflow = False\r\n \r\n # Compute the bounds for the flow.\r\n for i in range(network.num_routes):\r\n # Line 3-8 of Algorithm 3. Compute the upper bounds for the flow.\r\n if totalflow >= aux_bound[i]: \r\n x3_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 3) \r\n if x3_star > network.u[i]:\r\n flag_nofeasibleflow = True\r\n break \r\n else:\r\n ub_flow[i] = x3_star \r\n else: \r\n ub_flow[i] = 1 \r\n # Line 9-10 of Algorithm 3. Compute the lower bounds of the flow.\r\n x1_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 1)\r\n x2_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 2)\r\n lb_flow[i] = max(x1_star, x2_star)\r\n \r\n \r\n if not flag_nofeasibleflow:\r\n # Check feasibility of the flow based on the current total flow, lower and upper bounds of the flow\r\n if totalflow < np.sum(lb_flow) or totalflow > np.sum(ub_flow): \r\n totalflow += step_size \r\n\r\n# # For debugging only\r\n# print(\"\\nThe current total flow is: \" + str(totalflow))\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count))\r\n# # Eng of debugging\r\n# \r\n continue\r\n \r\n # The implementation of line 11 to 18. Find the optimal flow given the current value of z.\r\n [opt_obj, opt_x] = ip.max_sum_xlogx(network.num_routes, totalflow, lb_flow, ub_flow) \r\n \r\n \r\n # Line 18 of Algorithm 3. 
Compute the social welfare given the current z and optimal q(z).\r\n temp = opt_obj - totalflow * np.log(1-totalflow)\r\n\r\n ##### Testing: to plot out the function of h(z)\r\n z.append(totalflow)\r\n hz.append(temp)\r\n ##### End of Testing: to plot out the function of h(z)\r\n \r\n if opt_socialwelfare.size == 0 or temp > opt_socialwelfare:\r\n opt_socialwelfare = temp\r\n opt_flows = opt_x\r\n opt_totalflow = totalflow \r\n \r\n # For debugging only\r\n# print(\"\\nUpdate optimal flow\")\r\n# print(opt_x)\r\n# print(lb_flow)\r\n# print(ub_flow)\r\n# print(\"Total flow is \" + str(opt_totalflow)) \r\n \r\n # For debugging\r\n# np.copyto(lower_bound, lb_flow) \r\n# np.copyto(upper_bound, ub_flow) \r\n# count += 1\r\n# print(\"The lower and upper bounds are: \")\r\n# print(lb_flow)\r\n# print(lower_bound)\r\n# print(\"\\n\")\r\n# print(ub_flow)\r\n# print(upper_bound)\r\n# print(\"\\n\")\r\n \r\n totalflow += step_size \r\n\r\n \r\n \r\n# # For debugging only\r\n# print(\"\\n----------------\\n Exiting the while loop.\")\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count)) \r\n# # Eng of debugging\r\n \r\n # Line 20 of ALgorithm 3\r\n if opt_flows.size > 0:\r\n network.update_flow(opt_flows) \r\n for i in range(network.num_routes): \r\n network.compute_capacity(opt_totalflow, i)\r\n opt_capacity[i] = network.capacity[i]\r\n print(\"\\n--------------\\nThe optimal flow is: \")\r\n print(opt_flows)\r\n print(\"\\n--------------\\nThe optimal parking capacity is: \")\r\n print(opt_capacity) \r\n print(\"\\n--------------\\nThe optimal total flow is \" + str(opt_totalflow))\r\n print(\"\\n--------------\\nThe maximum social welfare is \" + str(opt_socialwelfare) +\".\")\r\n \r\n \r\n ##### Testing: to plot out the function of h(z)\r\n #plt.scatter(z, hz, c='r', marker='r')\r\n plt.plot(z, hz, '-', linewidth=0.5)\r\n #plt.xlim(0.5, 1)\r\n plt.savefig(filename + '.png', bbox_inches='tight')\r\n ##### End of Testing: to plot out the function of h(z)\r\n \r\n \r\n \r\n# # For debugging\r\n# temp1 = np.zeros(network.num_routes)\r\n# temp2 = np.zeros(network.num_routes)\r\n# temp3 = np.zeros(network.num_routes)\r\n# for i in range(network.num_routes): \r\n# temp1[i] = zeta(network, i, opt_flows[i], opt_totalflow, 1)\r\n# temp2[i] = zeta(network, i, opt_flows[i], opt_totalflow, 2)\r\n# temp3[i] = zeta(network, i, opt_flows[i], opt_totalflow, 3)\r\n# print(\"The function value of zeta at the optimal flow: \")\r\n# print(temp1)\r\n# print(temp2)\r\n# print(temp3)\r\n# \r\n# # For debugging\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count))\r\n# # End of debugging\r\n \r\n return opt_flows, opt_capacity, opt_socialwelfare \r\n else:\r\n print(\"\\nNo optimal solution is found!\")\r\n return np.array([]), opt_capacity, opt_socialwelfare", "def diagonal_hessian_guess(self, geom, Z, connectivity, guess_type=\"SIMPLE\"):\n\n logger = logging.getLogger(__name__)\n\n if guess_type == \"SIMPLE\":\n return 0.1\n\n elif guess_type == \"SCHLEGEL\":\n R_BC = v3d.dist(geom[self.B], geom[self.C])\n Rcov = qcel.covalentradii.get(Z[self.B], missing=4.0) + qcel.covalentradii.get(Z[self.C], missing=4.0)\n a = 0.0023\n b = 0.07\n if R_BC > (Rcov + a / b):\n b = 0.0\n return a - (b * (R_BC - 
Rcov))\n\n elif guess_type == \"FISCHER\":\n R = v3d.dist(geom[self.B], geom[self.C])\n Rcov = qcel.covalentradii.get(Z[self.B], missing=4.0) + qcel.covalentradii.get(Z[self.C], missing=4.0)\n a = 0.0015\n b = 14.0\n c = 2.85\n d = 0.57\n e = 4.00\n\n # Determine connectivity factor L\n Brow = connectivity[self.B]\n Crow = connectivity[self.C]\n Bbonds = 0\n Cbonds = 0\n for i in range(len(Crow)):\n Bbonds = Bbonds + Brow[i]\n Cbonds = Cbonds + Crow[i]\n L = Bbonds + Cbonds - 2\n logger.info(\"Connectivity of central 2 torsional atoms - 2 = L = %d\\n\" % L)\n return a + b * (np.power(L, d)) / (np.power(R * Rcov, e)) * (np.exp(-c * (R - Rcov)))\n\n elif guess_type == \"LINDH_SIMPLE\":\n\n R_AB = v3d.dist(geom[self.A], geom[self.B])\n R_BC = v3d.dist(geom[self.B], geom[self.C])\n R_CD = v3d.dist(geom[self.C], geom[self.D])\n k_tau = 0.005\n\n Lindh_Rho_AB = hguess_lindh_rho(Z[self.A], Z[self.B], R_AB)\n Lindh_Rho_BC = hguess_lindh_rho(Z[self.B], Z[self.C], R_BC)\n Lindh_Rho_CD = hguess_lindh_rho(Z[self.C], Z[self.D], R_CD)\n return k_tau * Lindh_Rho_AB * Lindh_Rho_BC * Lindh_Rho_CD\n\n else:\n logger.warning(\n \"\"\"Hessian guess encountered unknown coordinate type.\\n \n As default, identity matrix is used\"\"\"\n )\n return 1.0", "def solution(n, s, a, b, fares):\n\n table = [[float(\"inf\")]*n for _ in range(n)]\n for (c, d, f) in fares:\n table[c-1][d-1] = f\n table[d-1][c-1] = f\n\n for idx in range(n):\n table[idx][idx] = 0\n\n # do floyd to find all shortest paths\n for kdx in range(n):\n for idx in range(n):\n for jdx in range(n):\n table[idx][jdx] = min(table[idx][jdx], table[idx][kdx] + table[kdx][jdx])\n \n# for row in table:\n# print(row)\n \n answer = table[s-1][a-1] + table[s-1][b-1]\n # print(\"seperate:\", answer)\n for idx in range(n):\n # print(\"idx 경유:\", idx, table[s-1][idx] + table[idx][a-1] + table[idx][b-1])\n answer = min(answer, table[s-1][idx] + table[idx][a-1] + table[idx][b-1])\n\n # print(\"answer:\", answer)\n return answer", "def unit_tests():\r\n import utils\r\n import parse_file\r\n\r\n nb_bidders = 2\r\n max_value = 20\r\n\r\n bid_pdf = {}\r\n bid_pdf[(0, 0)] = 0.5\r\n bid_pdf[(100, 100)] = 0.5\r\n\r\n # When the tolerance is 0, should be infeasible\r\n tol = 0\r\n lower_bound, upper_bound = inverse_bce_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, tolerance=tol)\r\n if not lower_bound and not upper_bound:\r\n print(\"Infeasibility test: passed\")\r\n else:\r\n print(\"Infeasibility test: failed\")\r\n\r\n # When the tolerance is infinite, we should get the whole range\r\n tol = 10**6\r\n lower_bound, upper_bound = inverse_bce_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, tolerance=tol)\r\n if lower_bound == 0 and upper_bound == max_value:\r\n print(\"Tolerance test: passed\")\r\n else:\r\n print(\"Tolerance test: failed\")\r\n\r\n # The sparse and not sparse version should give the same solutions on reasonable examples.\r\n # Using the data with max_bid=5, max_value=10 as an example here.\r\n max_value = 20\r\n max_bid = 10\r\n\r\n wb = openpyxl.load_workbook('Dataset.xlsx')\r\n sheet1 = wb.get_sheet_by_name('Tract79')\r\n sheet2 = wb.get_sheet_by_name('Trbid79')\r\n\r\n bid_pdf = parse_file.bins_from_data(wb, sheet1, sheet2, nb_bidders, max_bid)\r\n\r\n tol = 0.1\r\n lower_bound_sparse, upper_bound_sparse = inverse_bce_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, tolerance=tol)\r\n lower_bound, upper_bound = inverse_bce(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, tolerance=tol)\r\n\r\n 
if abs(upper_bound_sparse - upper_bound) < 0.00001 and abs(lower_bound_sparse - lower_bound) < 0.00001:\r\n print(\"Equality test: passed\")\r\n else:\r\n print(\"Equality test: failed\")\r\n\r\n # The parametrized sparse code for min tolerance should return 0 when the data is generated using compute_bce\r\n mu = 5\r\n sigma = 5\r\n density = [np.exp(-(v - mu)**2 / (2 * sigma**2))\r\n for v in range(max_value + 1)]\r\n bid_pdf = compute_bce(nb_bidders, max_value, density)\r\n min_tol = inverse_bce_parameterized_sparse_min_tolerance(nb_bidders, \r\n max_value, bid_pdf, utils.first_moment, density)\r\n if min_tol <= 10**(-5):\r\n print(\"Minimum tolerance test: passed\")\r\n else:\r\n print(\"Minimum tolerance test: failed\")\r\n print(\"Minimum tolerance: \" + str(min_tol))\r\n\r\n bid_pdf = {}\r\n bid_pdf[(0, 0)] = 0.5\r\n bid_pdf[(100, 100)] = 0.5\r\n\r\n # When the tolerance is 0, the parameterized codes should be infeasible\r\n tol = 0\r\n lower_bound = inverse_bce_parameterized_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, density, tolerance=tol)\r\n lower_bound_sparse = inverse_bce_parameterized_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, density, tolerance=tol)\r\n if not lower_bound and not lower_bound_sparse:\r\n print(\"Parameterized infeasibility test: passed\")\r\n else:\r\n print(\"Parameterized infeasibility test: failed\")\r\n\r\n # When the tolerance is infinite, we should get the whole range for the parameterized code\r\n tol = 10**6\r\n lower_bound = inverse_bce_parameterized_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, density, tolerance=tol)\r\n lower_bound_sparse = inverse_bce_parameterized_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, density, tolerance=tol)\r\n if lower_bound == 0 and lower_bound_sparse == 0:\r\n print(\"Parameterized tolerance test: passed\")\r\n else:\r\n print(\"Parameterized tolerance test: failed\")", "def test_inverse( centering='SYMMETRIC'):\n\n\n npupil = 300 #156\n pctr = int(npupil/2)\n npix = 100 #1024\n u = 20 #100 # of lam/D\n\n npix, u = 2000, 200\n s = (npupil,npupil)\n\n\n\n\n mft1 = matrixDFT.MatrixFourierTransform(centering=centering)\n\n ctr = (float(npupil)/2.0, float(npupil)/2.0 )\n #print ctr\n pupil = makedisk(s=s, c=ctr, r=float(npupil)/2.0001, t=np.float64, grey=0)\n pupil /= np.sqrt(pupil.sum())\n\n pupil[100:200, 30:50] = 0\n pupil[0:50, 140:160] = 0\n\n plt.subplot(141)\n plt.imshow(pupil)\n\n print \"Pupil 1 total:\", pupil.sum() \n\n a = mft1.perform(pupil, u, npix)\n\n asf = a.real.copy()\n cpsf = a * a.conjugate()\n psf = cpsf.real.copy()\n print \"PSF total\", psf.sum()\n \n plt.subplot(142)\n plt.imshow(psf, norm=matplotlib.colors.LogNorm(1e-8, 1.0))\n\n plt.subplot(143)\n\n pupil2 = mft1.inverse(a, u, npupil)\n pupil2r = (pupil2 * pupil2.conjugate()).real\n plt.imshow( pupil2r)\n\n print \"Pupil 2 total:\", pupil2r.sum() \n\n\n\n a2 = mft1.perform(pupil2r, u, npix)\n psf2 = (a2*a2.conjugate()).real.copy()\n print \"PSF total\", psf2.sum()\n plt.subplot(144)\n plt.imshow(psf2, norm=matplotlib.colors.LogNorm(1e-8, 1.0))", "def initial_guess(self):\n\n x0 = [self.material_model.isotropic_matrix.c1, self.material_model.isotropic_matrix.c2]\n\n if not self.material_model.is_isotropic:\n # c5 is scaled in the optimisation function\n x0.append(self.material_model.fibres.c5 / c5_factor)\n\n if self.include_lm:\n x0.append(self.material_model.fibres.lm)\n\n return np.asarray(x0)", "def exercise():\n pi_good = get_pdb_inputs(pdb_str=pdb_str_answer, 
restraints=False)\n map_data = get_map(xrs=pi_good.xrs)\n xrs_good = pi_good.xrs.deep_copy_scatterers()\n pi_good.ph.write_pdb_file(file_name=\"answer.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())\n #\n pi_poor = get_pdb_inputs(pdb_str=pdb_str_poor, restraints=True)\n pi_poor.ph.write_pdb_file(file_name=\"poor.pdb\")\n xrs_poor = pi_poor.xrs.deep_copy_scatterers()\n #\n d = xrs_good.distances(other=xrs_poor)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)>2\n assert flex.mean(d)>0.7\n #\n xrs_refined = xrs_poor\n for i in range(3):\n ero = individual_sites.easy(\n map_data = map_data,\n xray_structure = xrs_refined,\n pdb_hierarchy = pi_poor.ph,\n geometry_restraints_manager = pi_poor.grm)\n xrs_refined = ero.xray_structure\n # comapre\n d = xrs_good.distances(other=xrs_refined)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)<0.15\n assert flex.mean(d)<0.03\n ero.pdb_hierarchy.write_pdb_file(file_name=\"refined.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())", "def FitFundamentalMatrix(mss):\n\n\tbatch_size = mss.shape[0]\n\tnum_correspondences = mss.shape[1]\t\n\n\tmss_src, T_src = utils.normalise2dpts(mss[:, :, :2])\n\tmss_target, T_target = utils.normalise2dpts(mss[:, :, 2:])\n\n\tones = torch.tensor([1]).double().cuda().view(1, 1, 1).repeat(batch_size, num_correspondences, 1)\n\tmss_src = torch.cat([mss_src, ones], dim = 2)\n\tmss_target = torch.cat([mss_target, ones], dim = 2)\n\n\trow = torch.cat([\n\t\tmss_src * mss_target[:, :, 0].view(batch_size, num_correspondences, 1), \n\t\tmss_src * mss_target[:, :, 1].view(batch_size, num_correspondences, -1),\n\t\tmss_src \n\t], dim = 2)\n\n\n\touter_product = row.unsqueeze(3) * row.unsqueeze(2)\n\tmat = outer_product.sum(dim = 1)\n\n\n\tU = []\n\tS = []\n\tVT = []\n\n\tfor i in range(batch_size):\n\n\t\t_, V = torch.symeig(mat[i])\n\t\tu, s, v = torch.svd(V[:, 0].view(3, 3))\n\t\tvt = v.permute(1, 0)\n\t\ts[2] = 0\n\n\t\tU.append(u.unsqueeze(0))\n\t\tS.append(torch.diag(s).unsqueeze(0))\n\t\tVT.append(vt.unsqueeze(0))\n\n\tU = torch.cat(U, dim = 0)\n\tS = torch.cat(S, dim = 0)\n\tVT = torch.cat(VT, dim = 0)\n\n\tF = torch.bmm(torch.bmm(U, S), VT)\n\tF = torch.bmm(T_src, torch.bmm(F.permute(0, 2, 1), T_target.permute(0, 2, 1)))\n\tF = F / F[:, 2, 2].view(batch_size, 1, 1)\n\n\tutils.nan_check(F)\n\n\treturn F", "def test():\n # test getCl\n ISWoutFile = 'ISWout_scalCls.fits'\n ISWinFile = 'ISWin_scalCls.fits'\n ell,temps = getCl(ISWoutFile)\n\n \"\"\"\n # test showCl\n showCl(ell,temps)\n\n # test makeLegendreTable\n # this works fine for small lmax values, but ell=86 and higher have problems\n # possibly due to exceeding the maximum size of a float64 dtype\n makeLegendreTable(9,'testTable.npy')\n table = symLoad('testTable.npy')\n print table\n\n # test powerArray\n powers = powerArray(2,9)\n print powers\n \"\"\"\n\n # test makeCmatrix\n # measured time: 4.25 hrs for 6110 point mask\n startTime = time.time()\n\n # old files no longer used\n #saveMatrixFile = 'covar6110_R010_lowl.npy'\n #saveMatrixFile = 'covar6110_R010.npy'\n #maskFile = '/shared/Data/PSG/hundred_point/ISWmask2_din1_R160.fits'\n #saveMatrixFile = 'covar9875_R160b.npy'\n\n # huge mask\n #maskFile = 'ISWmask9875_RING.fits' #19917 pixels\n #saveMatrixFile = 'covar19917_ISWout_bws_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWoutFile, highpass=12, beamSmooth=True, pixWin=True, nested=False)\n # took 24.83 hours\n\n # use ISWin to model expected signal\n #maskFile = 'ISWmask6110_RING.fits'\n #saveMatrixFile = 
'covar6110_ISWin_bws_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWinFile, highpass=12, nested=True)\n maskFile = 'ISWmask9875_RING.fits' #9875 pixels\n saveMatrixFile = 'covar9875_ISWin_bws_hp12_RING.npy'\n covMat = makeCmatrix(maskFile, ISWinFile, highpass=12, beamSmooth=True, pixWin=True, nested=False)\n\n # no beam nor window smoothing, high lmax\n #saveMatrixFile = 'covar6110_ISWout_nBW_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWoutFile, highpass=12, beamSmooth=False, pixWin=False, lmax=2200, nested=False)\n\n print 'time elapsed: ',int((time.time()-startTime)/60),' minutes'\n symSave(covMat,saveMatrixFile)\n \"\"\"\n\n # test subMatrix\n subMask = '/shared/Data/PSG/small_masks/ISWmask_din1_R010_trunc0500.fits'\n subCmat = subMatrix(subMask,maskFile,saveMatrixFile)\n print 'time elapsed: ',int((time.time()-startTime)/60),' minutes'\n \"\"\"", "def _rdm12_lowfilling_python(self, bradata: Optional['FqeData'] = None\n ) -> Tuple['Nparray', 'Nparray']:\n norb = self.norb()\n nalpha = self.nalpha()\n nbeta = self.nbeta()\n lena = self.lena()\n lenb = self.lenb()\n nlt = norb * (norb + 1) // 2\n\n outpack = numpy.zeros((nlt, nlt), dtype=self.coeff.dtype)\n outunpack = numpy.zeros((norb, norb, norb, norb),\n dtype=self.coeff.dtype)\n if nalpha - 2 >= 0:\n alpha_map, _ = self._core.find_mapping(-2, 0)\n\n def compute_intermediate0(coeff):\n tmp = numpy.zeros((nlt, int(binom(norb, nalpha - 2)), lenb),\n dtype=self.coeff.dtype)\n for i in range(norb):\n for j in range(i + 1, norb):\n for source, target, parity in alpha_map[(i, j)]:\n tmp[i + j * (j + 1) //\n 2, target, :] += coeff[source, :] * parity\n return tmp\n\n inter = compute_intermediate0(self.coeff)\n inter2 = inter if bradata is None else compute_intermediate0(\n bradata.coeff)\n outpack += numpy.tensordot(inter2.conj(),\n inter,\n axes=((1, 2), (1, 2)))\n\n if self.nalpha() - 1 >= 0 and self.nbeta() - 1 >= 0:\n alpha_map, beta_map = self._core.find_mapping(-1, -1)\n\n def compute_intermediate1(coeff):\n tmp = numpy.zeros((norb, norb, int(binom(\n norb, nalpha - 1)), int(binom(norb, nbeta - 1))),\n dtype=self.coeff.dtype)\n for i in range(norb):\n for j in range(norb):\n for sourcea, targeta, paritya in alpha_map[(i,)]:\n paritya *= (-1)**(nalpha - 1)\n for sourceb, targetb, parityb in beta_map[(j,)]:\n work = coeff[sourcea,\n sourceb] * paritya * parityb\n tmp[i, j, targeta, targetb] += work\n return tmp\n\n inter = compute_intermediate1(self.coeff)\n inter2 = inter if bradata is None else compute_intermediate1(\n bradata.coeff)\n outunpack += numpy.tensordot(inter2.conj(),\n inter,\n axes=((2, 3), (2, 3)))\n\n if self.nbeta() - 2 >= 0:\n _, beta_map = self._core.find_mapping(0, -2)\n\n def compute_intermediate2(coeff):\n tmp = numpy.zeros((nlt, lena, int(binom(norb, nbeta - 2))),\n dtype=self.coeff.dtype)\n for i in range(norb):\n for j in range(i + 1, norb):\n for source, target, parity in beta_map[(i, j)]:\n tmp[i + j * (j + 1) //\n 2, :, target] += coeff[:, source] * parity\n\n return tmp\n\n inter = compute_intermediate2(self.coeff)\n inter2 = inter if bradata is None else compute_intermediate2(\n bradata.coeff)\n outpack += numpy.tensordot(inter2.conj(),\n inter,\n axes=((1, 2), (1, 2)))\n\n out = numpy.zeros_like(outunpack)\n for i in range(norb):\n for j in range(norb):\n ij = min(i, j) + max(i, j) * (max(i, j) + 1) // 2\n parityij = 1.0 if i < j else -1.0\n for k in range(norb):\n for l in range(norb):\n parity = parityij * (1.0 if k < l else -1.0)\n out[i, j, k,\n l] -= outunpack[i, j, k, l] + outunpack[j, i, 
l, k]\n mnkl, mxkl = min(k, l), max(k, l)\n work = outpack[ij, mnkl + mxkl * (mxkl + 1) // 2]\n out[i, j, k, l] -= work * parity\n\n return self.rdm1(bradata)[0], out", "def fRCrim(Swe,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rh,Cwv,Ckv,Alpha,Tout):\n#\n# 1. Compute and normalise volumetric components:\n#\t-----------------------------------------------\n\tVw=PHIe*Swe\n\tVh=PHIe*(1-Swe)\n\tVwe=(Vw-Cwv)/(1-Cwv)\n\tVwe=ImposeLimits(Vwe,0,1)\n\tVke=(Vk-Ckv)/(1-Ckv)\n\tVke=ImposeLimits(Vke,0,1)\n\tSum=abs(Vc1)+abs(Vc2)+abs(Vc3)+abs(Vke)+abs(Vwe)+abs(Vh)\n\tVc1=abs(Vc1)/Sum\n\tVc2=abs(Vc2)/Sum\n\tVc3=abs(Vc3)/Sum\n\tVk=abs(Vk)/Sum\n\tVw=abs(Vw)/Sum\n\tVh=abs(Vh)/Sum\n#\n#\t2. Determine conductivity of components:\n#\t----------------------------------------\n\tSigc1=1/Rc1\n\tSigc2=1/Rc2\n\tSigc3=1/Rc3\n\tSigk=1/Rk\n\tSigw=1/Rw\n\tSigh=1/Rh\n#\n#\t3. Compute Conductivity:\n#\t========================\n\tTrm1=Vc1*(Sigc1**(1/Alpha))\n\tTrm2=Vc2*(Sigc2**(1/Alpha))\n\tTrm3=Vc3*(Sigc3**(1/Alpha))\n\tTrm4=(Vk**2.2)*(Sigk**(1/Alpha)) # Factor of 2.2 included to get data to fit to Yang et al\n\tTrm5=Vw*(Sigw**(1/Alpha))\n\tTrm6=Vh*(Sigh**(1/Alpha))\n\tCrf=(Trm1+Trm2+Trm3+Trm4+Trm5+Trm6)**Alpha\n#\n#\n# 4. Output result:\n#\t-----------------\n\tif(Tout==0):\n\t\tFr=Crf\n\telse:\n\t\tFr=1/Crf\n\treturn Fr", "def rothesstri(A, b):\n n = shape(A)[0]\n A = hstack([A, b])\n for k in range(n-1):\n r = linalg.norm([ A[k , k] , A[k + 1, k] ])\n if r>0:\n c=A[k, k]/r; s=A[k + 1, k]/r\n A[[k, k + 1],(k + 1):(n + 1)]=[[c, s],[-s, c]]*A[[k, k + 1],(k + 1):(n + 1)]\n A[k, k] = r; A[k+1,k] = 0\n z = A[:, n].copy()\n rbacksolve(A[:, :n], z, n)\n return z", "def part1b_2():\n xs = exampleInput\n z = 5.881\n forward = [\n Counter({'-FEAT-': 0.622, '-SIZE-': 0.377}), \n Counter({'-SIZE-': 0.761, '-FEAT-': 0.238}), \n Counter({'-SIZE-': 0.741, '-FEAT-': 0.258})]\n \n z_, forward_ = submission.computeForward(simpleCRF, xs)\n for vec, vec_ in zip( forward, forward_):\n grader.requireIsTrue( Counters.approximateEquals( vec, vec_ ) )\n grader.requireIsEqual( z, z_, 1e-2)", "def xBounds2cvxoptMatrix(p):\n\n #TODO: is reshape/flatten required in newest numpy versions?\n indLB, indUB, indEQ = \\\n where(isfinite(p.lb) & ~(p.lb == p.ub))[0], \\\n where(isfinite(p.ub) & ~(p.lb == p.ub))[0], \\\n where(p.lb == p.ub)[0] \n \n initLenB = Len(p.b)\n initLenBeq = Len(p.beq)\n nLB, nUB, nEQ = Len(indLB), Len(indUB), Len(indEQ)\n\n if nLB>0 or nUB>0:\n A, b = copy(p.A), copy(p.b)\n p.A = zeros([Len(p.b) + nLB+nUB, p.n])\n p.b = zeros(Len(p.b) + nLB+nUB)\n p.b[:Len(b)] = b.flatten() # sometimes flatten is needed when called before runProbSolver(), from tests\n p.A[:Len(b)] = A\n for i in range(len(indLB)):\n p.A[initLenB+i, indLB[i]] = -1\n p.b[initLenB+i] = -p.lb[indLB[i]]\n for i in range(len(indUB)):\n p.A[initLenB+len(indLB)+i, indUB[i]] = 1\n p.b[initLenB+len(indLB)+i] = p.ub[indUB[i]]\n\n if nEQ>0:\n Aeq, beq = copy(p.Aeq), copy(p.beq)\n p.Aeq = zeros([Len(p.beq) + nEQ, p.n])\n p.beq = zeros(Len(p.beq) + nEQ)\n p.beq[:Len(beq)] = beq\n p.Aeq[:Len(beq)] = Aeq\n for i in range(len(indEQ)):\n p.Aeq[initLenBeq+i, indEQ[i]] = 1\n p.beq[initLenBeq+i] = p.lb[indEQ[i]] # = p.ub[indEQ[i]], because they are the same\n\n p.lb = -inf*ones(p.n)\n p.ub = inf*ones(p.n)", "def cross_junctions(I, bounds, Wpts):\n #--- FILL ME IN ---\n\n Ipts = np.zeros((2, 48))\n\n#parameters\n alpha = 0.15 #typically 0.04 to 0.06\n threshold = 1500 #default 2000\n sigma = 2\n ws = 12 #window size for saddle point\n\n#building Harris Detecter\n I = I/255.0\n gradx, 
grady = np.gradient(I)\n IxIx = gaussian_filter(gradx*gradx,sigma)\n IxIy = gaussian_filter(gradx*grady,sigma)\n IyIy = gaussian_filter(grady*grady,sigma)\n print(I.shape)\n\n #get harris score\n cand_score = []\n cand_index = []\n cand = []\n s_cand = []\n\n for j in range(len(I)):\n for i in range(len(I[0])):\n a11 = IxIx[j][i]\n a12 = IxIy[j][i]\n a21 = a12\n a22 = IyIy[j][i]\n A = np.array([[a11, a12],[a21, a22]])\n ev0, ev1 = np.linalg.eigvals(A)\n h_score = ev0*ev1 - alpha*(ev0+ev1)**2\n cand_score.append(-h_score)\n cand_index.append([i, j])\n\n #get the coordinates of the top 5000 scores\n sorted_ind = np.argsort(cand_score)\n sorted_score = np.sort(cand_score).tolist()\n\n for ind in sorted_ind[:threshold]:\n cand.append(cand_index[ind])\n s_cand = sorted_score[:threshold]\n\n\n#clustering\n #using homography to project candidate points to a up-front view\n new_bbox = np.array([[0, 100, 100, 0],[0, 0, 80, 80]])\n H = dlt_homography(bounds, new_bbox)\n cand = np.array(cand).T\n cand = np.vstack((cand, np.ones(cand.shape[1])))\n Ho_cand = np.matmul(H,cand).T\n for pt in Ho_cand:\n pt[0] = pt[0]/pt[2]\n pt[1] = pt[1]/pt[2]\n Ho_cand = Ho_cand[:,:2]\n Ho_cand = Ho_cand.tolist()\n\n #get rid of points that are not in the boundry\n temp_Ho_cand = []\n temp_s_cand = []\n for i in range(len(Ho_cand)):\n pt = Ho_cand[i]\n if (pt[0]>=100) or (pt[0]<0) or (pt[1]>=80) or (pt[1]<0):\n continue\n else:\n temp_Ho_cand.append(pt)\n temp_s_cand.append(s_cand[i])\n Ho_cand = np.array(temp_Ho_cand)\n s_cand = temp_s_cand\n #divide candidates into clusters\n assignment = []\n assignment_score = []\n\n #first put in the point that has the highest score\n assignment.append([Ho_cand[0]])\n assignment_score.append([s_cand[0]])\n for i in range(len(Ho_cand)):\n pt = Ho_cand[i]\n dist = []\n for c in assignment:\n dist.append(np.linalg.norm(pt - c[0]))\n if min(dist) > 6:\n assignment.append([pt])\n assignment_score.append([s_cand[i]])\n\n assignment = np.array(assignment)\n\n #assign points to clusters\n for i in range(len(Ho_cand)):\n pt = Ho_cand[i]\n if (pt[0] == Ho_cand[0][0]) and (pt[1] == Ho_cand[0][1]):\n continue\n dist = []\n for c in assignment:\n dist.append(np.linalg.norm(pt - c[0]))\n index = np.argsort(dist)[-1]\n np.append(assignment[index], pt)\n assignment_score[index].append(s_cand[i])\n\n #get centroids for each cluster\n Ho_centroids = []\n for i in range(len(assignment)):\n cl = assignment[i]\n cl = np.array(cl)\n Ho_centroids.append([np.mean(cl.T[0]),np.mean(cl.T[1])])\n assignment_score[i] = sum(assignment_score[i])\n\n print(len(assignment_score))\n\n Ho_centroids = np.array(Ho_centroids)\n #get rid of edge points\n\n xmin = np.amin(Ho_centroids.T[0])\n xmax = np.amax(Ho_centroids.T[0]) \n ymin = np.amin(Ho_centroids.T[1])\n ymax = np.amax(Ho_centroids.T[1])\n\n final_cand = []\n final_score = []\n for i in range(len(Ho_centroids)):\n pt = Ho_centroids[i]\n if (abs(pt[0] - xmin) <= 3) or (abs(pt[0] - xmax) <= 3) or (abs(pt[1] - ymin) <= 3) or (abs(pt[1] - ymax) <= 3):\n continue\n else:\n final_cand.append(pt)\n final_score.append(assignment_score[i])\n print(\"Number of corner found: \")\n print(len(final_cand))\n\n #get rid of fake corners\n if (len(final_cand)>48):\n ultimate_cand =[]\n for ind in np.argsort(final_score)[:48]:\n ultimate_cand.append(final_cand[ind])\n final_cand = ultimate_cand\n print(\"real corners count:\", len(ultimate_cand))\n\n\n #sort the points\n final_cand = np.array(final_cand)\n y_sort_ind = np.argsort(final_cand.T[1])\n final_cand = 
final_cand.tolist()\n rows = []\n for i in range(6):\n row = []\n for ind in y_sort_ind[i*8:(i+1)*8]:\n row.append(final_cand[ind])\n rows.append(row)\n\n ordered = []\n for row in rows:\n r = []\n x_sort_ind = np.argsort(np.array(row).T[0])\n for ind in x_sort_ind:\n r.append(row[ind])\n ordered.append(r)\n\n final_cand = []\n for row in ordered:\n for pt in row:\n final_cand.append(pt)\n \n\n\n #get coordinates of the centroids in the original frame\n Ho_centroids = np.array(final_cand)\n\n centroids = np.vstack((Ho_centroids.T, np.ones(Ho_centroids.shape[0])))\n centroids = np.matmul(np.linalg.inv(H), centroids).T\n for pt in centroids:\n pt[0] = int(pt[0]/pt[2])\n pt[1] = int(pt[1]/pt[2])\n centroids = centroids[:,:2]\n\n#finding saddle points around the centroids\n saddle_points = []\n for pt in centroids:\n img = I[int(pt[1]-ws):int(pt[1]+ws), int(pt[0]-ws):int(pt[0]+ws)]\n saddle = saddle_point(img)\n saddle = [saddle[0][0]+pt[0]-ws, saddle[1][0]+pt[1]-ws]\n saddle_points.append(saddle)\n\n saddle_points = np.array(saddle_points)\n #------------------\n print(saddle_points.T)\n return saddle_points.T" ]
[ "0.6203597", "0.5916464", "0.5894118", "0.5867515", "0.5715989", "0.56956524", "0.56905115", "0.5686345", "0.56403846", "0.55984086", "0.5590803", "0.5577823", "0.5559308", "0.5542196", "0.5525735", "0.55202436", "0.55189615", "0.55174667", "0.5481329", "0.5479639", "0.54680353", "0.5448406", "0.54449385", "0.5394492", "0.5389445", "0.53708994", "0.53643274", "0.5359837", "0.53582895", "0.53452563", "0.53446096", "0.5342537", "0.5342229", "0.53416127", "0.5330309", "0.5321422", "0.53083634", "0.52950096", "0.5288007", "0.5286932", "0.5283794", "0.5276441", "0.5273803", "0.5268442", "0.52652687", "0.52608323", "0.5253563", "0.5252607", "0.52508545", "0.5250511", "0.524654", "0.52464396", "0.52296376", "0.52270746", "0.5225984", "0.5213786", "0.52124536", "0.521023", "0.52078915", "0.52047116", "0.5202165", "0.5200002", "0.5199372", "0.519302", "0.5192227", "0.5167348", "0.5164294", "0.5163783", "0.5162571", "0.51602423", "0.51583916", "0.51523244", "0.5149355", "0.51384705", "0.5133454", "0.51320696", "0.5131829", "0.51299536", "0.51282793", "0.5128032", "0.51278335", "0.5126323", "0.5121947", "0.5117848", "0.51168174", "0.51156133", "0.5113494", "0.5107247", "0.5106238", "0.5105374", "0.5095095", "0.5093701", "0.50901765", "0.5087117", "0.5085116", "0.5084038", "0.5070339", "0.5063991", "0.50629747", "0.5062934" ]
0.699575
0
=========================================================== DateFormatedSQL(x) =========================================================== this function converts the date read from a list to a datetime format
def DateFormatedSQL(x): x=[i[0] for i in x] x1=[] for i in x: if len(i)==19: x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) )) # elif len(i)==13: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) )) # else: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) )) # del i,x return x1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_format_from_input_to_datetime(list_d_t_t):\n data_output = []\n\n for row in list_d_t_t:\n data_output.append([datetime.datetime.strptime(row[0] + \" \" + row[1], \"%Y-%m-%d %H:%M:%S\"),\n datetime.datetime.strptime(row[0] + \" \" + row[2], \"%Y-%m-%d %H:%M:%S\")])\n\n return data_output", "def sql_date(date):\n return \"to_date('{}', 'dd.mm.yyyy')\".format(date)", "def DateFormated(x):\n \n x1=[]\n for i in x:\n if len(i)==19:\n x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) ))\n# elif len(i)==13:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) ))\n# else:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) ))\n# del i,x\n return x1", "def datetime_to_sql(connection, obj):\n return connection.string_literal(datetime_to_str(obj))", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def get_date():\n temp = pd.read_sql_query(_query['date'], connect())\n return temp.values", "def to_date(d, date_format = \"%Y-%m-%d %H:%M:%S.%f\"):\n if type(d) == pd.core.series.Series:\n d = list(d)\n if type(d) == list:\n return [datetime.strptime(date,date_format) if type(date) == str else date for date in d]\n elif type(d) == str:\n return datetime.strptime(d,date_format)\n else:\n raise ValueError(\"Either String or list of Strings is accepted.\")", "def convert_datetime_objs(list_of_dates):\n datetime_list = []\n for date in list_of_dates:\n date_obj = datetime.datetime.strptime(date, '%d.%m.%Y')\n datetime_list.append(date_obj)\n return datetime_list", "def datetimefstr(date_list, datetimeformat, longdatetimeformat):\n try:\n # including year\n parts = longdatetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, longdatetimeformat)\n for _ in range(parts):\n date_list.pop(0)\n except ValueError:\n # without year\n parts = datetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, datetimeformat)\n if dtstart.timetuple()[0] == 1900:\n dtstart = datetime(date.today().timetuple()[0],\n *dtstart.timetuple()[1:5])\n # if start date lies in the past use next year\n #if dtstart < datetime.today():\n #dtstart = datetime(dtstart.timetuple()[0] + 1,\n #*dtstart.timetuple()[1:6])\n for _ in range(parts):\n date_list.pop(0)\n return dtstart", "def timefstr(date_list, timeformat):\n time_start = time.strptime(date_list[0], timeformat)\n time_start = dtime(*time_start[3:5])\n day_start = date.today()\n dtstart = datetime.combine(day_start, time_start)\n date_list.pop(0)\n return dtstart", "def fixDate(weatherRDDRecord):\n fieldList = weatherRDDRecord.split(\",\")\n fieldList = [i.replace('\\\"', '') for i in fieldList] #remove quotation marks\n fieldList[0] = fieldList[0].replace('-', '/')\n \n swapDateOrder = fieldList[0].split('/')\n fieldList[0] = swapDateOrder[2] + '/' + swapDateOrder[1] + '/' + swapDateOrder[0]\n \n return (fieldList[0],(fieldList[1:]))", "def convert_column_dates2str(self, info_in, output='list'):\n if hasattr(info_in, 'keys'):\n items = [(el, el) for el in self._columns.keys()]\n elif hasattr(info_in, '__getitem__'):\n items = [(ii, el) for ii,el in enumerate(self._columns.keys())]\n else:\n raise Exception('Only accepts dict, dict or list')\n \n if output == 'dict':\n return dict([(el1, self.date2str(info_in[el0])) if self.column_is_date[el1] else (el1, info_in[el0]) for el0, el1 in 
items])\n elif output == 'list':\n return [self.date2str(info_in[el0]) if self.column_is_date[el1] else info_in[el0] for el0, el1 in items]\n else:\n raise Exception('output type %s unkown'%output)", "def convert_date_string(df,col_name):\n df[col_name] = pd.to_datetime(df[col_name], infer_datetime_format=True)\n return df", "def build_date():\n def r(x):\n return tuple(ord(i) for i in x)\n return r", "def __call__(self, x: Sequence[datetime]) -> Sequence[str]:\n if self.tz is not None:\n x = [d.astimezone(self.tz) for d in x]\n return [d.strftime(self.fmt) for d in x]", "def sas_date_converter(row, base_date='1960-01-01'):\n if row is None:\n return row\n return datetime.strptime(base_date, '%Y-%m-%d') + timedelta(int(row))", "def _convert(frame):\n frame = frame.convert_objects(convert_numeric=True)\n for column in frame:\n if column in c.dates:\n frame[column] = frame[column].astype('datetime64')\n return frame", "def _handle_sql_types(value):\n if type(value) is datetime:\n return value.isoformat()\n return str(value)", "def date_trans_z(x):\n \"\"\"2017.01.09->2017/01/09 \"\"\"\n date_list=x.split('.')\n return date_list[0]+'/'+date_list[1]+'/'+date_list[2]", "def transform_datetimes(data: Any) -> Any:\n\n if isinstance(data, (datetime, date)):\n return data.isoformat()\n\n if isinstance(data, (list, tuple)):\n tmp_data = [transform_datetimes(elem) for elem in data]\n\n return tuple(tmp_data) if isinstance(data, tuple) else tmp_data\n\n if isinstance(data, dict):\n for key, val in data.items():\n data[key] = transform_datetimes(val)\n\n return data", "def get_dt_string(type_list):\n output = ''\n for entry in type_list:\n output = output+entry+'64,'\n return output[0:-1]", "def serialize(date):\n # From database to client\n\n # Convert date-object to datetime\n # See: https://stackoverflow.com/questions/1937622/convert-date-to-datetime-in-python\n dt = datetime.combine(date, datetime.min.time())\n date_format = \"%Y-%m-%d\"\n return datetime.strftime(dt, date_format)", "def convert_column_str2dates(self, info_in, output='list'):\n if hasattr(info_in, 'keys'):\n items = [(el, el) for el in self._columns.keys()]\n elif hasattr(info_in, '__getitem__'):\n items = [(ii, el) for el in enumerate(self._columns.keys())]\n else:\n raise Exception('Only accepts dict, dict or list')\n \n if output == 'dict':\n return dict([(el1, self.str2date(info_in[el0])) if self.column_is_date[el1] else (el1, info_in[el0]) for el0, el1 in items])\n elif output == 'list':\n return [self.str2date(info_in[el0]) if self.column_is_date[el1] else info_in[el0] for el0, el1 in items]\n else:\n raise Exception('output type %s unkown'%output)", "def join_date_strings(dates, separator=\"','\", df=\"%d-%m-%Y\"):\n return separator.join([x.strftime(df) for x in dates])", "def change_format_to_database_index(self, date):\n year = date[0:4] + ','\n month = date[4:6]\n day = date[6:8]\n if month[0] == '0':\n month = month[1]\n\n if day[0] == '0':\n day = day[1]\n\n day = ' ' + day + ','\n month = ' ' + month\n\n return year + day + month", "def dateToString(self, date_objs: list) -> list:\n date_strings = []\n try:\n if isinstance(date_objs, list) == False:\n return date_strings\n\n for date_obj in date_objs:\n if isinstance(date_obj, datetime) == False:\n continue\n date_strings.append(datetime.strftime(date_obj, '%d %b %Y'))\n\n return date_strings\n except Exception as e:\n logging.error(e)", "def datetimeify(t):\n if type(t) in [datetime, Timestamp]:\n return t\n fmts = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d', '%Y %m %d %H %M 
%S',]\n for fmt in fmts:\n try:\n return datetime.strptime(t, fmt)\n except ValueError:\n pass\n raise ValueError(\"time data '{:s}' not a recognized format\".format(t))", "def _process_date(self, data):\n def helper(val):\n # Sometime the date has a (1) or (2) following it. Strip that off\n # so that we can successful convert to date.\n s = val.find(\" (\")\n if s >= 0:\n val = val[0:s]\n dv = dt.datetime.strptime(val, '%A, %b %d')\n dv = dv.replace(year=self.start_date.year)\n return dv\n data['Date'] = data['Date'].apply(helper)\n return data", "def format_datetimes(self, datetimes, format=\"%B %d %Y %I:%M %p\"):\n date, times, space_character = datetimes.split(\", \")\n start_time, end_time = times.split(\" - \")\n year = datetime.now().strftime(\"%Y\")\n return (\n datetime.strptime(\n date + \" \" + year + \" \" + start_time.replace(\".\", \"\"), format\n ),\n datetime.strptime(\n date + \" \" + year + \" \" + end_time.replace(\".\", \"\"), format\n ),\n )", "def _date_to_string(v):\n\n if not isinstance(v,(list,tuple)):\n raise InstrumentParameterException('Value %s is not a list, tuple.' % str(v))\n \n if not len(v)==3:\n raise InstrumentParameterException('Value %s is not length 3.' % str(v))\n \n months = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep',\n 'Oct','Nov','Dec']\n day = v[0]\n month = v[1]\n year = v[2]\n \n if len(str(year)) > 2:\n year = int(str(year)[-2:])\n \n if not isinstance(day,int) or day < 1 or day > 31:\n raise InstrumentParameterException('Value %s is not a day of month.' % str(day))\n \n if not isinstance(month,int) or month < 1 or month > 12:\n raise InstrumentParameterException('Value %s is not a month.' % str(month))\n\n if not isinstance(year,int) or year < 0 or year > 99:\n raise InstrumentParameterException('Value %s is not a 0-99 year.' % str(year))\n \n return '%02i-%s-%02i' % (day,months[month-1],year)", "def dates_as_strings(self, dates):\n\n return [i.strftime('%Y-%m-%d') for i in dates]", "def date_to_operate_format(self, date):\n date = date.replace(\" \", \"\")\n date = date.split(',')\n day = date[1]\n month = date[2]\n\n day = self.check_and_repair_right_format(day)\n month = self.check_and_repair_right_format(month)\n\n right_format = date[0] + month + day\n return right_format", "def fix_dates(self, row):\r\n for field in self.date_fields:\r\n if field in row:\r\n if not type(row[field]) is datetime:\r\n try:\r\n row[field] = datetime.fromtimestamp(float(row[field]))\r\n except Exception as e:\r\n row[field] = None", "def calculate_date(x, now):\n\t#now = datetime.datetime.now()\n\tn = int(extract_only_number(x))\n\tif n > 0:\n\t\treturn (now - datetime.timedelta(n)).strftime(\"%d-%m-%Y\")\n\treturn now.strftime(\"%d-%m-%Y\")", "def date_to_list(data_index):\n results = []\n for row in data:\n results.append(datetime.strptime(row[data_index], '%Y-%m-%d'))\n return results", "def modis_to_from_pydatetime(date):\n \n if isinstance(date, (str, unicode)): \n return dt.datetime.strptime(date[1:], '%Y%j').date()\n return dt.datetime.strftime(date, 'A%Y%j')", "def date_trans_x(x):\n \"\"\"2017.01.09->2017.1.09 \"\"\"\n date_list=x.split('.')\n return date_list[0]+'.'+str(int(date_list[1]))+'.'+date_list[2]", "def sql(df_list: List[pd.DataFrame], query: str):\n # TODO [#8]: add example in docs for sql\n\n # Pandasql looks up tables by names given in query. 
Here we are passed a list of dfs without names.\n # Therefore we need to extract the names of the tables from the query, then assign\n # those names to the dfs in df_list in the locals dictionary.\n table_names = _extract_table_names_from_sql(query)\n for i, name in enumerate(table_names):\n locals().update({name: df_list[i]})\n\n # Get date variable column names\n datevars: List[str] = []\n for d in df_list:\n datevars += _get_datetime_cols(d)\n datevars = list(set(datevars)) # remove duplicates\n\n merged = PandaSQL()(query)\n\n # Convert back to datetime\n for date in [d for d in datevars if d in merged.columns]:\n merged[date] = pd.to_datetime(merged[date])\n return merged", "def reformatdate(self, date):\n# print('DATE', self.__str__())\n if 'dummy' in date:\n return '1970_01_01'\n# datesplit = date.split('/')\n datesplit = date.split('-') # Really? This had to be changed?!\n# print('DATE', date, datesplit)\n\n # dates used to be as follows\n# month = datesplit[0]\n# day = datesplit[1]\n# year = datesplit[2]\n\n # dates as of 12 June 2018 now done this way\n year = datesplit[0]\n month = datesplit[1]\n day = datesplit[2]\n\n return year + '_' + month + '_' + day", "def format_date(self, date_val):\n try:\n if type(date_val) is not datetime:\n d = date.fromisoformat(date_val[0:10])\n else:\n d = date_val\n return d.strftime('%Y-%m-%d')\n except Exception as e:\n self.error((str(e)))", "def convert_date(raw_date):\n if raw_date:\n date = datetime.strptime(raw_date, \"%Y-%m-%d\")\n return date.strftime(\"%m/%d/%YZ\")", "def format_date_sortkey(self, data):\n return self.input['start_date'].date().strftime('%Y%m%d')", "def convertSODate(datenum):\n #Date numbers seem to start with 0 = 2001-01-01\n base_date = datetime.date(2001, 1, 1)\n #add key from the spot on object to this base date to get the date\n record_date = base_date + datetime.timedelta(days=int(datenum))\n record_date = record_date.isoformat()\n return record_date", "def examples():\n lst = ['today +7d', '11:20 +4d', '2014-02-15 +1w', 'jan 10', \\\n 'jan 10 2pm', '10 jan at 10am', 'now-3d', '+7d', '7', '11:15',\\\n '11:15p', '11p', 'aug', 'jan 5', 'aug 2019', 'now', \"tomorrow 2pm\",\\\n \"now +4h\", 'today + 1w', '1w', '+3y', 'w', '1w']\n for tpl in zip(lst, [phrase_to_datetime(str_e) for str_e in lst]):\n print tpl", "def _reduce_datetimes(row):\n\n row = list(row)\n\n for i, val in enumerate(row):\n if hasattr(val, \"strftime\"):\n row[i] = val.strftime(\"%Y-%m-%d %H:%M:%S\")\n elif hasattr(val, 'isoformat'):\n row[i] = val.isoformat()\n return tuple(row)", "def to_datetime(*dts):\n return tuple([pd.to_datetime(x) for x in dts])", "def dt64_to_dt(dt64_list):\n\n from datetime import datetime\n ns = 1e-9\n dt_list = []\n for dt64 in dt64_list:\n dt_list.append(datetime.utcfromtimestamp(dt64.astype(int) * ns))\n\n return dt_list", "def singular_sql_dates(master_table, dates, date_col):\n\n dts = [parse_date(x) for x in dates]\n\n # Map the specific dates to the specific years isomg a default dict\n years_dict = defaultdict(list)\n for dt in dts:\n years_dict[dt.year].append(dt)\n\n # Iterate through each of the years and add the trading dates belonging\n # to each query to a specific query, yield this SQL string as a generator\n for year in years_dict:\n # Set up a custom string and then place it inside brackets for\n # Use in the query\n strings = join_date_strings(years_dict[year])\n date_string = \"('%s')\" % strings\n\n # The base query string to use\n query_string = \"\"\" SELECT * FROM %s_%s WHERE %s in %s\"\"\"\n\n # 
Substitute the values into the string\n SQL = query_string % (master_table, year, date_col, date_string)\n\n yield SQL", "def _reformat_date_jan_1999():\n reader = csv.reader(open(\"temperatures_1999.csv\"), delimiter=\";\")\n for (day, month, temp) in reader:\n date = datetime.datetime.strptime(\"-\".join([\"1999\", month, day]), \n \"%Y-%m-%d\")\n print \"%s; %s\" % (date.strftime(\"%Y-%m-%d\"), temp)", "def reformat_date(all_data, min_date):\n all_data[\"date\"] = [datetime.timedelta(x) for x in all_data[\"date\"]]\n all_data[\"date\"] = all_data[\"date\"] + min_date", "def timedelta_to_sql(connection, obj):\n return connection.string_literal(timedelta_to_str(obj))", "def make_date(cls, df: pd.DataFrame, date_field: str) -> pd.DataFrame:\n field_dtype = df[date_field].dtype\n if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n field_dtype = np.datetime64\n if not np.issubdtype(field_dtype, np.datetime64):\n df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)\n return df", "def pretty_date(date):\r\n if not type(date) == datetime:\r\n raise ValueError('the mistake of time format :(')\r\n how_long_days = (NOW - date).days\r\n if how_long_days < 0:\r\n raise ValueError('back to the future :)')\r\n if how_long_days >= 2:\r\n return date.strftime('%m/%d/%y')\r\n how_long_seconds = (NOW - date).seconds + how_long_days * DAY\r\n for time_offset in TIME_OFFSETS:\r\n if how_long_seconds < time_offset.offset:\r\n if time_offset.divider:\r\n how_long_seconds = int(how_long_seconds // time_offset.divider)\r\n result = time_offset.date_str\r\n if '{}' in time_offset.date_str:\r\n result = result.format(how_long_seconds)\r\n break\r\n return result", "def _reformat_date(exp_dates):\n time_fmt = \"%Y-%m-%dT%H:%M:%S\"\n wrong_time_fmt = \"%Y-%m-%d %H:%M:%S\"\n if exp_dates == 'NN':\n return exp_dates\n if exp_dates != 'NN' and not isinstance(exp_dates, list):\n try:\n datetime.strptime(exp_dates, time_fmt)\n except ValueError:\n try:\n exp_dates = datetime.strptime(exp_dates,\n wrong_time_fmt).strftime(time_fmt)\n except ValueError:\n exp_dates = datetime.strptime(exp_dates,\n \"%m/%d/20 %H:%M\").strftime(time_fmt)\n\n if exp_dates != 'NN' and isinstance(exp_dates, list):\n try:\n datetime.strptime(exp_dates[0], time_fmt)\n except ValueError:\n exp_dates = [datetime.strptime(c, wrong_time_fmt).strftime(time_fmt)\n for c in exp_dates]\n\n return exp_dates", "def fix_dates(df, column=None):\n if isinstance(column, list):\n for x in column:\n df[x] = pd.to_datetime(df[x], errors='coerce')\n df[x] = df[x].dt.strftime('%m-%d-%Y')\n df[x].replace('NaT', np.nan, inplace=True)\n return df\n else:\n df[column] = pd.to_datetime(df[column], errors='coerce')\n df[column] = df[column].dt.strftime('%m-%d-%Y')\n df[column].replace('NaT', np.nan, inplace=True)\n return df", "def date_to_python(self, value):\r\n # this throws away fractions of a second\r\n return datetime(*strptime(value[:-5], \"%Y-%m-%dT%H:%M:%S\")[0:6])", "def df_multicolumn_date_to_datetime(row):\n year = row['arrival_date_year']\n month = row['arrival_date_month']\n day = row['arrival_date_day_of_month']\n # create datetime object from string of form \"YearMonthDay\" using full month name\n return datetime.datetime.strptime(f\"{year}{month}{day}\", '%Y%B%d').date()", "def dump_datetime(value):\n if value is None:\n return\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def orm2date(value, tformat=ORM_DATE_FORMAT, default=None):\n if not value:\n return default\n return 
datetime.strptime(value, tformat).date()", "def _get_date(self):\n for fax_in in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_in.date:\n from_dt = datetime.datetime.strptime(str(fax_in.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_in.fax_date = date_planned", "def txfDate(date):\n return date.strftime('%m/%d/%Y')", "def convert_date_of_attendance(attendance):\n if isinstance(attendance,list):\n for a in attendance:\n a.date_of_att = datetime.datetime.strptime(a.DATE_OF_ATTENDANCE,'%d/%m/%Y').date()\n elif isinstance(attendance,models.AttendanceModel):\n attendance.date_of_att = datetime.datetime.strptime\\\n (attendance.DATE_OF_ATTENDANCE, '%d/%m/%Y').date()", "def _tr_cal_date(self, date):\n items = []\n for code in self._datefmt:\n if code == 'Y':\n items += [date.year_str]\n elif code == 'M':\n if '/' in self._datefmt or '.' in self._datefmt:\n month = date.month_num\n if month is not None:\n month = \"{:02d}\".format(month)\n else:\n month = self._monthName(date.month)\n if month is not None:\n items += [month]\n elif code == 'D':\n day = date.day\n if day is not None and ',' in self._datefmt:\n items += [str(\"{:02d},\".format(day))]\n elif day is not None:\n items += [\"{:02d}\".format(day)]\n if '/' in self._datefmt:\n sep = '/'\n elif '.' in self._datefmt:\n sep = '.'\n elif '-' in self._datefmt:\n sep = '-'\n else:\n sep = ' '\n return sep.join(items)", "def wsq_to_txt(table_name, date):\n\tif(table_name == 'visited'):\n\t\tquery = \"select date(f_date_time), substr(dayname(f_date_time),1,2), \" + \\\n\t\t\t\"f_lang_id, f_ns_id, count(*) \" + \\\n\t\t\t\"from Filtered where f_action_id is null \" + \\\n\t\t\t\"and date(f_date_time) = '\" + date.strftime('%Y-%m-%d') + \"' \" + \\\n\t\t\t\"group by date(f_date_time), f_lang_id, f_ns_id;\"\n\telif(table_name == 'saved'):\n\t\tquery = \"select date(f_date_time), substr(dayname(f_date_time),1,2), \" + \\\n\t\t\t\"f_lang_id, f_ns_id, count(*) \" + \\\n\t\t\t\"from Filtered where f_action_id = 2 \" + \\\n\t\t\t\"and date(f_date_time) = '\" + date.strftime('%Y-%m-%d') + \"' \" + \\\n\t\t\t\"group by date(f_date_time), f_lang_id, f_ns_id;\"\n\telif(table_name == 'actions'):\n\t\tquery = \"select date(f_date_time), substr(dayname(f_date_time),1,2), \" + \\\n\t\t\t\"f_action_id, f_lang_id, f_ns_id, count(*) \" + \\\n\t\t\t\"from Filtered where f_action_id in (0, 1, 3, 4) \" + \\\n\t\t\t\"and date(f_date_time) = '\" + date.strftime('%Y-%m-%d') + \"' \" + \\\n\t\t\t\"group by date(f_date_time), f_action_id, f_lang_id, f_ns_id;\"\n\n\tlog_msg4(\"Creando dump para \" + table_name)\n\n\texec_mysql(getConfig().db_name_squidlogs, query=query, dumped=True)\n\n\tlog_msg_ok4()", "def convert_date(date):\n\n if len(date) > 10: date = date[:date.rfind(\"-\")]\n return convf(date)", "def format_datetime(dt):\r\n return dateformat.format(make_naive(dt), 'r')", "def _try_date(set_list, index, nested_dict, dict_keys=[], try_func=_try_set):\n import datetime\n try:\n dt = try_func(None, None, nested_dict, dict_keys) # 2012-07-05T00:00:00+04:00\n dt = datetime.datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%S%z\")\n try_func(set_list, index, str(dt.date()))\n print(str(dt.date())+\" sdfsdfsdf\")\n return dt.date() # Дата присвоения кадастрового номера\n except:\n return ''", "def format_datetime(self, data):\r\n data = make_naive(data)\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_datetime(data)\r\n\r\n return 
data.isoformat()", "def _format_value_date_32A(self, val):\n value_date = val.get('value_date')\n currency = val.get('currency')\n interbank_settled_amount = val.get('interbank_settled_amount')\n date_format = '%y%m%d'\n if value_date and currency and interbank_settled_amount:\n value_date = FSwiftWriterUtils.format_date(value_date, date_format)\n interbank_settled_amount = apply_currency_precision(currency, abs(float(interbank_settled_amount)))\n val = str(value_date) + str(currency) + str(FSwiftMLUtils.float_to_swiftmt(str(interbank_settled_amount)))\n return val", "def dformat(val):\n if isinstance(val, datetime):\n return val.strftime(DATE_FORMAT)", "def datemake(datestring):\n return dtt.datetime.strptime(datestring,'%m/%d/%Y')", "def translate_dates(dates):\r\n formatted_dates = list()\r\n year = dt.today().year\r\n for dat in dates:\r\n if dat == '':\r\n continue\r\n day = dat[:2]\r\n mont = dat[6:]\r\n if int(day) < 10:\r\n day = '0' + day[1]\r\n if mont != '':\r\n # Month from Comuniazo\r\n month = \\\r\n {'enero': '01', 'febrero': '02', 'marzo': '03', 'abril': '04',\r\n 'mayo': '05', 'junio': '06', 'julio': '07', 'agosto': '08',\r\n 'septiembre': '09', 'octubre': '10', 'noviembre': '11', 'diciembre': '12'}[mont]\r\n else:\r\n # Month from Comunio\r\n month = dat[3:5]\r\n\r\n if month + day == '0101' or (formatted_dates and int(month) > formatted_dates[-1].month):\r\n # One year less\r\n year -= 1\r\n\r\n p_date = datetime.strptime('%s-%s-%s' % (year, month, day), \"%Y-%m-%d\").date()\r\n formatted_dates.append(p_date)\r\n return formatted_dates", "def filter_simple_date(value: datetime) -> str:\n return value.strftime(\"%Y-%m-%d\")", "def __replaceDate(self, hql, date):\n #%%escapa\n hql = hql.replace(\"<date>\", date).replace('%', '%%')\n # gerp date-n\n #Re = re.compile(r'<date\\s*([-+]\\s*\\d+)')\n Re = re.compile(r'<date\\s*([-+]\\s*\\d+)\\|?(\\S*?\\s*\\S*?)>')\n l = Re.findall(hql)\n if not l:\n return hql\n\n l = map(lambda x: (int(x[0]), x[1]), l)\n for x in l:\n if x[1]:\n f = ''.join(\n map(lambda c: '%' + c if re.match('^[A-Za-z]', c) else c, x[1]))\n else:\n f = '%Y%m%d'\n stamp = int(time.mktime(time.strptime(\n date, '%Y%m%d'))) + 86400 * x[0]\n\n match = Re.search(hql)\n if not match:\n continue\n\n # replace <date-n|[Ymd]> to specific time.\n sdate = time.strftime(f, time.localtime(stamp))\n hql = hql.replace(match.group(), str(sdate))\n\n return hql", "def string_to_datetime(dataframe):\n\n print(\"Which variable would you like to convert from a date string to a python date?\")\n existing_variable = input()\n print(\"What would you like to call the new date variable?\")\n new_variable = input()\n\n dataframe[new_variable] = dataframe[existing_variable].dt.strftime('%Y-%m-%d')\n\n return dataframe", "def date_parser(dates):\n\n #splitting the dates(containing datetime data) list and returning only the datetime\n return([item.split()[0] for item in dates])\n pass", "def convert_date_to_string(date_input):\n if isinstance(date_input, date):\n return date_input.strftime(\"%Y-%m-%d\")\n else:\n raise TypeError(\"Input {0} is not a date object\".format(type(date_input)))", "def get_dates(raw_table) -> \"list of dates\":\n dates = []\n found_first = False\n for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]):\n if dstr:\n if len(dstr.split(\"/\")) == 3:\n d = datetime.datetime.strptime(dstr, '%m/%d/%Y')\n elif len(dstr.split(\"-\")) == 3:\n d = datetime.datetime.strptime(dstr, '%Y-%m-%d')\n else:\n # Not necessarily an error, could just be a 
non-date cell\n logging.debug(\"unknown date-format: {}\".format(dstr))\n continue\n dates.append(d)\n if not found_first:\n found_first = True\n logging.debug(\"Found first date: '{}' at i: {}\".format(d.isoformat(), i))\n elif found_first:\n logging.debug(\"Last date: {}\".format(d))\n break\n return dates", "def _format_list_for_query(input_list):\n return (\n \", \".join(input_list).replace(\" \", \"\").replace(\"'\", \"\").replace(\",\", \"%2C\")\n )", "def encode_datetime(self, datetime_obj):\n if isinstance(datetime_obj, np.datetime64):\n datetime_obj = pd.Timestamp(datetime_obj).to_pydatetime()\n\n if isinstance(datetime_obj, dt.datetime):\n return datetime_obj.strftime(\"%Y%m%dT%H%M%S\")\n elif isinstance(datetime_obj, dt.date):\n return datetime_obj.strftime(\"%Y%m%d\")\n else:\n return datetime_obj", "def sqllist(lst):\n if isinstance(lst, basestring): \n return lst\n else:\n return ', '.join(lst)", "def transform_date(date):\n if type(date) == str:\n return date\n\n formatted_string = date.strftime(\"%d/%m/%Y\")\n\n return formatted_string", "def convert_date_column(datestring):\n return datetime.datetime.strptime(datestring.strip(), \"%b-%Y\").date()", "def datefixer(ds):\n\n\n\t# ========== create the new dates ==========\n\tyear = ds.Year\n\n\t# +++++ set up the list of dates +++++\n\tdates = OrderedDict()\n\ttm = [dt.datetime(int(year) , int(np.floor(tm)), int(tm%1*30+1)) for tm in ds.time]\n\tdates[\"time\"] = pd.to_datetime(tm)\n\n\tdates[\"calendar\"] = 'standard'\n\tdates[\"units\"] = 'days since 1900-01-01 00:00'\n\t\n\tdates[\"CFTime\"] = date2num(\n\t\ttm, calendar=dates[\"calendar\"], units=dates[\"units\"])\n\n\treturn dates", "def format_date(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_date(data)\r\n\r\n return data.isoformat()", "def parse_date_and_time(row):\n datetime_regex = re.compile(r'\\d{8}T\\d{4}')\n date_and_time = datetime_regex.search(row)\n return '\\'' + date_and_time.group() + '\\''", "def construct_date_list(start=None, num=1):\n if not start:\n start = time.strftime(\"%Y%m%d\", time.gmtime(time.time() - num * 60 * 60 * 24))\n\n elif len(start) != 8:\n raise Exception(\"Date is not in expected format!\")\n\n startdatetime = datetime.datetime.strptime(start, '%Y%m%d')\n\n datelist = [startdatetime + datetime.timedelta(days=i) for i in range(0, num)]\n return [date.strftime('%Y%m%d') for date in datelist]", "def _datetime_to_query(dt):\n timestamp = timegm(dt.timetuple())\n return u\"{whole}.{part:06d}\".format(\n whole=timestamp,\n part=dt.microsecond,\n )", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def convert_date_time(self, dt):\n return datetime.fromtimestamp(dt).strftime(\"%Y-%m-%d\")", "def get_datetime_xs(df):\n\n # check the column exists\n if 'date' not in df.columns:\n raise RuntimeError(\"Couldn't find column 'date' in input df\")\n\n # safely read date column and convert to datetime objects\n try:\n xs = [datetime.datetime.strptime(d, '%Y-%m-%d').date() for d in df.date]\n except:\n # if the time series has been resampled 
the index is a TimeStamp object\n xs = [datetime.datetime.strptime(d._date_repr, '%Y-%m-%d').date() for d in df.date]\n\n return xs", "def format_date(d):\n if type(d) == str:\n d = dateutil_parse(d)\n return d.isoformat()", "def format_date(d):\r\n # workaround because Django's dateformat utility requires a datetime\r\n # object (not just date)\r\n dt = aware_datetime(d.year, d.month, d.day, 0, 0, 0)\r\n return dateformat.format(dt, 'j M Y')", "def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')", "def _get_date(self):\n for fax_out in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_out.date:\n from_dt = datetime.datetime.strptime(str(fax_out.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_out.fax_date = date_planned", "def get_datetime(df, col_name, dayfirst=False, yearfirst=False, use_format=False):\n if use_format:\n format = \"%d/%m/%Y\"\n return pd.to_datetime(df[col_name], dayfirst=dayfirst, yearfirst=yearfirst, format=format)", "def dateTimeConvert(date):\n dateTimeConvert = datetime.strftime(date ,'%d/%m/%Y %H:%M:%S')\n #Format -> d/m/Y H:M:S\n return dateTimeConvert" ]
[ "0.66734904", "0.65000284", "0.6259414", "0.59757656", "0.5600508", "0.5579302", "0.5578522", "0.5551475", "0.5513122", "0.54512274", "0.5435365", "0.52705914", "0.52298", "0.5214014", "0.5199284", "0.51939476", "0.5177129", "0.5144611", "0.51139647", "0.5111645", "0.5084982", "0.50844026", "0.50539654", "0.50317484", "0.5007062", "0.50055915", "0.5004682", "0.5003799", "0.49934232", "0.494639", "0.4941801", "0.49395546", "0.49319535", "0.49146414", "0.49083698", "0.48996976", "0.4896751", "0.489572", "0.4893967", "0.4892776", "0.48845044", "0.48837423", "0.48725617", "0.4872267", "0.48578805", "0.4848264", "0.48460853", "0.48317948", "0.48312882", "0.4831218", "0.48292738", "0.48240146", "0.4820597", "0.48179454", "0.48131344", "0.4812471", "0.48089874", "0.4803788", "0.4799662", "0.47903845", "0.4787351", "0.47846282", "0.47812128", "0.4771975", "0.4771897", "0.4769851", "0.4765369", "0.4758621", "0.47549957", "0.4750448", "0.47488025", "0.47460482", "0.47424024", "0.47404724", "0.47345945", "0.47286168", "0.47206166", "0.47182706", "0.47162825", "0.47155768", "0.4714964", "0.47103804", "0.47077122", "0.47053674", "0.470376", "0.47031957", "0.47007662", "0.4700221", "0.46952316", "0.46952316", "0.46952316", "0.46952316", "0.4693278", "0.46930197", "0.46915612", "0.46887812", "0.468706", "0.46755534", "0.46699256", "0.4665158" ]
0.7940835
0
=========================================================== dateformated(x) =========================================================== this function converts the date read from a list to a datetime format
def DateFormated(x): x1=[] for i in x: if len(i)==19: x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) )) # elif len(i)==13: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) )) # else: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) )) # del i,x return x1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_format_from_input_to_datetime(list_d_t_t):\n data_output = []\n\n for row in list_d_t_t:\n data_output.append([datetime.datetime.strptime(row[0] + \" \" + row[1], \"%Y-%m-%d %H:%M:%S\"),\n datetime.datetime.strptime(row[0] + \" \" + row[2], \"%Y-%m-%d %H:%M:%S\")])\n\n return data_output", "def DateFormatedSQL(x):\n x=[i[0] for i in x]\n \n x1=[]\n for i in x:\n if len(i)==19:\n x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) ))\n# elif len(i)==13:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) ))\n# else:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) ))\n# del i,x\n return x1", "def convert_datetime_objs(list_of_dates):\n datetime_list = []\n for date in list_of_dates:\n date_obj = datetime.datetime.strptime(date, '%d.%m.%Y')\n datetime_list.append(date_obj)\n return datetime_list", "def datetimefstr(date_list, datetimeformat, longdatetimeformat):\n try:\n # including year\n parts = longdatetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, longdatetimeformat)\n for _ in range(parts):\n date_list.pop(0)\n except ValueError:\n # without year\n parts = datetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, datetimeformat)\n if dtstart.timetuple()[0] == 1900:\n dtstart = datetime(date.today().timetuple()[0],\n *dtstart.timetuple()[1:5])\n # if start date lies in the past use next year\n #if dtstart < datetime.today():\n #dtstart = datetime(dtstart.timetuple()[0] + 1,\n #*dtstart.timetuple()[1:6])\n for _ in range(parts):\n date_list.pop(0)\n return dtstart", "def to_date(d, date_format = \"%Y-%m-%d %H:%M:%S.%f\"):\n if type(d) == pd.core.series.Series:\n d = list(d)\n if type(d) == list:\n return [datetime.strptime(date,date_format) if type(date) == str else date for date in d]\n elif type(d) == str:\n return datetime.strptime(d,date_format)\n else:\n raise ValueError(\"Either String or list of Strings is accepted.\")", "def timefstr(date_list, timeformat):\n time_start = time.strptime(date_list[0], timeformat)\n time_start = dtime(*time_start[3:5])\n day_start = date.today()\n dtstart = datetime.combine(day_start, time_start)\n date_list.pop(0)\n return dtstart", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def _reformat_date(exp_dates):\n time_fmt = \"%Y-%m-%dT%H:%M:%S\"\n wrong_time_fmt = \"%Y-%m-%d %H:%M:%S\"\n if exp_dates == 'NN':\n return exp_dates\n if exp_dates != 'NN' and not isinstance(exp_dates, list):\n try:\n datetime.strptime(exp_dates, time_fmt)\n except ValueError:\n try:\n exp_dates = datetime.strptime(exp_dates,\n wrong_time_fmt).strftime(time_fmt)\n except ValueError:\n exp_dates = datetime.strptime(exp_dates,\n \"%m/%d/20 %H:%M\").strftime(time_fmt)\n\n if exp_dates != 'NN' and isinstance(exp_dates, list):\n try:\n datetime.strptime(exp_dates[0], time_fmt)\n except ValueError:\n exp_dates = [datetime.strptime(c, wrong_time_fmt).strftime(time_fmt)\n for c in exp_dates]\n\n return exp_dates", "def date_parser(dates):\n\n #splitting the dates(containing datetime data) list and returning only the datetime\n return([item.split()[0] for item in dates])\n pass", "def date_trans_z(x):\n \"\"\"2017.01.09->2017/01/09 \"\"\"\n date_list=x.split('.')\n return date_list[0]+'/'+date_list[1]+'/'+date_list[2]", "def reformatdate(self, date):\n# print('DATE', 
self.__str__())\n if 'dummy' in date:\n return '1970_01_01'\n# datesplit = date.split('/')\n datesplit = date.split('-') # Really? This had to be changed?!\n# print('DATE', date, datesplit)\n\n # dates used to be as follows\n# month = datesplit[0]\n# day = datesplit[1]\n# year = datesplit[2]\n\n # dates as of 12 June 2018 now done this way\n year = datesplit[0]\n month = datesplit[1]\n day = datesplit[2]\n\n return year + '_' + month + '_' + day", "def _tr_cal_date(self, date):\n items = []\n for code in self._datefmt:\n if code == 'Y':\n items += [date.year_str]\n elif code == 'M':\n if '/' in self._datefmt or '.' in self._datefmt:\n month = date.month_num\n if month is not None:\n month = \"{:02d}\".format(month)\n else:\n month = self._monthName(date.month)\n if month is not None:\n items += [month]\n elif code == 'D':\n day = date.day\n if day is not None and ',' in self._datefmt:\n items += [str(\"{:02d},\".format(day))]\n elif day is not None:\n items += [\"{:02d}\".format(day)]\n if '/' in self._datefmt:\n sep = '/'\n elif '.' in self._datefmt:\n sep = '.'\n elif '-' in self._datefmt:\n sep = '-'\n else:\n sep = ' '\n return sep.join(items)", "def date_to_operate_format(self, date):\n date = date.replace(\" \", \"\")\n date = date.split(',')\n day = date[1]\n month = date[2]\n\n day = self.check_and_repair_right_format(day)\n month = self.check_and_repair_right_format(month)\n\n right_format = date[0] + month + day\n return right_format", "def datemake(datestring):\n return dtt.datetime.strptime(datestring,'%m/%d/%Y')", "def txfDate(date):\n return date.strftime('%m/%d/%Y')", "def build_date():\n def r(x):\n return tuple(ord(i) for i in x)\n return r", "def translate_dates(dates):\r\n formatted_dates = list()\r\n year = dt.today().year\r\n for dat in dates:\r\n if dat == '':\r\n continue\r\n day = dat[:2]\r\n mont = dat[6:]\r\n if int(day) < 10:\r\n day = '0' + day[1]\r\n if mont != '':\r\n # Month from Comuniazo\r\n month = \\\r\n {'enero': '01', 'febrero': '02', 'marzo': '03', 'abril': '04',\r\n 'mayo': '05', 'junio': '06', 'julio': '07', 'agosto': '08',\r\n 'septiembre': '09', 'octubre': '10', 'noviembre': '11', 'diciembre': '12'}[mont]\r\n else:\r\n # Month from Comunio\r\n month = dat[3:5]\r\n\r\n if month + day == '0101' or (formatted_dates and int(month) > formatted_dates[-1].month):\r\n # One year less\r\n year -= 1\r\n\r\n p_date = datetime.strptime('%s-%s-%s' % (year, month, day), \"%Y-%m-%d\").date()\r\n formatted_dates.append(p_date)\r\n return formatted_dates", "def date_trans_x(x):\n \"\"\"2017.01.09->2017.1.09 \"\"\"\n date_list=x.split('.')\n return date_list[0]+'.'+str(int(date_list[1]))+'.'+date_list[2]", "def buildDate(date):\n parts = date.split(\"-\")\n yDate = parts[1] + \" \" + parts[2] + ', ' + parts[0]\n return yDate", "def convert_date_of_attendance(attendance):\n if isinstance(attendance,list):\n for a in attendance:\n a.date_of_att = datetime.datetime.strptime(a.DATE_OF_ATTENDANCE,'%d/%m/%Y').date()\n elif isinstance(attendance,models.AttendanceModel):\n attendance.date_of_att = datetime.datetime.strptime\\\n (attendance.DATE_OF_ATTENDANCE, '%d/%m/%Y').date()", "def convert_date(raw_date):\n if raw_date:\n date = datetime.strptime(raw_date, \"%Y-%m-%d\")\n return date.strftime(\"%m/%d/%YZ\")", "def trost2date(trost_date):\n year, month, day = (int(val) for val in trost_date.split('-'))\n return datetime.date(year, month, day)", "def datetimeify(t):\n if type(t) in [datetime, Timestamp]:\n return t\n fmts = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d', '%Y %m %d %H %M 
%S',]\n for fmt in fmts:\n try:\n return datetime.strptime(t, fmt)\n except ValueError:\n pass\n raise ValueError(\"time data '{:s}' not a recognized format\".format(t))", "def _reformat_date_jan_1999():\n reader = csv.reader(open(\"temperatures_1999.csv\"), delimiter=\";\")\n for (day, month, temp) in reader:\n date = datetime.datetime.strptime(\"-\".join([\"1999\", month, day]), \n \"%Y-%m-%d\")\n print \"%s; %s\" % (date.strftime(\"%Y-%m-%d\"), temp)", "def format_datetime(self, data):\r\n data = make_naive(data)\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_datetime(data)\r\n\r\n return data.isoformat()", "def format_date(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_date(data)\r\n\r\n return data.isoformat()", "def convertDate(indate):\n a = datetime.datetime.fromtimestamp(indate / 1000.0)\n a_str = a.strftime('%m/%d/%y')\n return datetime.datetime.strptime(a_str, '%m/%d/%y').date()", "def fixDate(weatherRDDRecord):\n fieldList = weatherRDDRecord.split(\",\")\n fieldList = [i.replace('\\\"', '') for i in fieldList] #remove quotation marks\n fieldList[0] = fieldList[0].replace('-', '/')\n \n swapDateOrder = fieldList[0].split('/')\n fieldList[0] = swapDateOrder[2] + '/' + swapDateOrder[1] + '/' + swapDateOrder[0]\n \n return (fieldList[0],(fieldList[1:]))", "def format_datetimes(self, datetimes, format=\"%B %d %Y %I:%M %p\"):\n date, times, space_character = datetimes.split(\", \")\n start_time, end_time = times.split(\" - \")\n year = datetime.now().strftime(\"%Y\")\n return (\n datetime.strptime(\n date + \" \" + year + \" \" + start_time.replace(\".\", \"\"), format\n ),\n datetime.strptime(\n date + \" \" + year + \" \" + end_time.replace(\".\", \"\"), format\n ),\n )", "def _process_date(self, data):\n def helper(val):\n # Sometime the date has a (1) or (2) following it. 
Strip that off\n # so that we can successful convert to date.\n s = val.find(\" (\")\n if s >= 0:\n val = val[0:s]\n dv = dt.datetime.strptime(val, '%A, %b %d')\n dv = dv.replace(year=self.start_date.year)\n return dv\n data['Date'] = data['Date'].apply(helper)\n return data", "def gen_date(date):\n date = date.split(',')\n month, day = [x.strip() for x in date[0].split(' ')]\n year = date[1].strip()\n day, year = map(int, [day, year])\n date = datetime.date(year, MONTHS[month.capitalize()], day)\n return date", "def __call__(self, x: Sequence[datetime]) -> Sequence[str]:\n if self.tz is not None:\n x = [d.astimezone(self.tz) for d in x]\n return [d.strftime(self.fmt) for d in x]", "def convert_str2date(date):\n import datetime\n date = str(date)\n year = int(date[0:4])\n month = int(date[4:6])\n day = int(date[6:8])\n return datetime.datetime(year,month,day)", "def date_to_python(self, value):\r\n # this throws away fractions of a second\r\n return datetime(*strptime(value[:-5], \"%Y-%m-%dT%H:%M:%S\")[0:6])", "def convert(date):\n converted_date = datetime.datetime.strptime(date, \n \"%Y-%m-%d\").date()\n return converted_date", "def convert_date_type(dates):\n try:\n return datetime.strptime(dates, '%Y-%m-%d')\n except ValueError:\n return datetime.strptime(dates, '%d/%m/%Y')", "def dateTimeConvert(date):\n dateTimeConvert = datetime.strftime(date ,'%d/%m/%Y %H:%M:%S')\n #Format -> d/m/Y H:M:S\n return dateTimeConvert", "def str2date(date):\n return datetime.datetime.strptime(date, \"%m/%d/%Y\").date()", "def dateToString(self, date_objs: list) -> list:\n date_strings = []\n try:\n if isinstance(date_objs, list) == False:\n return date_strings\n\n for date_obj in date_objs:\n if isinstance(date_obj, datetime) == False:\n continue\n date_strings.append(datetime.strftime(date_obj, '%d %b %Y'))\n\n return date_strings\n except Exception as e:\n logging.error(e)", "def transform_date(date):\n if type(date) == str:\n return date\n\n formatted_string = date.strftime(\"%d/%m/%Y\")\n\n return formatted_string", "def examples():\n lst = ['today +7d', '11:20 +4d', '2014-02-15 +1w', 'jan 10', \\\n 'jan 10 2pm', '10 jan at 10am', 'now-3d', '+7d', '7', '11:15',\\\n '11:15p', '11p', 'aug', 'jan 5', 'aug 2019', 'now', \"tomorrow 2pm\",\\\n \"now +4h\", 'today + 1w', '1w', '+3y', 'w', '1w']\n for tpl in zip(lst, [phrase_to_datetime(str_e) for str_e in lst]):\n print tpl", "def modis_to_from_pydatetime(date):\n \n if isinstance(date, (str, unicode)): \n return dt.datetime.strptime(date[1:], '%Y%j').date()\n return dt.datetime.strftime(date, 'A%Y%j')", "def reformat_subway_dates(date):\n date_formatted = datetime.datetime.strptime(date, '%m-%d-%y')\n date_formatted = date_formatted.strftime('%Y-%m-%d')\n return date_formatted", "def format_date(self, date_val):\n try:\n if type(date_val) is not datetime:\n d = date.fromisoformat(date_val[0:10])\n else:\n d = date_val\n return d.strftime('%Y-%m-%d')\n except Exception as e:\n self.error((str(e)))", "def transform_datetimes(data: Any) -> Any:\n\n if isinstance(data, (datetime, date)):\n return data.isoformat()\n\n if isinstance(data, (list, tuple)):\n tmp_data = [transform_datetimes(elem) for elem in data]\n\n return tuple(tmp_data) if isinstance(data, tuple) else tmp_data\n\n if isinstance(data, dict):\n for key, val in data.items():\n data[key] = transform_datetimes(val)\n\n return data", "def construct_date_list(start=None, num=1):\n if not start:\n start = time.strftime(\"%Y%m%d\", time.gmtime(time.time() - num * 60 * 60 * 24))\n\n elif len(start) != 
8:\n raise Exception(\"Date is not in expected format!\")\n\n startdatetime = datetime.datetime.strptime(start, '%Y%m%d')\n\n datelist = [startdatetime + datetime.timedelta(days=i) for i in range(0, num)]\n return [date.strftime('%Y%m%d') for date in datelist]", "def preprocess_date(date_):\n if 'JAN' in date_:\n date_ = date_.replace('JAN', '01')\n elif 'FEB' in date_:\n date_ = date_.replace('FEB', '02')\n elif 'MAR' in date_:\n date_ = date_.replace('MAR', '03')\n elif 'APR' in date_:\n date_ = date_.replace('APR', '04')\n elif 'MAY' in date_:\n date_ = date_.replace('MAY', '05')\n elif 'JUN' in date_:\n date_ = date_.replace('JUN', '06')\n elif 'JUL' in date_:\n date_ = date_.replace('JUL', '07')\n elif 'AUG' in date_:\n date_ = date_.replace('AUG', '08')\n elif 'SEP' in date_:\n date_ = date_.replace('SEP', '09')\n elif 'OCT' in date_:\n date_ = date_.replace('OCT', '10')\n elif 'NON' in date_:\n date_ = date_.replace('NON', '11')\n elif 'DEC' in date_:\n date_ = date_.replace('DEC', '12')\n if date_[-2:] > '17':\n date_ = date_[:6] + '19' + date_[-2:]\n else:\n date_ = date_[:6] + '20' + date_[-2:]\n return datetime.strptime(date_, '%d-%m-%Y')", "def calculate_date(x, now):\n\t#now = datetime.datetime.now()\n\tn = int(extract_only_number(x))\n\tif n > 0:\n\t\treturn (now - datetime.timedelta(n)).strftime(\"%d-%m-%Y\")\n\treturn now.strftime(\"%d-%m-%Y\")", "def convert_date(date):\n\n if len(date) > 10: date = date[:date.rfind(\"-\")]\n return convf(date)", "def convert_date(date):\n date = get_nummeric_only(date) \n \n \n if len(date) == 8:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n \n date_time = dt.datetime(year,month,day)\n \n return date_time\n \n if len(date) == 12 or len(date) == 14:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n hour = int(date[8:10])\n minute = int(date[10:12])\n \n date_time = dt.datetime(year,month,day, hour, minute)\n \n return date_time\n else:\n return 0", "def deconstruct_datetime(self, date: datetime) -> List[int]:\n year, month, day, hour, _, _, _, _, _ = date.timetuple()\n return [year, month, day, hour]", "def get_formatted_date(self, date):\n\n formatted_date = date\n\n possible_datetime_formats = [\n \"%Y-%m-%dT%H:%M:%S%z\", # \"2021-10-19T16:46:02Z\"\n \"%a, %d %b %Y %H:%M:%S %z\", # \"Tue, 19 Oct 2021 21:00:13 +0300\"\n \"%a, %d %b %Y %H:%M:%S %Z\", # \"Tue, 19 Oct 2021 18:54:00 GMT\"\n \"%a, %d %b %Y %H:%M:%S\", # \"Tue, 19 Oct 2021 18:54:00\"\n ]\n\n for format in possible_datetime_formats:\n try:\n formatted_date = datetime.strptime(date, format).strftime(\"%Y%m%d\")\n except:\n pass\n return formatted_date", "def dates_as_strings(self, dates):\n\n return [i.strftime('%Y-%m-%d') for i in dates]", "def _date_to_datetime(value):\r\n assert isinstance(value, datetime.date)\r\n return datetime.datetime(value.year, value.month, value.day)", "def _get_date(self):\n for fax_in in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_in.date:\n from_dt = datetime.datetime.strptime(str(fax_in.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_in.fax_date = date_planned", "def serialize_date(value):\n return datetime.strptime(value, '%d %b %Y')", "def format_date(d):\n if type(d) == str:\n d = dateutil_parse(d)\n return d.isoformat()", "def rebuildDate(date):\n parts = date.split(\" \")\n parts[1] = parts[1][:-1]\n eDate = parts[2] + '-' + parts[0] + '-' + parts[1]\n 
return eDate", "def as_date(inp):\n \n out = datetime.datetime.strptime(str(inp), \"%Y%m\")\n out = out.replace(day = 28) + datetime.timedelta(days=4)\n \n return out - datetime.timedelta(days = out.day)", "def _get_date(self):\n for fax_out in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_out.date:\n from_dt = datetime.datetime.strptime(str(fax_out.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_out.fax_date = date_planned", "def any2datetime_date(d):\n return datetime.date(d.year, d.month, d.day)", "def date_parser(dates):\n final_date = []\n for date in dates:\n final_date = final_date + [date[0:10]]\n return final_date", "def convert_date(adate):\n\tprint \"date given: \" + adate\n\t# stuff\n\tprint \"epoch time for date: \"", "def parseDate(self, date):\n\n temp = dateparser.parse(date)\n temp_date = temp.strftime(\"%Y-%m-%d\")\n\n return temp_date", "def date_to_list(data_index):\n results = []\n for row in data:\n results.append(datetime.strptime(row[data_index], '%Y-%m-%d'))\n return results", "def convert_date(self, date=None):\n if date is not None:\n format_str = '%d/%m/%Y'\n converted_date = datetime.strptime(date, format_str)\n return converted_date.date()", "def format_date(date):\n try:\n start_date = datetime.strftime(date, '%m/%d/%Y')\n except (TypeError, ValueError) as e:\n start_date = date\n pass\n return start_date", "def str_2_date(str_date):\n str_format = \"%m/%d/%y\"\n return datetime.strptime(str_date, str_format)", "def fix_date(oldfmt):\n dval = oldfmt.split('/')[-1]\n datev = datetime.strptime(dval, \"%Y-%m-%d\")\n return datev.strftime(\"%B %-d, %Y\")", "def text2date(text):\n text = text.strip()\n text = text.replace('&nbsp;', '')\n time_tuple = time.strptime(text + '10', '%b %d, %Y')\n return datetime.date(*(time_tuple[0:3]))", "def format_dates(date, from_sep=\"/\", to_sep=\"-\"): # Tested [N]\n nums = date.split(from_sep)\n return to_sep.join(nums)", "def date_trans_y(x):\n \"\"\"2017.1.09->2017/ 1/09 \"\"\"\n date_list=x.split('.')\n if(int(date_list[1])<10):\n return date_list[0]+'/ '+str(int(date_list[1]))+'/'+date_list[2]\n else:\n return date_list[0]+'/'+date_list[1]+'/'+date_list[2]", "def _dateFmt(self, string):\n return time.strftime('%m/%d', time.strptime(string, '%B %d, %Y'))", "def get_dates(txt):\n txt = re.sub(r'[^\\w\\s]', '', txt)\n txt_token = txt.split()\n return get_dates_from_token_list(txt_token)", "def convert_to_date(dt, format):\n d_datetime = datetime.strptime(dt, format)\n d_date = date(int(d_datetime.strftime('%Y')), \n int(d_datetime.strftime('%m')), \n int(d_datetime.strftime('%d'))) # I know this is awful, I'm sorry\n return d_date", "def format_datetime(dt):\r\n return dateformat.format(make_naive(dt), 'r')", "def date_parser(dates): \n new_dates = []\n for i in range(len(dates)):\n a = dates[i][:10] \n new_dates.append(a)\n return new_dates", "def convert_date(dt):\n if not isinstance(dt, str):\n dt = str(dt)\n parts = re.split(r'\\.|\\_|\\-', dt)\n if len(parts) == 1:\n parts = str(dt).split('.')\n if len(parts[0]) == 4:\n return '.'.join(reversed(parts))\n return str(dt)", "def unify_date_format(date):\n if type(date) == str:\n try:\n date = dateutil.parser.parse(date) \n except:\n pass\n return date", "def bytes_to_dates(self, date_str):\r\n return mpldates.datestr2num(date_str.decode('utf-8'))", "def convertSODate(datenum):\n #Date numbers seem to start with 0 = 2001-01-01\n base_date = 
datetime.date(2001, 1, 1)\n #add key from the spot on object to this base date to get the date\n record_date = base_date + datetime.timedelta(days=int(datenum))\n record_date = record_date.isoformat()\n return record_date", "def dump_datetime(value):\n if value is None:\n return\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def parseDate(date):\n formats = [\n \"D MMM YY, hh:mm a\", \n \"YYYY-MM-DDTHH:mm:ss+00:00\", \n \"ddd, D MMM YYYY HH:mm:ss +0530\", # NDTV\n \"ddd, D MMM YYYY HH:mm:ss +0100\", # skynews\n \"ddd, D MMM YYYY HH:mm:ss -0400\", # reuters\n \"D MMM, YYYY\", # espn cricket\n \"ddd, D MMM YYYY HH:mm:ss GMT\", # times of india\n \"ddd, D MMM YYYY HH:mm:ss +0200\", # lifrea\n \"ddd, D MMM YYYY HH:mm:ss +0000\", # linux, ubuntu\n \"ddd, D MMM YYYY HH:mm:ss -0700\", # iTunes\n ]\n\n for f in formats:\n try:\n parsed_date = tryDateFormat(date, f)\n return parsed_date.format(\"D MMM YY, hh:mm a\")\n except Exception as e:\n pass\n else:\n return \"Invalid date\"", "def date_prettyfier(self, date):\n units = 'days since 1900-01-01 00:00'\n date = date * 365.25\n date = cftime.num2date(date, units)\n pretty_date = str(date.day)+'/'+str(date.month)+'/'+str(date.year-1900) \n return pretty_date", "def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')", "def fix_dates(line, date_names, headers):\n date_idxs = [headers.index(date_name) for date_name in date_names]\n for date_idx in date_idxs:\n val = line[date_idx]\n if val:\n # Forget times if they appear\n val = val.split(' ')[0]\n\n # Sometimes, miraculously, the val is *not* in American format:\n try:\n datetime.datetime.strptime(val, '%Y-%m-%d')\n # In the correct format!\n line[date_idx] = val\n continue\n except ValueError:\n # In the American format\n pass\n\n try:\n val = datetime.datetime.strptime(val, '%m/%d/%Y')\n except ValueError:\n # No idea what format this is in. Warn and return None\n print(\"Unreadable date {}\".format(val))\n line[date_idx] = None\n continue\n\n # Sometimes people write dates like 4/1/15. 
Bump the years to the modern era\n if val.year < 50:\n val = datetime.datetime(val.year + 2000, val.month, val.day)\n elif val.year < 100:\n val = datetime.datetime(val.year + 1900, val.month, val.day)\n val = val.strftime('%Y-%m-%d')\n line[date_idx] = val", "def convert_date(date_str):\n return datetime.strptime(date_str, \"%d/%m/%Y\")", "def clean_date(raw_time):\n time_stamp = raw_time.split(\" \")\n time_stamp = str(time_stamp[1]+' '+time_stamp[2]+' '+time_stamp[3]+' '+time_stamp[5])\n clean_date_time = parser.parse(time_stamp)\n return clean_date_time", "def test_convert_date(self):\n self.assertEqual(convert_to_date('2015-11-03 13:21:02.071381',\n FORMAT_DATETIME), date(2015, 11, 3))\n self.assertEqual(convert_to_date('03.11.2015', FORMAT_CALENDAR), date(2015, 11, 3))", "def get_date_from_utterance(tokenized_utterance: List[Token],\n year: int = 1993) -> List[datetime]:\n\n dates = []\n\n utterance = ' '.join([token.text for token in tokenized_utterance])\n year_result = re.findall(r'199[0-4]', utterance)\n if year_result:\n year = int(year_result[0])\n trigrams = ngrams([token.text for token in tokenized_utterance], 3)\n for month, tens, digit in trigrams:\n # This will match something like ``september twenty first``.\n day = ' '.join([tens, digit])\n if month in MONTH_NUMBERS and day in DAY_NUMBERS:\n try:\n dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))\n except ValueError:\n print('invalid month day')\n\n bigrams = ngrams([token.text for token in tokenized_utterance], 2)\n for month, day in bigrams:\n if month in MONTH_NUMBERS and day in DAY_NUMBERS:\n # This will match something like ``september first``.\n try:\n dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))\n except ValueError:\n print('invalid month day')\n\n fivegrams = ngrams([token.text for token in tokenized_utterance], 5)\n for tens, digit, _, year_match, month in fivegrams:\n # This will match something like ``twenty first of 1993 july``.\n day = ' '.join([tens, digit])\n if month in MONTH_NUMBERS and day in DAY_NUMBERS and year_match.isdigit():\n try:\n dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[day]))\n except ValueError:\n print('invalid month day')\n if month in MONTH_NUMBERS and digit in DAY_NUMBERS and year_match.isdigit():\n try:\n dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[digit]))\n except ValueError:\n print('invalid month day')\n return dates", "def date_to_ddmmyyyy(dat=\"1981_01_24\", separator=\".\"):\n return f'{dat.split(\"_\")[2]}{separator}{(str(int(dat.split(\"_\")[1]))).zfill(2)}{separator}' \\\n f'{(str(int(dat.split(\"_\")[0]))).zfill(2)}'", "def pretty_date(self, date):\r\n return time.strftime(\"%a, %b %d, %Y\", time.strptime(date,\"%Y%m%d\"))", "def yt_datetime(yt_date_time):\n time_obj = time.strptime(yt_date_time, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n locale_date = time.strftime(\"%x\", time_obj)\n # strip first two digits of four digit year\n short_date = re.sub(r\"(\\d\\d\\D\\d\\d\\D)20(\\d\\d)$\", r\"\\1\\2\", locale_date)\n return time_obj, short_date", "def interpret_date( text ):\n try:\n as_arrow = arrow.get(text, \"MM/DD/YYYY\").replace(\n tzinfo=tz.tzlocal())\n except:\n flask.flash(\"Date '{}' didn't fit expected format 12/31/2001\")\n raise\n return as_arrow.isoformat()", "def convert_time(self, t_variable):\n date_list = []\n times = self.dataset[t_variable].values\n\n for time in times:\n try:\n time = pd.to_datetime(str(time))\n date_list.append(time.strftime('%Y-%m-%dT%H:%M:%SZ'))\n 
except ValueError as ve:\n print(\"Error parsing and converting '%s' variable object to CovJSON compliant string.\" % (t_variable), ve)\n\n return date_list", "def format_datetime(str_date):\n date_fromat = \"%Y-%m-%dT%H:%M:%S\"\n formated_datetime = None\n try:\n datetime_obj = datetime.fromtimestamp(float(str_date)/1000.)\n formated_datetime = datetime_obj.strftime(date_fromat)\n except Exception as exp:\n logger.error('Exception: {} occured while converting date {} into format {}'.format(\n exp,str_date, date_fromat))\n\n return formated_datetime", "def datetime_to_date(element):\r\n try:\r\n result = element.date()\r\n except AttributeError:\r\n result = element\r\n return result", "def _try_date(set_list, index, nested_dict, dict_keys=[], try_func=_try_set):\n import datetime\n try:\n dt = try_func(None, None, nested_dict, dict_keys) # 2012-07-05T00:00:00+04:00\n dt = datetime.datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%S%z\")\n try_func(set_list, index, str(dt.date()))\n print(str(dt.date())+\" sdfsdfsdf\")\n return dt.date() # Дата присвоения кадастрового номера\n except:\n return ''", "def dt64_to_dt(dt64_list):\n\n from datetime import datetime\n ns = 1e-9\n dt_list = []\n for dt64 in dt64_list:\n dt_list.append(datetime.utcfromtimestamp(dt64.astype(int) * ns))\n\n return dt_list", "def date_parser(dates):\n # extract the date only from dates: Olwethu\n date_list = []\n for i in dates:\n i = i.split(' ')\n # append each date to a new list: Olwethu\n date_list.append(i[0])\n \n return date_list" ]
[ "0.73220545", "0.6644235", "0.64673054", "0.63785565", "0.6323779", "0.63159305", "0.6256937", "0.6174311", "0.5986844", "0.58866596", "0.5878423", "0.58775616", "0.58339506", "0.579193", "0.57800907", "0.57769805", "0.5757611", "0.57572365", "0.57409817", "0.5732213", "0.5722158", "0.5721827", "0.56974846", "0.5678983", "0.5675491", "0.5659683", "0.5645013", "0.56444377", "0.56371844", "0.5635159", "0.56315583", "0.5629016", "0.5616264", "0.5613257", "0.5612828", "0.5610813", "0.5604483", "0.5594918", "0.559276", "0.55886567", "0.55878246", "0.55791926", "0.55681765", "0.5558971", "0.5556539", "0.55489814", "0.55321366", "0.55134255", "0.55066586", "0.5479266", "0.54757655", "0.54693776", "0.54598063", "0.54452217", "0.544382", "0.5437103", "0.54230577", "0.54188156", "0.5415565", "0.54024065", "0.5401016", "0.5391929", "0.5386448", "0.5379607", "0.5375183", "0.5365078", "0.53602034", "0.53576064", "0.53491765", "0.5345659", "0.53360564", "0.5327258", "0.5324848", "0.53232974", "0.5314031", "0.53136456", "0.53120357", "0.5311964", "0.5307527", "0.5297301", "0.52962315", "0.5287995", "0.5286612", "0.52853173", "0.52823305", "0.5282284", "0.52821493", "0.52799404", "0.5278009", "0.5275524", "0.5273218", "0.52646196", "0.5253517", "0.5251564", "0.525042", "0.524789", "0.5240238", "0.52376276", "0.52244353", "0.52240235" ]
0.75249213
0
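The date-handling snippets in the negatives above all reduce to the same parse-then-reformat pattern. A minimal sketch of that pattern, assuming an ISO 'YYYY-MM-DD' input and the underscore-separated output used by the first snippet; the function name and the 'dummy' sentinel handling are illustrative, not taken from any single negative:

from datetime import datetime

def to_underscore_date(date: str) -> str:
    # Placeholder dates fall back to the epoch, mirroring the first snippet.
    if 'dummy' in date:
        return '1970_01_01'
    # Parse '2018-06-12' and re-emit it as '2018_06_12'.
    return datetime.strptime(date, '%Y-%m-%d').strftime('%Y_%m_%d')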
Mimic the & operator in R. Expression objects have to be involved for this to work
def _op_and_(self, left: Any, right: Any) -> Any:
    if isinstance(left, list):
        # induce an intersect with Collection
        return Intersect(left, right)

    left, right = _recycle_left_right(left, right)
    left = Series(left).fillna(False)
    right = Series(right).fillna(False)
    return left & right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AND(f, g):\n def _and(x):\n return f(x) & g(x)\n return _and", "def and_(a, b):", "def __and__(self, other):\n return self.fam.c_binop('and', self, other)", "def __and__(self, obj):\n return self._boolean_operation(obj, operator.__and__)", "def _and(cls, arg1, arg2):\n return arg1 and arg2", "def AND(r, s):\n return lambda l, i: r(l, i) and s(l, i)", "def __and__(self, other):\n return self.__class__(self.value + '&' + str(other))", "def __and__(self, other):\n return self._operation_and(other)", "def AND(self, value):\n self.reg.A = self.reg.A & value\n self.reg.Z = self.reg.A == 0\n self.reg.N = self.reg.A >> 7", "def __and__(self, other):\n return MyCustomNumber(self.value & other.value)", "def visit_and(self, left_result: T, right_result: T) -> T:", "def __and__(self, other: Any) -> Operators:\n return self.operate(and_, other)", "def bitwise_and(self):\n register = self.return_middle_registers(self.opcode)\n self.registers[register[0]] = (\n self.registers[register[0]] & self.registers[register[1]])\n logger.info(\"Bitwise AND on V{} and V{} for {}\".format(\n register[0],\n register[1],\n self.registers[register[0]]))", "def and_(*args, **kwargs):\n ...", "def logical_and(x1, x2, f=None):\n return _cur_framework(x1, f=f).logical_and(x1, x2)", "def test_and(\n self,\n left: Result[int, str],\n right: Result[int, str],\n exp: Result[int, str],\n ) -> None:\n assert left.and_(right) == exp", "def and_bexp(env, node):\n left_value = node.left.interpret(env)\n right_value = node.right.interpret(env)\n return 1 if left_value and right_value else 0", "def __and__(self, other):\n return self.and_(other)", "def bitwise_and(lhs, rhs):\n return _make.bitwise_and(lhs, rhs)", "def logical_and(lhs, rhs):\n return _make.logical_and(lhs, rhs)", "def f_and(*args):\n f = And(*args).factor()\n return f if f in B else f.factor()", "def __and__(self, other):\n a, b = Trits.match_length(self, other)\n return Trits([x & y for x, y in zip(a, b)])", "def __and__(self, other):\n\t\tif isinstance(other, int):\n\t\t\treturn self.value & other\n\t\telif type(self) is type(other):\n\t\t\treturn self.value & other.value", "def instruction_and(self, register, a, b):\n if Vm.is_register(a):\n a = self.get_register(a)\n\n if Vm.is_register(b):\n b = self.get_register(b)\n\n self.set_register(register, (a & b) % MAX_INT)", "def __and__(self, other):\n return self >> (lambda _: other)", "def _operation_and(self, other):\n self._check_items(other)\n return ReadingSet(self._set & self._get_other_set(other))", "def _andReg(address, mask):\n _setReg(address, _getReg(address)&mask)", "def __iand__(self, other: t.Any) -> te.Self:\n return self._op_inplace('__iand__', other)", "def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)", "def and_or_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\t# TODO: The next set of lines will fail at a specific case\n\t\tif quad.operator == 10 :\n\t\t\tcls.set_address_value(quad.result, (left_op and right_op))\n\t\telif quad.operator == 11 :\n\t\t\tcls.set_address_value(quad.result, (left_op or right_op))", "def Nand(*args):\n return Not(And(*args))", "def _and(it):\n return 1 if it[0]==1 and it[1]==1 else 0", "def AND(*expressions):\n return {'$and': list(expressions)}", "def _daat_and(self):\n raise NotImplementedError", "def AND(self, operand2, *operands):\n\t\treturn AND((self, operand2) + operands)", "def and_filter(self):\n return 
self.__and", "def __and__(self, other):\n if other is None:\n return self.copy()\n elif isinstance(other, (Query, QueryCompound)):\n return self.and_(other)\n else:\n out = self.copy()\n out.addMath(Query.Math.And, other)\n return out", "def __and__(self, other):\n for k, v in other.items():\n if k in self._values:\n self._values[k] = str(SpecifierSet(self._values[k]) & v)\n else:\n self._values[k] = v\n return self", "def __mul__(self, obj):\n return self & obj", "def get_bprop_logical_and(self):\n\n def bprop(x, y, out, dout):\n return zeros_like(x), zeros_like(y)\n return bprop", "def __and__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__and__', other)", "def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T:\n left_result: T = visit(obj.left, visitor=visitor)\n right_result: T = visit(obj.right, visitor=visitor)\n return visitor.visit_and(left_result=left_result, right_result=right_result)", "def __iand__(self, y):\n if is_tensor(y) or isinstance(y, int):\n self.share &= y\n elif isinstance(y, BinarySharedTensor):\n self.share.set_(beaver.AND(self, y).share.data)\n else:\n raise TypeError(\"Cannot AND %s with %s.\" % (type(y), type(self)))\n return self", "def andExpr( ): #DOUBLE CHECK THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"andExpr: \", tok)\n\tleft = relationalExpr( ) #does the left side of the grammar\n\ttok = tokens.peek( )\n\twhile tok == \"and\": #checks to see if there is the token \"and\" and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = relationalExpr( )\n\t\tleft = BinaryExpr(tok, left, right)#MIGHT HAVE TO CHANGE TO STRING \n\t\ttok = tokens.peek( )\n\treturn left", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def __and__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return And(self, other)", "def bitwise_and(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_and_op, other)", "def __and__(self, other):\n return np.logical_and(self.array, other.array)", "def test_predicate11(self):\n xpb = XPathBuilder()\n xp = xpb.a.b.c[(xpb.attr('d') == 'e') & xpb.foo[xpb.attr('z') == 'ab']]\n exp = '/a/b/c[@d = \"e\" and /foo[@z = \"ab\"]]'\n self.assertEqual(xp.tostring(), exp)", "def __and__(self, other):\n return self.__mul__(other)", "def to_not_and(formula: Formula) -> Formula:\r\n # Task 3.6a\r\n map_operators = {'->': Formula.parse('~(~~p&~q)'),\r\n '+': Formula.parse('~(~(p&~q)&~(~p&q))'),\r\n '<->': Formula.parse('~~(~(p&~q)&~(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~~(~p&~q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)'),\r\n '|': Formula.parse('~(~p&~q)')}\r\n return formula.substitute_operators(map_operators)", "def __iand__(self, other):\n self.truths = self.truths | other.truths\n return self", "def __and__(self, other):\n assert isinstance(other, Filter)\n new_query = \"({}) & ({})\".format(self.query, other.query)\n return Filter(query=new_query)", "def __and__(self, other):\r\n if self.field.characteristic == 2:\r\n return runtime.and_(self, other)\r\n\r\n return super().__and__(other)", "def _logical_and(*args):\n args_ = [_static_value(x) for x in args]\n if any(x is not None and not bool(x) for x in args_):\n 
return constant_op.constant(False)\n if all(x is not None and bool(x) for x in args_):\n return constant_op.constant(True)\n if len(args) == 2:\n return math_ops.logical_and(*args)\n return math_ops.reduce_all(args)", "def __and__(self, other):\n return BitBoard(self.num & other.num)", "def test_predicate7(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar[(xpb.attr('name') == 'foo') & (xpb.attr('x') == 'x')]\n exp = '/foo/bar[@name = \"foo\" and @x = \"x\"]'\n self.assertEqual(xp.tostring(), exp)", "def and__inplace(a,b):", "def on_true(self) -> global___Expression:", "def le(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"<=\", __key, __and, kwargs.items())", "def __invert__(self) -> BooleanExpression:", "def __or__(self, y):\n return self.__and__(y) ^ self ^ y", "def __and__(self, other):\n return self.intersection(other)", "def ge(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\">=\", __key, __and, kwargs.items())", "def __and__(self, query):\r\n return And([self, query]).normalize()", "def g3(a, b): \n return not (a and b)", "def __mul__(self, other):\n return And(self, other)", "def __le__(self, *args):\n return _ida_hexrays.operand_locator_t___le__(self, *args)", "def __ge__(self, *args):\n return _ida_hexrays.operand_locator_t___ge__(self, *args)", "def __and__(self, other):\r\n return self * other", "def And(*conditions):\n def andPred(db):\n from functools import reduce\n return reduce(lambda result, c: c(result),\n conditions, db)\n\n return andPred", "def and_list(conditionList):\n return functools.reduce(numpy.logical_and, conditionList)", "def t_and(self, other):\n if self is TRUE and other is TRUE:\n return TRUE\n if self is FALSE or other is FALSE:\n return FALSE\n return UNKNOWN", "def test_andOperator(self):\n xp = XPathQuery(\"//bar[@attrib4='value4' and @attrib5='value5']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar5])", "def equivalence_of(formula1: Formula, formula2: Formula) -> Formula:\r\n return Formula('&', Formula('->', formula1, formula2),\r\n Formula('->', formula2, formula1))", "def _conjunction_op(spec, *expressions):", "def _conjunction_op(spec, *expressions):", "def _disjunction_op(spec, *expressions):", "def equivalence_of(formula1, formula2):\n return Formula('&', Formula('->', formula1, formula2),\n Formula('->', formula2, formula1))", "def pl_true(exp, model={}):\n op, args = exp.op, exp.args\n if exp == TRUE:\n return True\n elif exp == FALSE:\n return False\n elif is_prop_symbol(op):\n return model.get(exp)\n elif op == '~':\n p = pl_true(args[0], model)\n if p is None: return None\n else: return not p\n elif op == '|':\n result = False\n for arg in args:\n p = pl_true(arg, model)\n if p is True: return True\n if p is None: result = None\n return result\n elif op == '&':\n result = True\n for arg in args:\n p = pl_true(arg, model)\n if p is False: return False\n if p is None: result = None\n return result\n p, q = args\n if op == '>>':\n return pl_true(~p | q, model)\n elif op == '<<':\n return pl_true(p | ~q, model)\n pt = pl_true(p, model)\n if pt is None: return None\n qt = pl_true(q, model)\n if qt is None: return None\n if op == '<=>':\n return pt == qt\n elif op == '^':\n return pt != qt\n else:\n raise ValueError, \"illegal operator in logic expression\" + str(exp)", "def test_evaluate_and_expression(self):\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n 
self.assertTrue(value.value is False, \"Expected False\")\n try:\n value = self.evaluate_common(\"false and 0\")\n self.fail(\"Integer promotion to Boolean\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"false and true\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and false\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and true\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"true and null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and null\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(value.value is False, \"Expected False\")", "def __rand__(self, other):\n return self._operation_and(other)", "def test_bin_op_support():\n check_peval_expression(\"1 + 2\", {}, \"3\", fully_evaluated=True, expected_value=3)\n check_peval_expression(\"2 - 1\", {}, \"1\", fully_evaluated=True, expected_value=1)\n check_peval_expression(\"2 * 3\", {}, \"6\", fully_evaluated=True, expected_value=6)\n check_peval_expression(\"9 / 2\", {}, \"4.5\", fully_evaluated=True, expected_value=4.5)\n check_peval_expression(\"9 // 2\", {}, \"4\", fully_evaluated=True, expected_value=4)\n check_peval_expression(\"9 % 2\", {}, \"1\", fully_evaluated=True, expected_value=1)\n check_peval_expression(\"2 ** 4\", {}, \"16\", fully_evaluated=True, expected_value=16)\n check_peval_expression(\"3 << 2\", {}, \"12\", fully_evaluated=True, expected_value=12)\n check_peval_expression(\"64 >> 3\", {}, \"8\", fully_evaluated=True, expected_value=8)\n check_peval_expression(\"17 | 3\", {}, \"19\", fully_evaluated=True, expected_value=19)\n check_peval_expression(\"17 ^ 3\", {}, \"18\", fully_evaluated=True, expected_value=18)\n check_peval_expression(\"17 & 3\", {}, \"1\", fully_evaluated=True, expected_value=1)", "def test_multi_chains_flatten():\n age = User.age >= 3\n name = User.name == \"foo\"\n email = User.email != \"bar\"\n\n and_condition = bloop.condition.Condition()\n or_condition = bloop.condition.Condition()\n for c in [age, name, email]:\n and_condition &= c\n or_condition |= c\n assert and_condition == bloop.condition.And(age, name, email)\n assert or_condition == bloop.condition.Or(age, name, email)", "def ff_add(a, b):\n return a ^ b", "def to_not_and_or(formula: Formula) -> Formula:\r\n # Task 3.5\r\n\r\n map_operators = {'->': Formula.parse('(~p|q)'),\r\n '+': Formula.parse('((p&~q)|(~p&q))'),\r\n '<->': Formula.parse('~((p&~q)|(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~(p|q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)')}\r\n return formula.substitute_operators(map_operators)", "def __and__(self, other):\n return intersect(self, other)", "def and_(self, other):\n if not isinstance(other, (Query, QueryCompound)) or other.isNull():\n return self.copy()\n elif self.isNull():\n return other.copy()\n else:\n # grow this if the operators are the same\n if self.__op == QueryCompound.Op.And:\n queries = list(self.__queries) + [other]\n return QueryCompound(*queries, op=QueryCompound.Op.And)\n else:\n return QueryCompound(self, other, op=QueryCompound.Op.And)", "def test_pathop1(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar & xpb.bar.foo\n exp = '/foo/bar and 
/bar/foo'\n self.assertEqual(xp.tostring(), exp)", "def op(self) -> Literal[\"==\"] | Literal[\"<=\"] | Literal[\">=\"]:\n ...", "def test_pathop10(self):\n xpb = XPathBuilder()\n xp = (xpb.foo & xpb.bar | xpb.baz).parenthesize() & xpb.foobar\n exp = '(/foo and /bar or /baz) and /foobar'\n self.assertEqual(xp.tostring(), exp)", "def __le__(self, other):\n return _generate_relational_expression(_le, self, other)", "def less_than_or_equal(self) -> global___Expression:", "def __and__(self, other):\n union = proto.FilterExpression()\n domains = [self.filter, other.filter]\n union.filter_union.filter_expressions.extend(domains)\n self.filter = union\n return self", "def filter_and(filters):\n def filt(item):\n for f in filters:\n if not f(item):\n return False\n return True\n return filt", "def test_and_then(\n self,\n start: Result[int, int],\n first: t.Callable[[int], Result[int, int]],\n second: t.Callable[[int], Result[int, int]],\n exp: Result[int, int],\n ) -> None:\n assert start.and_then(first).and_then(second) == exp" ]
[ "0.6913351", "0.6844835", "0.6834847", "0.68041515", "0.6614185", "0.6585983", "0.65602845", "0.65299505", "0.6528684", "0.6510841", "0.6501651", "0.64942497", "0.6466398", "0.6432646", "0.639087", "0.6376711", "0.6334177", "0.6331774", "0.63316846", "0.6288898", "0.62308896", "0.62308514", "0.62090707", "0.62063646", "0.61766183", "0.61751795", "0.617076", "0.61665165", "0.6148593", "0.60970813", "0.6046134", "0.6040575", "0.6003985", "0.59954596", "0.599482", "0.5944566", "0.5912189", "0.5905758", "0.5879148", "0.58318156", "0.58232903", "0.5822931", "0.5821608", "0.579306", "0.57708895", "0.57708895", "0.57708895", "0.57708895", "0.57708895", "0.5745658", "0.57407916", "0.57351184", "0.5720683", "0.5714338", "0.5713976", "0.5676874", "0.5669976", "0.56577057", "0.5650796", "0.56462824", "0.5627038", "0.5626583", "0.5602917", "0.55926865", "0.5585748", "0.5575975", "0.55558485", "0.5552535", "0.553487", "0.5531191", "0.55120945", "0.5501878", "0.5493118", "0.54901034", "0.5482169", "0.54508764", "0.5446951", "0.54214096", "0.5411467", "0.53934443", "0.53934443", "0.53542286", "0.53526646", "0.5337076", "0.5336293", "0.5284144", "0.52558523", "0.5242038", "0.5241389", "0.523373", "0.5216926", "0.52088875", "0.5208302", "0.52061176", "0.5197937", "0.5195612", "0.51852405", "0.518513", "0.5184801", "0.51795256" ]
0.62697506
20
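A minimal sketch of the element-wise semantics the _op_and_ document above implements, written directly against pandas; the Intersect and _recycle_left_right helpers are assumed to belong to the surrounding library and are not reproduced here:

import pandas as pd

left = pd.Series([True, False, None])
right = pd.Series([True, True, True])

# Both sides have NA filled with False before the element-wise AND,
# which is the choice the document makes for missing values.
result = left.fillna(False).astype(bool) & right.astype(bool)
print(result.tolist())  # [True, False, False]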
Mimic the & operator in R. Expression objects have to be involved for this to work
def _op_or_(self, left: Any, right: Any) -> Any:
    if isinstance(left, list):
        return Collection(left, right)

    left, right = _recycle_left_right(left, right)
    left = Series(left).fillna(False)
    right = Series(right).fillna(False)
    return left | right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AND(f, g):\n def _and(x):\n return f(x) & g(x)\n return _and", "def and_(a, b):", "def __and__(self, other):\n return self.fam.c_binop('and', self, other)", "def __and__(self, obj):\n return self._boolean_operation(obj, operator.__and__)", "def _and(cls, arg1, arg2):\n return arg1 and arg2", "def AND(r, s):\n return lambda l, i: r(l, i) and s(l, i)", "def __and__(self, other):\n return self.__class__(self.value + '&' + str(other))", "def __and__(self, other):\n return self._operation_and(other)", "def AND(self, value):\n self.reg.A = self.reg.A & value\n self.reg.Z = self.reg.A == 0\n self.reg.N = self.reg.A >> 7", "def __and__(self, other):\n return MyCustomNumber(self.value & other.value)", "def visit_and(self, left_result: T, right_result: T) -> T:", "def __and__(self, other: Any) -> Operators:\n return self.operate(and_, other)", "def bitwise_and(self):\n register = self.return_middle_registers(self.opcode)\n self.registers[register[0]] = (\n self.registers[register[0]] & self.registers[register[1]])\n logger.info(\"Bitwise AND on V{} and V{} for {}\".format(\n register[0],\n register[1],\n self.registers[register[0]]))", "def and_(*args, **kwargs):\n ...", "def logical_and(x1, x2, f=None):\n return _cur_framework(x1, f=f).logical_and(x1, x2)", "def test_and(\n self,\n left: Result[int, str],\n right: Result[int, str],\n exp: Result[int, str],\n ) -> None:\n assert left.and_(right) == exp", "def and_bexp(env, node):\n left_value = node.left.interpret(env)\n right_value = node.right.interpret(env)\n return 1 if left_value and right_value else 0", "def __and__(self, other):\n return self.and_(other)", "def bitwise_and(lhs, rhs):\n return _make.bitwise_and(lhs, rhs)", "def logical_and(lhs, rhs):\n return _make.logical_and(lhs, rhs)", "def _op_and_(self, left: Any, right: Any) -> Any:\n if isinstance(left, list):\n # induce an intersect with Collection\n return Intersect(left, right)\n\n left, right = _recycle_left_right(left, right)\n left = Series(left).fillna(False)\n right = Series(right).fillna(False)\n return left & right", "def f_and(*args):\n f = And(*args).factor()\n return f if f in B else f.factor()", "def __and__(self, other):\n a, b = Trits.match_length(self, other)\n return Trits([x & y for x, y in zip(a, b)])", "def __and__(self, other):\n\t\tif isinstance(other, int):\n\t\t\treturn self.value & other\n\t\telif type(self) is type(other):\n\t\t\treturn self.value & other.value", "def instruction_and(self, register, a, b):\n if Vm.is_register(a):\n a = self.get_register(a)\n\n if Vm.is_register(b):\n b = self.get_register(b)\n\n self.set_register(register, (a & b) % MAX_INT)", "def __and__(self, other):\n return self >> (lambda _: other)", "def _operation_and(self, other):\n self._check_items(other)\n return ReadingSet(self._set & self._get_other_set(other))", "def _andReg(address, mask):\n _setReg(address, _getReg(address)&mask)", "def __iand__(self, other: t.Any) -> te.Self:\n return self._op_inplace('__iand__', other)", "def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)", "def and_or_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\t# TODO: The next set of lines will fail at a specific case\n\t\tif quad.operator == 10 :\n\t\t\tcls.set_address_value(quad.result, (left_op and right_op))\n\t\telif quad.operator == 11 :\n\t\t\tcls.set_address_value(quad.result, (left_op or right_op))", "def Nand(*args):\n return Not(And(*args))", 
"def _and(it):\n return 1 if it[0]==1 and it[1]==1 else 0", "def AND(*expressions):\n return {'$and': list(expressions)}", "def _daat_and(self):\n raise NotImplementedError", "def AND(self, operand2, *operands):\n\t\treturn AND((self, operand2) + operands)", "def and_filter(self):\n return self.__and", "def __and__(self, other):\n if other is None:\n return self.copy()\n elif isinstance(other, (Query, QueryCompound)):\n return self.and_(other)\n else:\n out = self.copy()\n out.addMath(Query.Math.And, other)\n return out", "def __and__(self, other):\n for k, v in other.items():\n if k in self._values:\n self._values[k] = str(SpecifierSet(self._values[k]) & v)\n else:\n self._values[k] = v\n return self", "def __mul__(self, obj):\n return self & obj", "def get_bprop_logical_and(self):\n\n def bprop(x, y, out, dout):\n return zeros_like(x), zeros_like(y)\n return bprop", "def __and__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__and__', other)", "def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T:\n left_result: T = visit(obj.left, visitor=visitor)\n right_result: T = visit(obj.right, visitor=visitor)\n return visitor.visit_and(left_result=left_result, right_result=right_result)", "def __iand__(self, y):\n if is_tensor(y) or isinstance(y, int):\n self.share &= y\n elif isinstance(y, BinarySharedTensor):\n self.share.set_(beaver.AND(self, y).share.data)\n else:\n raise TypeError(\"Cannot AND %s with %s.\" % (type(y), type(self)))\n return self", "def andExpr( ): #DOUBLE CHECK THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"andExpr: \", tok)\n\tleft = relationalExpr( ) #does the left side of the grammar\n\ttok = tokens.peek( )\n\twhile tok == \"and\": #checks to see if there is the token \"and\" and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = relationalExpr( )\n\t\tleft = BinaryExpr(tok, left, right)#MIGHT HAVE TO CHANGE TO STRING \n\t\ttok = tokens.peek( )\n\treturn left", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def __and__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return And(self, other)", "def bitwise_and(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_and_op, other)", "def __and__(self, other):\n return np.logical_and(self.array, other.array)", "def test_predicate11(self):\n xpb = XPathBuilder()\n xp = xpb.a.b.c[(xpb.attr('d') == 'e') & xpb.foo[xpb.attr('z') == 'ab']]\n exp = '/a/b/c[@d = \"e\" and /foo[@z = \"ab\"]]'\n self.assertEqual(xp.tostring(), exp)", "def __and__(self, other):\n return self.__mul__(other)", "def to_not_and(formula: Formula) -> Formula:\r\n # Task 3.6a\r\n map_operators = {'->': Formula.parse('~(~~p&~q)'),\r\n '+': Formula.parse('~(~(p&~q)&~(~p&q))'),\r\n '<->': Formula.parse('~~(~(p&~q)&~(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~~(~p&~q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)'),\r\n '|': Formula.parse('~(~p&~q)')}\r\n return formula.substitute_operators(map_operators)", "def __iand__(self, other):\n self.truths = self.truths | other.truths\n return self", "def __and__(self, other):\n assert isinstance(other, Filter)\n new_query = \"({}) & ({})\".format(self.query, other.query)\n return 
Filter(query=new_query)", "def __and__(self, other):\r\n if self.field.characteristic == 2:\r\n return runtime.and_(self, other)\r\n\r\n return super().__and__(other)", "def _logical_and(*args):\n args_ = [_static_value(x) for x in args]\n if any(x is not None and not bool(x) for x in args_):\n return constant_op.constant(False)\n if all(x is not None and bool(x) for x in args_):\n return constant_op.constant(True)\n if len(args) == 2:\n return math_ops.logical_and(*args)\n return math_ops.reduce_all(args)", "def __and__(self, other):\n return BitBoard(self.num & other.num)", "def test_predicate7(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar[(xpb.attr('name') == 'foo') & (xpb.attr('x') == 'x')]\n exp = '/foo/bar[@name = \"foo\" and @x = \"x\"]'\n self.assertEqual(xp.tostring(), exp)", "def and__inplace(a,b):", "def on_true(self) -> global___Expression:", "def le(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"<=\", __key, __and, kwargs.items())", "def __invert__(self) -> BooleanExpression:", "def __or__(self, y):\n return self.__and__(y) ^ self ^ y", "def __and__(self, other):\n return self.intersection(other)", "def ge(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\">=\", __key, __and, kwargs.items())", "def __and__(self, query):\r\n return And([self, query]).normalize()", "def g3(a, b): \n return not (a and b)", "def __mul__(self, other):\n return And(self, other)", "def __le__(self, *args):\n return _ida_hexrays.operand_locator_t___le__(self, *args)", "def __ge__(self, *args):\n return _ida_hexrays.operand_locator_t___ge__(self, *args)", "def __and__(self, other):\r\n return self * other", "def And(*conditions):\n def andPred(db):\n from functools import reduce\n return reduce(lambda result, c: c(result),\n conditions, db)\n\n return andPred", "def and_list(conditionList):\n return functools.reduce(numpy.logical_and, conditionList)", "def t_and(self, other):\n if self is TRUE and other is TRUE:\n return TRUE\n if self is FALSE or other is FALSE:\n return FALSE\n return UNKNOWN", "def test_andOperator(self):\n xp = XPathQuery(\"//bar[@attrib4='value4' and @attrib5='value5']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar5])", "def equivalence_of(formula1: Formula, formula2: Formula) -> Formula:\r\n return Formula('&', Formula('->', formula1, formula2),\r\n Formula('->', formula2, formula1))", "def _conjunction_op(spec, *expressions):", "def _conjunction_op(spec, *expressions):", "def _disjunction_op(spec, *expressions):", "def equivalence_of(formula1, formula2):\n return Formula('&', Formula('->', formula1, formula2),\n Formula('->', formula2, formula1))", "def pl_true(exp, model={}):\n op, args = exp.op, exp.args\n if exp == TRUE:\n return True\n elif exp == FALSE:\n return False\n elif is_prop_symbol(op):\n return model.get(exp)\n elif op == '~':\n p = pl_true(args[0], model)\n if p is None: return None\n else: return not p\n elif op == '|':\n result = False\n for arg in args:\n p = pl_true(arg, model)\n if p is True: return True\n if p is None: result = None\n return result\n elif op == '&':\n result = True\n for arg in args:\n p = pl_true(arg, model)\n if p is False: return False\n if p is None: result = None\n return result\n p, q = args\n if op == '>>':\n return pl_true(~p | q, model)\n elif op == '<<':\n return pl_true(p | ~q, model)\n pt = pl_true(p, model)\n if pt is None: return None\n qt = pl_true(q, model)\n if qt is None: return None\n if op == '<=>':\n return pt == qt\n elif op == '^':\n 
return pt != qt\n else:\n raise ValueError, \"illegal operator in logic expression\" + str(exp)", "def test_evaluate_and_expression(self):\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n try:\n value = self.evaluate_common(\"false and 0\")\n self.fail(\"Integer promotion to Boolean\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"false and true\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and false\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and true\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"true and null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and null\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(value.value is False, \"Expected False\")", "def __rand__(self, other):\n return self._operation_and(other)", "def test_bin_op_support():\n check_peval_expression(\"1 + 2\", {}, \"3\", fully_evaluated=True, expected_value=3)\n check_peval_expression(\"2 - 1\", {}, \"1\", fully_evaluated=True, expected_value=1)\n check_peval_expression(\"2 * 3\", {}, \"6\", fully_evaluated=True, expected_value=6)\n check_peval_expression(\"9 / 2\", {}, \"4.5\", fully_evaluated=True, expected_value=4.5)\n check_peval_expression(\"9 // 2\", {}, \"4\", fully_evaluated=True, expected_value=4)\n check_peval_expression(\"9 % 2\", {}, \"1\", fully_evaluated=True, expected_value=1)\n check_peval_expression(\"2 ** 4\", {}, \"16\", fully_evaluated=True, expected_value=16)\n check_peval_expression(\"3 << 2\", {}, \"12\", fully_evaluated=True, expected_value=12)\n check_peval_expression(\"64 >> 3\", {}, \"8\", fully_evaluated=True, expected_value=8)\n check_peval_expression(\"17 | 3\", {}, \"19\", fully_evaluated=True, expected_value=19)\n check_peval_expression(\"17 ^ 3\", {}, \"18\", fully_evaluated=True, expected_value=18)\n check_peval_expression(\"17 & 3\", {}, \"1\", fully_evaluated=True, expected_value=1)", "def test_multi_chains_flatten():\n age = User.age >= 3\n name = User.name == \"foo\"\n email = User.email != \"bar\"\n\n and_condition = bloop.condition.Condition()\n or_condition = bloop.condition.Condition()\n for c in [age, name, email]:\n and_condition &= c\n or_condition |= c\n assert and_condition == bloop.condition.And(age, name, email)\n assert or_condition == bloop.condition.Or(age, name, email)", "def ff_add(a, b):\n return a ^ b", "def to_not_and_or(formula: Formula) -> Formula:\r\n # Task 3.5\r\n\r\n map_operators = {'->': Formula.parse('(~p|q)'),\r\n '+': Formula.parse('((p&~q)|(~p&q))'),\r\n '<->': Formula.parse('~((p&~q)|(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~(p|q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)')}\r\n return formula.substitute_operators(map_operators)", "def __and__(self, other):\n return intersect(self, other)", "def and_(self, other):\n if not isinstance(other, (Query, QueryCompound)) or other.isNull():\n return self.copy()\n elif self.isNull():\n return other.copy()\n else:\n # grow this if the operators are the same\n if self.__op == QueryCompound.Op.And:\n 
queries = list(self.__queries) + [other]\n return QueryCompound(*queries, op=QueryCompound.Op.And)\n else:\n return QueryCompound(self, other, op=QueryCompound.Op.And)", "def test_pathop1(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar & xpb.bar.foo\n exp = '/foo/bar and /bar/foo'\n self.assertEqual(xp.tostring(), exp)", "def op(self) -> Literal[\"==\"] | Literal[\"<=\"] | Literal[\">=\"]:\n ...", "def test_pathop10(self):\n xpb = XPathBuilder()\n xp = (xpb.foo & xpb.bar | xpb.baz).parenthesize() & xpb.foobar\n exp = '(/foo and /bar or /baz) and /foobar'\n self.assertEqual(xp.tostring(), exp)", "def __le__(self, other):\n return _generate_relational_expression(_le, self, other)", "def less_than_or_equal(self) -> global___Expression:", "def __and__(self, other):\n union = proto.FilterExpression()\n domains = [self.filter, other.filter]\n union.filter_union.filter_expressions.extend(domains)\n self.filter = union\n return self", "def filter_and(filters):\n def filt(item):\n for f in filters:\n if not f(item):\n return False\n return True\n return filt", "def test_and_then(\n self,\n start: Result[int, int],\n first: t.Callable[[int], Result[int, int]],\n second: t.Callable[[int], Result[int, int]],\n exp: Result[int, int],\n ) -> None:\n assert start.and_then(first).and_then(second) == exp" ]
[ "0.6913351", "0.6844835", "0.6834847", "0.68041515", "0.6614185", "0.6585983", "0.65602845", "0.65299505", "0.6528684", "0.6510841", "0.6501651", "0.64942497", "0.6466398", "0.6432646", "0.639087", "0.6376711", "0.6334177", "0.6331774", "0.63316846", "0.6288898", "0.62697506", "0.62308896", "0.62308514", "0.62090707", "0.62063646", "0.61766183", "0.61751795", "0.617076", "0.61665165", "0.6148593", "0.60970813", "0.6046134", "0.6040575", "0.6003985", "0.59954596", "0.599482", "0.5944566", "0.5912189", "0.5905758", "0.5879148", "0.58318156", "0.58232903", "0.5822931", "0.5821608", "0.579306", "0.57708895", "0.57708895", "0.57708895", "0.57708895", "0.57708895", "0.5745658", "0.57407916", "0.57351184", "0.5720683", "0.5714338", "0.5713976", "0.5676874", "0.5669976", "0.56577057", "0.5650796", "0.56462824", "0.5627038", "0.5626583", "0.5602917", "0.55926865", "0.5585748", "0.5575975", "0.55558485", "0.5552535", "0.553487", "0.5531191", "0.55120945", "0.5501878", "0.5493118", "0.54901034", "0.5482169", "0.54508764", "0.5446951", "0.54214096", "0.5411467", "0.53934443", "0.53934443", "0.53542286", "0.53526646", "0.5337076", "0.5336293", "0.5284144", "0.52558523", "0.5242038", "0.5241389", "0.523373", "0.5216926", "0.52088875", "0.5208302", "0.52061176", "0.5197937", "0.5195612", "0.51852405", "0.518513", "0.5184801", "0.51795256" ]
0.0
-1
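The document_score and document_rank fields appear to relate to negative_scores as follows, inferred from the rows above (a score of 0.0 pairs with rank -1, and the 0.62697506 document is out-scored by exactly 20 negatives); a hedged sketch of that bookkeeping, not taken from any code in this dump:

def rank_of(document_score: float, negative_scores: list) -> int:
    # A document that never makes the scored candidate pool gets score 0.0 and rank -1;
    # otherwise its rank counts the negatives that score higher than it.
    if document_score == 0.0:
        return -1
    return sum(float(s) > document_score for s in negative_scores)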
type (1 bit) + sequence_number (63 bits) + key_size (32 bits) + key (variable length) + value_size (32 bits) + value (variable length)
def __init__(self, key, sequence_number, type=KeyType.PUT, value=None):
    assert key is not None
    assert sequence_number >= 0
    self.type = type
    self.sequence_number = sequence_number
    self.key = key
    self.value = value
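A hedged sketch of serializing the layout named in the query, under the assumption of big-endian packing with the 1-bit type flag in the top bit of a 64-bit header word; the dump itself only shows the constructor, not the encoder:

import struct

def encode_record(type_bit: int, sequence_number: int, key: bytes, value: bytes) -> bytes:
    # 1-bit type in the most significant bit, 63-bit sequence number below it,
    # then each of key and value prefixed with a 32-bit length.
    header = ((type_bit & 0x1) << 63) | (sequence_number & ((1 << 63) - 1))
    return (
        struct.pack('>Q', header)
        + struct.pack('>I', len(key)) + key
        + struct.pack('>I', len(value)) + value
    )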
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_meta_chunk(key, value):\n bkey = key.encode(\"utf-8\")\n bvalue = value.encode(\"utf-8\")\n return (wozardry.to_uint32(len(bkey) + len(bvalue) + 2) + bkey + b'\\x09' + bvalue + b'\\x0A').hex()", "def _pack_dict( self, table, pad = False ) :\r\n\r\n keys, values = zip( *table.items() )\r\n \r\n ## #debug \r\n ## print \"_pack_dict(): keys; values\", keys, values\r\n\r\n # we hope not to be called with an empty dict(), but ... \r\n if len( keys ) <= 0 :\r\n return struct.pack('0s', '') \r\n\r\n #\r\n # preprocess the keys ... \r\n #\r\n\r\n # 4-byte condition check\r\n if not pad : \r\n # \"try\" ... \r\n map( Eggog.check_type, keys ) \r\n map( Eggog.check_len, keys )\r\n\r\n else : # else convert to string and truncate or pad\r\n \r\n for i in xrange(len(keys)) :\r\n\r\n k = keys[i] \r\n if type(k) != type( '' ) :\r\n \r\n k = make_fit( str(k) )\r\n keys[i] = k\r\n \r\n \r\n \r\n # check uniqueness of the keys\r\n \r\n\r\n # \r\n # pack the values \r\n #\r\n\r\n nkeys = len(keys) \r\n \r\n if nkeys > 255 :\r\n raise Eggog( \"too many keys to send (%d > 255)\" % (nkeys, ) ) \r\n \r\n\r\n nkeys_str = struct.pack( '=B', nkeys ) \r\n\r\n values_packed = map( self._pack_data, values )\r\n\r\n items_packed = [nkeys_str, ] * ( 2 * nkeys + 1 )\r\n items_packed[1::2] = keys[:]\r\n items_packed[2::2] = values_packed\r\n\r\n result = _cat( *items_packed ) \r\n\r\n ## #debug \r\n ## print \"_pack_dict(): nkeys, keys; values_packed\", nkeys, keys, repr(values_packed)\r\n\r\n ## print \"_pack_dict(): result:\", repr(result ) \r\n\r\n return result", "def encode(key, value, ber_length=0):\n return bytearray(key) + encode_ber(len(value), ber_length) + bytearray(value)", "def key_type(self) -> global___Type:", "def decode(k, key_length):\n key = k[:key_length]\n val_length, ber_length = decode_ber(k[key_length:])\n value = k[key_length + ber_length : key_length + ber_length + val_length]\n return key, value", "def __init__(self, batch_size, vocab_size, memory_size,\n\t\t\t\t query_size, key_size, value_size, embedding_size,\n\t\t\t\t feature_size=30,\n\t\t\t\t hops=3,\n\t\t\t\t l2_lambda=0.2,\n\t\t\t\t name='KeyValueMemN2N'):\n\t\tself._key_size = key_size\n\t\tself._value_size = value_size\n\t\tself._batch_size = batch_size\n\t\tself._vocab_size = vocab_size\n\t\tself._memory_key_size = memory_size\n\t\tself._memory_value_size = memory_size\n\t\tself._query_size = query_size\n\t\t#self._wiki_sentence_size = doc_size\n\t\tself._embedding_size = embedding_size\n\t\tself._hops = hops\n\t\tself._l2_lambda = l2_lambda\n\t\tself._name = name\n\t\tself._key_encoding = tf.constant(position_encoding(self._key_size, self._embedding_size), name=\"encoding\")\n\t\tself._value_encoding = tf.constant(position_encoding(self._value_size, self._embedding_size), name=\"encoding\")\n\t\tself._query_encoding = tf.constant(position_encoding(self._query_size, self._embedding_size), name=\"encoding\")\n\t\tself._build_inputs()\n\n\t\t\n\t\td = feature_size\n\t\tself._feature_size = feature_size\n\t\tself._build_graph()", "def bit_length(self, ???):", "def hashId(key, size):\n return sum([ord(c) for c in key]) % size", "def keysequence(value):\r\n return value.toString()", "def key_size(self) -> int:\n pass", "def key_size(self) -> int:\n pass", "def genkey(value, length = 8):\n if not isinstance(value, str):\n raise ValueError('Expected `value` to be `str`.')\n\n return blake2b(value.encode('utf-8'), digest_size=4).hexdigest()", "def JAVA_NATIVE(key):\n h = 0\n l = len(key)\n for (idx,c) in enumerate(key):\n h += 
ord(c)*31**(l-(idx+1))\n return _signed_int32(h)", "def encode(x):\n i = int(16384 * x)\n return Struct(\"h\").pack(i)", "def estimate_map_output_bytes(num_words, key_num_bytes, value_num_bytes):\n return num_words * (key_num_bytes + value_num_bytes)", "def key_to_struct(key: RsaKey) -> bytes:\n mod = int_to_bytes(key.n)\n exponent = int_to_bytes(key.e)\n\n return b\"\\x00\\x00\\x00\\x80\" + mod + b\"\\x00\\x00\\x00\\x03\" + exponent", "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def unmarshal_compactsize(b):\n key = b[0]\n if key == 0xff:\n return b[0:9], unmarshal_uint(b[1:9])\n if key == 0xfe:\n return b[0:5], unmarshal_uint(b[1:5])\n if key == 0xfd:\n return b[0:3], unmarshal_uint(b[1:3])\n return b[0:1], unmarshal_uint(b[0:1])", "def _dict_new_sized(typingctx, n_keys, keyty, valty):\n resty = types.voidptr\n sig = resty(n_keys, keyty, valty)\n\n def codegen(context, builder, sig, args):\n n_keys = builder.bitcast(args[0], ll_ssize_t)\n\n # Determine sizeof key and value types\n ll_key = context.get_data_type(keyty.instance_type)\n ll_val = context.get_data_type(valty.instance_type)\n sz_key = context.get_abi_sizeof(ll_key)\n sz_val = context.get_abi_sizeof(ll_val)\n\n refdp = cgutils.alloca_once(builder, ll_dict_type, zfill=True)\n\n argtys = [ll_dict_type.as_pointer(), ll_ssize_t, ll_ssize_t, ll_ssize_t]\n fnty = ir.FunctionType(ll_status, argtys)\n fn = ir.Function(builder.module, fnty, 'numba_dict_new_sized')\n\n args = [refdp, n_keys, ll_ssize_t(sz_key), ll_ssize_t(sz_val)]\n status = builder.call(fn, args)\n\n allocated_failed_msg = \"Failed to allocate dictionary\"\n _raise_if_error(context, builder, status, msg=allocated_failed_msg)\n\n dp = builder.load(refdp)\n return dp\n\n return sig, codegen", "def encode(x):\n i = int(16384 * x)\n return Struct('h').pack(i)", "def calc_keyid(flags, protocol, algorithm, st):\n # Remove spaces and create the wire format\n st0=st.replace(' ', '')\n st2=struct.pack('!HBB', int(flags), int(protocol), int(algorithm))\n st2+=base64.b64decode(st0)\n \n # Calculate the tag\n cnt=0\n for idx in xrange(len(st2)):\n s=struct.unpack('B', st2[idx])[0]\n if (idx % 2) == 0:\n cnt+=s<<8\n else:\n cnt+=s\n \n ret=((cnt & 0xFFFF) + (cnt>>16)) & 0xFFFF\n \n return(ret)", "def xxh128(data):\n storage_key1 = bytearray(xxhash.xxh64(data, seed=0).digest())\n storage_key1.reverse()\n\n storage_key2 = bytearray(xxhash.xxh64(data, seed=1).digest())\n storage_key2.reverse()\n\n return storage_key1 + storage_key2", "def loads_value(type_key, binary_data, version, vectors):\n # pylint: disable=too-many-return-statements\n\n if isinstance(type_key, bytes):\n type_key = type_keys.Value(type_key)\n\n if type_key == type_keys.Value.INTEGER:\n return struct.unpack(\"!q\", binary_data)[0]\n if type_key == type_keys.Value.FLOAT:\n return struct.unpack(\"!d\", binary_data)[0]\n if type_key == type_keys.Value.COMPLEX:\n return complex(*struct.unpack(formats.COMPLEX_PACK, binary_data))\n if type_key == type_keys.Value.NUMPY_OBJ:\n return common.data_from_binary(binary_data, np.load)\n if type_key == type_keys.Value.STRING:\n return binary_data.decode(common.ENCODE)\n if type_key == type_keys.Value.NULL:\n return None\n if type_key == type_keys.Value.CASE_DEFAULT:\n return CASE_DEFAULT\n if type_key == type_keys.Value.PARAMETER_VECTOR:\n return common.data_from_binary(binary_data, _read_parameter_vec, vectors=vectors)\n if type_key == type_keys.Value.PARAMETER:\n return 
common.data_from_binary(binary_data, _read_parameter)\n if type_key == type_keys.Value.PARAMETER_EXPRESSION:\n if version < 3:\n return common.data_from_binary(binary_data, _read_parameter_expression)\n else:\n return common.data_from_binary(\n binary_data, _read_parameter_expression_v3, vectors=vectors\n )\n\n raise exceptions.QpyError(f\"Serialization for {type_key} is not implemented in value I/O.\")", "def hashing_info(string):#KEY HASHING FUNCTION\n nodeInfo = string.encode('utf-8')\n\n #md5 -> 2^7 = 128 bits\n hash_object = hashlib.md5()\n hash_object.update(nodeInfo)\n\n tmp = hash_object.hexdigest()\n tmp = int(tmp,16)\n\n result = tmp >> (128-16)\n return result", "def h_python(key, N):\n return hash(key) % N", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def test_long():\n key = 'A' * 242\n hashed_key = '%s[3705915182]' % ('A' * 229)\n full_key = 'prefix:1:%s' % hashed_key\n assert full_key == make_key(key, 'prefix', 1)", "def ohe_sequence(sequence, batch_size, seq_size, dict_size):\n bow_vec = np.zeros((batch_size, seq_size, dict_size), dtype=np.float32)\n\n for flow_num in range(batch_size):\n for action_num in range(seq_size):\n bow_vec[flow_num, action_num, sequence[flow_num][action_num]] = 1\n\n return bow_vec", "def key(nullable=True):\n return sa.Column(\n \"key\",\n sa.Text().with_variant(mysql.VARCHAR(255), \"mysql\"),\n nullable=nullable,\n )", "def RSA_KEYPAIR_SIZE() :\n return 512", "def testIntegerKeys(self):\n hd = HeapDict(size=1)\n hd.push(1, 2)\n self.assertEqual(hd.get_result(), {1: [2]})", "def keyGen(key):\n def leftShift(keyBitList):\n \"\"\"Perform a circular left shift on the first and second five bits\"\"\"\n shiftedKey = [None] * KeyLength\n shiftedKey[0:9] = keyBitList[1:10]\n shiftedKey[4] = keyBitList[0]\n shiftedKey[9] = keyBitList[5]\n return shiftedKey\n\n # Converts input key (integer) into a list of binary digits\n keyList = [(key & 1 << i) >> i for i in reversed(range(KeyLength))]\n permKeyList = [None] * KeyLength\n for index, elem in enumerate(P10table):\n permKeyList[index] = keyList[elem - 1]\n shiftedOnceKey = leftShift(permKeyList)\n shiftedTwiceKey = leftShift(leftShift(shiftedOnceKey))\n subKey1 = subKey2 = 0\n for index, elem in enumerate(P8table):\n subKey1 += (128 >> index) * shiftedOnceKey[elem - 1]\n subKey2 += (128 >> index) * shiftedTwiceKey[elem - 1]\n return (subKey1, subKey2)", "def format_length( self, key ) :\r\n\r\n return struct.calcsize( self[key] )", "def _key(self):\n return (self.name, self.struct_types, self.struct_values)", "def __extract_fields(self):\n for name, stuff in self.data.items():\n if stuff == (): # Empty tuple == 1 bit, value of 0\n self.fields.append(Field(name=name, value=0, size=1))\n elif isinstance(stuff, int): # int == specified value, value of 0\n self.fields.append(Field(name=name, value=0, size=stuff))\n elif isinstance(stuff, str): # str == specified value, value of 0\n pattern = re.compile(\"[0-9]+[bB]\")\n if pattern.match(stuff):\n if \"b\" in stuff: # bits specified\n size = int(stuff[:stuff.lower().index(\"b\")])\n self.fields.append(Field(name=name, value=0, size=size))\n elif \"B\" in stuff: # Bytes specified\n size = int(stuff[:stuff.lower().index(\"b\")]) * 8\n self.fields.append(Field(name=name, value=0, size=size))\n else: # No other string option, so must have been one of the \"vary\" constants from above.\n self.fields.append(Field(name=name, value=stuff, 
size=\"vary\"))\n elif isinstance(stuff, tuple) or isinstance(stuff, list): # specified value and size.\n if isinstance(stuff[0], str):\n if \"b\" in stuff[0]: # Bits\n size = int(stuff[0][:stuff[0].lower().index(\"b\")])\n # if not self.__check_bit_size(stuff[1], size):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(size) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=size))\n elif \"B\" in stuff[0]: # Bytes\n size = int(stuff[0][:stuff[0].lower().index(\"b\")]) * 8\n # if not self.__check_bit_size(stuff[1], size):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(size) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=size))\n elif stuff[0].lower() == NULL_TERMINATE:\n self.fields.append(Field(name=name, value=stuff[1], size=NULL_TERMINATE))\n elif stuff[0].lower() == PREFIX_LENGTH:\n self.fields.append(Field(name=name, value=stuff[1], size=PREFIX_LENGTH))\n elif stuff[0].lower() == PREFIX_LEN_NULL_TERM:\n self.fields.append(Field(name=name, value=stuff[1], size=PREFIX_LEN_NULL_TERM))\n elif stuff[0].lower() == IPv4:\n self.fields.append(Field(name=name, value=stuff[1], size=IPv4))\n elif isinstance(stuff[0], int):\n # if not self.__check_bit_size(stuff[1], stuff[0]):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(stuff[0]) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=stuff[0]))", "def __init__(self, name, id=0, xtd=0, rtr= 0 ,dtype='u8', endian='intel', startbit=0, bitlength=32, val=0.0 ):\r\n self._name = name\r\n self._id = id\r\n self._xtd = xtd\r\n self._rtr = rtr \r\n self._dtype = dtype\r\n self._endian = endian\r\n self._startbit = startbit\r\n self._bitlength = bitlength\r\n self._val = val", "def encode_vector_of_t(value: list):\n return encode_u32(len(value)) + bytes([i for j in value for i in j])", "def create_key ():", "def encode(self,\n data: mx.sym.Symbol,\n data_length: Optional[mx.sym.Symbol],\n seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:\n pass", "def StoreBits32(self, val):\n tmp_val = struct.pack(\">L\", val)\n self.StoreBits( (StrToList(tmp_val), 32))", "def generate_random_binary(length):\n key = [str(random.randint(0,1)) for x in range(length)]\n return \"\".join(key)", "def memory_key_values(k, v, num_mem_kv, dim_batch, dim_heads, variable_dtype, mesh):\n\n dim_mem_kv = mtf.Dimension(\"mem_kv_sequence\", num_mem_kv)\n emb_dim = k.shape[-1]\n mem_std = 1 / math.sqrt(emb_dim.size)\n\n mem_k = mtf.get_variable(mesh, \"mem_k\", mtf.Shape([dim_mem_kv, dim_heads, emb_dim]),\n initializer=tf.random_normal_initializer(stddev=mem_std),\n master_dtype=variable_dtype.master_dtype,\n slice_dtype=variable_dtype.slice_dtype,\n activation_dtype=variable_dtype.activation_dtype,\n )\n mem_v = mtf.get_variable(mesh, \"mem_v\", mtf.Shape([dim_mem_kv, dim_heads, emb_dim]),\n initializer=tf.random_normal_initializer(stddev=mem_std),\n master_dtype=variable_dtype.master_dtype,\n slice_dtype=variable_dtype.slice_dtype,\n activation_dtype=variable_dtype.activation_dtype)\n\n mem_k, mem_v = map(lambda t: mtf.broadcast(t, [dim_batch, dim_mem_kv, dim_heads, emb_dim]),\n (mem_k, mem_v))\n mem_k, mem_v = map(lambda t: mtf.rename_dimension(t, \"mem_kv_sequence\", \"sequence\"),\n (mem_k, mem_v))\n\n k = mtf.concat([mem_k, k], \"sequence\")\n v = mtf.concat([mem_v, v], \"sequence\")\n return k, v", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 
0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! sequence is too short\")", "def _to_packed(self, value):\n raise NotImplementedError", "def create_shared_key(self, scalar: bytes, point: bytes) -> bytes:", "def encode(self,\n data: mx.sym.Symbol,\n data_length: Optional[mx.sym.Symbol],\n seq_len: int = 0) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:\n return data, data_length, seq_len", "def __init__(self,key):\n self.block_size = 32\n self.key = hashlib.sha256(key).digest()", "def test_bit_insert_value_byte_size_too_large(self):\n value = bytearray([3] * 6)\n ops = [bitwise_operations.bit_insert(self.five_255_bin, 0, 6, value, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([3] * 6 + [255] * 5)\n assert bins[self.five_255_bin] == expected_result", "def pack(dicty: dict[str, Any]) -> bytes:\n column_count = 0\n column_directory = []\n directory_offset = 0\n name_offset = 0\n names = []\n data_offset = 0\n data = []\n total_encname_length = 0\n\n dicty_names_encoded = {key.encode(\"utf-8\"): value for key, value in dicty.items()}\n\n for encname in sorted(dicty_names_encoded.keys(), key=name_order):\n value = dicty_names_encoded[encname]\n if value is None:\n continue\n\n if len(encname) > MAX_NAME_LENGTH:\n raise DynColLimitError(\"Key too long: \" + encname.decode(\"utf-8\"))\n total_encname_length += len(encname)\n if total_encname_length > MAX_TOTAL_NAME_LENGTH:\n raise DynColLimitError(\"Total length of keys too long\")\n\n try:\n encode_func = ENCODE_FUNCS[type(value)]\n except KeyError:\n raise DynColTypeError(f\"Unencodable type {type(value)}\")\n dtype, encvalue = encode_func(value)\n\n column_count += 1\n column_directory.append(name_offset)\n column_directory.append((data_offset << 4) + dtype)\n names.append(encname)\n name_offset += len(encname)\n data.append(encvalue)\n data_offset += len(encvalue)\n\n directory_offset += 2\n\n data_size_flag, coldir_size_code, odd_sized_datacode = data_size(data)\n\n flags = 4 | data_size_flag # means this contains named dynamic columns\n enc_names = b\"\".join(names)\n\n buf = [struct_pack(\"<BHH\", flags, column_count, len(enc_names))]\n if not odd_sized_datacode:\n buf.append(\n struct_pack(\n \"<\" + (\"H\" + coldir_size_code) * (len(column_directory) // 2),\n 
*column_directory,\n )\n )\n else:\n for i, val in enumerate(column_directory):\n if i % 2 == 0:\n # name_offset\n buf.append(struct_pack(\"<H\", val))\n else:\n # data_offset + dtype, have to cut last byte\n value = struct_pack(\"<\" + coldir_size_code, val)\n buf.append(value[:-1])\n buf.append(enc_names)\n buf.extend(data)\n return b\"\".join(buf)", "def reduce_length(key, values):\n yield str((key, len(values)))", "def key_type(self):\n raise exceptions.NotImplementedError()", "def sizes(cls, p): \n try:\n return cls.table_dict[p]\n except:\n assert p > 1 and p & (p + 1) == 0, str(p)\n d = cls.tables.copy()\n d[\"P\"] = p\n d[\"P_BITS\"] = P_BITS = bitlen(p)\n FIELD_BITS = P_BITS\n while (FIELD_BITS & (FIELD_BITS - 1)): \n FIELD_BITS += 1 \n d[\"FIELD_BITS\"] = FIELD_BITS\n d[\"LOG_FIELD_BITS\"] = hibit(FIELD_BITS)\n d[\"INT_FIELDS\"] = INT_FIELDS = cls.INT_BITS // FIELD_BITS\n d[\"LOG_INT_FIELDS\"] = hibit(INT_FIELDS)\n V24_INTS = 32 // INT_FIELDS\n d[\"V24_INTS\"] = V24_INTS\n d[\"LOG_V24_INTS\"] = hibit(V24_INTS)\n d[\"V24_INTS_USED\"] = V24_INTS - (V24_INTS >> 2)\n V64_INTS = 64 // INT_FIELDS\n d[\"V64_INTS\"] = V64_INTS\n d[\"LOG_V64_INTS\"] = hibit(V64_INTS)\n d[\"MMV_INTS\"] = 3 * (2048 + 24) * V24_INTS + 759 * V64_INTS\n partial_smask = partial(smask_default, FIELD_BITS)\n d[\"smask\"] = UserFormat(partial_smask, fmt = c_hex)\n cls.table_dict[p] = d\n return d", "def testOneSize(self):\n hd = HeapDict(size=1)\n hd.push('a', 2)\n hd.push('a', 1)\n hd.push('b', 3)\n hd.push('b', 4)\n self.assertEqual(hd.get_result(), {'a': [2], 'b': [4]})", "def create_data_set():\n data_set = {}\n for index in range(1024):\n size = random.randint(1, 100) #nosec\n key = str(index).encode(\"utf-8\")\n data_set[key] = get_random_bytes(size)\n return data_set", "def key_id(key, origin=None):\n\n rdata = _to_rdata(key, origin)\n if key.algorithm == RSAMD5:\n return (rdata[-3] << 8) + rdata[-2]\n else:\n total = 0\n for i in range(len(rdata) // 2):\n total += (rdata[2 * i] << 8) + rdata[2 * i + 1]\n if len(rdata) % 2 != 0:\n total += rdata[len(rdata) - 1] << 8\n total += (total >> 16) & 0xFFFF\n return total & 0xFFFF", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def analyze_bit(key: List[Key],\n side_length: int,\n random_bits: str,\n message_bits: str):\n # Concatenate the input to get cube input.\n cube_input = message_bits + random_bits\n # Initialize the cube.\n cube = Cube(cube_input=cube_input, cube_side_length=side_length)\n\n # Xor, Shift, and apply move onto the cube.\n for each_key in key:\n cube.xor()\n cube.shift_cubie_content()\n cube.shift(key=each_key)\n\n # Count number of zeros and number of ones.\n return {\n \"0\": cube.content.count(\"0\"),\n \"1\": cube.content.count(\"1\")\n }", "def test_pos_operate_with_bin_length_extra_nostricttypes(self):\n key = (\"test\", \"demo\", 1)\n\n max_length = \"a\"\n for _ in range(20):\n max_length = max_length + \"a\"\n\n llist = [\n {\"op\": aerospike.OPERATOR_PREPEND, \"bin\": \"name\", \"val\": \"ram\"},\n {\"op\": aerospike.OPERATOR_INCR, \"bin\": max_length, \"val\": 3},\n ]\n\n TestOperate.client_no_typechecks.operate(key, llist)\n\n (key, _, bins) = TestOperate.client_no_typechecks.get(key)\n\n assert bins == {\"name\": \"ramname1\", \"age\": 1}", "def test_matrix_kv(matrix):\n assert isinstance(matrix.kv, unitdata.Storage)", "def __getKeyInformation( self , flaglist ):\n\t\tkeyinfo = 0\n\t\tif 'HMAC_MD5_RC4' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 0 )\n\t\tif 'HMAC_SHA1_AES' in 
flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 1 )\n\t\tif 'group' in flaglist:\n\t\t\tpass\n\t\tif 'pairwise' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 3 )\n\t\tif 'idx0' in flaglist:\n\t\t\tpass\n\t\tif 'idx1' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 4 )\n\t\tif 'idx2' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 5 )\n\t\tif 'install' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 6 )\n\t\tif 'ack' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 7 )\n\t\tif 'mic' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 8 )\n\t\tif 'secure' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 9 )\n\t\tif 'error' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 10 )\n\t\tif 'request' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 11 )\n\t\tif 'encrypted' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 12 )\n\t\treturn keyinfo", "def _dict_length(typingctx, d):\n resty = types.intp\n sig = resty(d)\n\n def codegen(context, builder, sig, args):\n fnty = ir.FunctionType(\n ll_ssize_t,\n [ll_dict_type],\n )\n fn = cgutils.get_or_insert_function(builder.module, fnty,\n 'numba_dict_length')\n [d] = args\n [td] = sig.args\n dp = _container_get_data(context, builder, td, d)\n n = builder.call(fn, [dp])\n return n\n\n return sig, codegen", "def decodeKeyRecord(keyFile,needToSwap,nRecordTypes):\n\n record = array.array('I')\n record.fromfile(keyFile,6)\n if needToSwap: record.byteswap()\n syncValue = (record[0],record[1],record[2])\n recordIndex = (record[4]<<32) | record[5]\n return (syncValue,recordIndex)", "def decode_key(as_bytes: typing.List[int]) -> str:\n raise NotImplementedError()", "def generate_key(self, size):\n key = bytearray()\n for i in range(0,size):\n random_byte = ord(os.urandom(1))\n key.append(random_byte)\n return key", "def generate_keypair(self, key_length: int = 2048) -> Tuple[bytes, bytes]:\n\n return None", "def __init__(self, *args):\n\n BaseDataTypes.__init__(self, *args)\n self.const_integer = 2\n self.const_integer_octet_encoded = '\\x02'\n self.const_integer_short_encoded = '\\x00\\x02'\n self.const_integer_long_encoded = '\\x00\\x00\\x00\\x02'\n self.const_integer_long_long_encoded = '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02'", "def __bytes__(self):\n prm = self.package(self.p, LEN_PRIME)\n gen = self.package(self.g, LEN_GEN)\n pbk = self.package(self.pk, LEN_PK)\n return prm + gen + pbk", "def __init__(self, *args):\n\n BaseDataTypes.__init__(self, *args)\n self.const_field_table_dummy_dict = {'$key1':'value1','$key2':'value2'}\n self.const_field_table_dummy_dict_encoded = '\\x00\\x00\\x00\\x22\\x05$key2S\\x00\\x00\\x00\\x06value2\\x05$key1S\\x00\\x00\\x00\\x06value1'", "def _from_packed(self, value):\n raise NotImplementedError", "def htable_put(table, key, value):", "def get_num_slots(self):\n Return the load factor for this hash table.\n\n Implement this.\n \"\"\"\n return self.elements / self.capacity\n\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 Hash, 64-bit\n\n Implement this, and/or DJB2.pyy\n \"\"\"\n\n # Your code here\n\n\n def djb2(self, key):\n \"\"\"\n DJB2 hash, 32-bit\n\n Implement this, and/or FNV-1.\n \"\"\"\n # Your code here\n\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n #return self.fnv1(key) % self.capacity\n<<<<<<< Updated upstream\n return self.djb2(key) % self.capacity\n=======\n return self.djb2(key) % len(self.storage)\n>>>>>>> Stashed changes\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n 
Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n # need to account for if the key value is the same \n\n i = self.hash_index(key)\n if not self.storage[i]:\n hte = HashTableEntry(key, value)\n self.storage[i] = hte\n self.elements += 1\n hte.head = HashTableEntry(key, value)\n elif self.storage[i] and self.storage[i].key != key:\n self.storage[i].insert_at_head(HashTableEntry(key, value))\n>>>>>>> Stashed changes\n\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n i = self.hash_index(key)\n node = self.storage[i]\n prev = None\n if node.key == key:\n self.storage[i] = node.next\n return\n while node != None:\n if node.key == key:\n prev.next = node.next\n self.storage[i].next = None\n return\n prev = node\n node = node.next\n self.elements -= 1\n return\n>>>>>>> Stashed changes\n\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n # - find the index in the hash table for the key\n i = self.hash_index(key)\n # - search the list for that key\n if not self.storage[i]:\n return None\n else:\n if self.storage[i].find_key(key) == key:\n return self.storage[i].value\n>>>>>>> Stashed changes\n\n\n def resize(self, new_capacity):\n \"\"\"\n Changes the capacity of the hash table and\n rehashes all key/value pairs.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n prev_storage = self.storage\n self.capacity = new_cap\n self.storage = [None] * new_cap\n for i in range(len(prev_storage)):\n prev = prev_storage[i]\n if prev:\n while prev:\n if prev.key:\n self.put(prev.key, prev.value)\n prev = prev.next\n\n>>>>>>> Stashed changes\n\n\n\nif __name__ == \"__main__\":\n ht = HashTable(8)\n\n ht.put(\"line_1\", \"'Twas brillig, and the slithy toves\")\n ht.put(\"line_2\", \"Did gyre and gimble in the wabe:\")\n ht.put(\"line_3\", \"All mimsy were the borogoves,\")\n ht.put(\"line_4\", \"And the mome raths outgrabe.\")\n ht.put(\"line_5\", '\"Beware the Jabberwock, my son!')\n ht.put(\"line_6\", \"The jaws that bite, the claws that catch!\")\n ht.put(\"line_7\", \"Beware the Jubjub bird, and shun\")\n ht.put(\"line_8\", 'The frumious Bandersnatch!\"')\n ht.put(\"line_9\", \"He took his vorpal sword in hand;\")\n ht.put(\"line_10\", \"Long time the manxome foe he sought--\")\n ht.put(\"line_11\", \"So rested he by the Tumtum tree\")\n ht.put(\"line_12\", \"And stood awhile in thought.\")\n\n print(\"\")\n\n # Test storing beyond capacity\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n # Test resizing\n old_capacity = ht.get_num_slots()\n ht.resize(ht.capacity * 2)\n new_capacity = ht.get_num_slots()\n\n print(f\"\\nResized from {old_capacity} to {new_capacity}.\\n\")\n\n # Test if data intact after resizing\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n print(\"\")", "def __generate_key(length):\n if length % 2 != 0:\n raise ValueError(\"'length' must be a multiple of 2\")\n length_bytes = int(length / 2) # length of key in bytes\n key_bytes = os.urandom(length_bytes)\n return binascii.hexlify(key_bytes).decode()", "def appenddictitemsize(self, key, numents):\n self._dentsvertsdata[key].appendsize(numents * self._multFactor)", "def 
__init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))", "def get_dict_of_bytes2(self):\n pass", "def encode_metadata_size(metasiz):\n return SizeEncoder.pack(metasiz)", "def _set_key(self, key):\n\n # select 56 bits from the 64-bit key\n key = self._permutate(self.__pc1, self._string_to_bitlist(key))\n self.L = key[:28]\n self.R = key[28:]\n for i in range(0, 16):\n for j in range(0, self.__left_rotations[i]):\n self.L.append(self.L[0])\n del self.L[0]\n self.R.append(self.R[0])\n del self.R[0]\n # select 48 bits from 56 bits\n self.Kn[i] = self._permutate(self.__pc2, self.L + self.R)", "def store(self,key,start,end,data):\n\n pass", "def saveInTuple(list,name,type=\"R\",tname = \"T\", keytypes = {}):\r\n if keytypes == {}:\r\n\t for key in list[0].keys(): keytypes[key] = \"F\"\r\n \r\n print \"saving data\"\r\n labels=[]\r\n vvv=vector(0,0,0)\r\n if len(list)==0:\r\n print \"list size = 0\"\r\n return 0\r\n for i in range(4):\r\n if 'has_key' in dir(list[0]):break\r\n list=desagrupate(list)\r\n for key in list[0].keys():\r\n if (dir(list[0][key])==dir(vvv))or(dir(list[0][key])==dir(labels)):\r\n \r\n s=len(list[0][key])\r\n for j in range(s): \r\n labels.append(key+str(j+1)+\"/\" + keytypes[key])\r\n else: labels.append(key+\"/\"+keytypes[key])\r\n if type in [\"X\", \"ASCII\"]:\r\n TUP=XTuple(name,labels)\r\n if type==\"R\":\r\n TUP=RTuple(name,labels,tname)\r\n if type==\"M\":\r\n TUP=MTuple(name,labels)\r\n for thing in list:\r\n for key in thing.keys():\r\n #if key+\"1/\" + keytypes[key] in labels:\r\n # s=len(thing[key])\r\n # for j in range(s):\r\n # TUP.fillItem(key+str(j+1),thing[key][j])\r\n TUP.fillItem(key,thing[key])\r\n TUP.fill()\r\n TUP.close()", "def pack(buffer, *values):\n write_bytes = buffer.write\n\n def write_header(size, tiny, small=None, medium=None, large=None):\n if 0x0 <= size <= 0xF and tiny is not None:\n write_bytes(bytearray([tiny + size]))\n elif size < 0x100 and small is not None:\n write_bytes(bytearray([small]))\n write_bytes(PACKED_UINT_8[size])\n elif size < 0x10000 and medium is not None:\n write_bytes(bytearray([medium]))\n write_bytes(PACKED_UINT_16[size])\n elif size < 0x100000000 and large is not None:\n write_bytes(bytearray([large]))\n write_bytes(struct_pack(\">I\", size))\n else:\n raise ValueError(\"Collection too large\")\n\n for value in values:\n\n # None\n if value is None:\n write_bytes(b\"\\xC0\") # NULL\n\n # Boolean\n elif value is True:\n write_bytes(b\"\\xC3\")\n elif value is False:\n write_bytes(b\"\\xC2\")\n\n # Float (only double precision is supported)\n elif isinstance(value, float):\n write_bytes(b\"\\xC1\")\n write_bytes(struct_pack(\">d\", value))\n\n # Integer\n elif isinstance(value, integer_types):\n if -0x10 <= value < 0x80:\n write_bytes(PACKED_UINT_8[value % 0x100])\n elif -0x80 <= 
value < -0x10:\n write_bytes(b\"\\xC8\")\n write_bytes(PACKED_UINT_8[value % 0x100])\n elif -0x8000 <= value < 0x8000:\n write_bytes(b\"\\xC9\")\n write_bytes(PACKED_UINT_16[value % 0x10000])\n elif -0x80000000 <= value < 0x80000000:\n write_bytes(b\"\\xCA\")\n write_bytes(struct_pack(\">i\", value))\n elif INT64_MIN <= value < INT64_MAX:\n write_bytes(b\"\\xCB\")\n write_bytes(struct_pack(\">q\", value))\n else:\n raise ValueError(\"Integer %s out of range\" % value)\n\n # String\n elif isinstance(value, string_types):\n encoded = bstr(value)\n write_header(len(encoded), 0x80, 0xD0, 0xD1, 0xD2)\n write_bytes(encoded)\n\n # Byte array\n elif isinstance(value, bytes_types):\n write_header(len(value), None, 0xCC, 0xCD, 0xCE)\n write_bytes(bytes(value))\n\n # List\n elif isinstance(value, list):\n write_header(len(value), 0x90, 0xD4, 0xD5, 0xD6)\n pack(buffer, *value)\n\n # Map\n elif isinstance(value, dict):\n write_header(len(value), 0xA0, 0xD8, 0xD9, 0xDA)\n for key, item in value.items():\n pack(buffer, key, item)\n\n # Structure\n elif isinstance(value, Structure):\n write_header(len(value), 0xB0, None, None, None)\n write_bytes(bytearray([value.tag]))\n pack(buffer, *value.fields)\n\n # Other\n else:\n raise TypeError(\"Values of type %s are not supported\" % type(value))", "def _fill_cdata(cls):\n\n funcs = {}\n for key, name in [(\"b\", \"char\"), (\"h\", \"short\"),\n (\"i\", \"int\"), (\"q\", \"longlong\")]:\n for echar, esuffix in [(\"<\", \"le\"), (\">\", \"be\")]:\n esuffix = \"_\" + esuffix\n for unsigned in [True, False]:\n s = struct.Struct(echar + (key.upper() if unsigned else key))\n get_wrapper = lambda f: lambda *a, **k: f(*a, **k)[0]\n unpack = get_wrapper(s.unpack)\n unpack_from = get_wrapper(s.unpack_from)\n\n def get_unpack_from(s):\n def unpack_from(data, offset=0):\n return s.unpack_from(data, offset)[0], offset + s.size\n return unpack_from\n\n unpack_from = get_unpack_from(s)\n pack = s.pack\n\n prefix = \"u\" if unsigned else \"\"\n if s.size == 1:\n esuffix = \"\"\n bits = str(s.size * 8)\n\n if unsigned:\n max_ = 2 ** (s.size * 8) - 1\n min_ = 0\n else:\n max_ = 2 ** (s.size * 8 - 1) - 1\n min_ = - 2 ** (s.size * 8 - 1)\n\n funcs[\"%s%s_min\" % (prefix, name)] = min_\n funcs[\"%s%s_max\" % (prefix, name)] = max_\n funcs[\"%sint%s_min\" % (prefix, bits)] = min_\n funcs[\"%sint%s_max\" % (prefix, bits)] = max_\n\n funcs[\"%s%s%s\" % (prefix, name, esuffix)] = unpack\n funcs[\"%sint%s%s\" % (prefix, bits, esuffix)] = unpack\n funcs[\"%s%s%s_from\" % (prefix, name, esuffix)] = unpack_from\n funcs[\"%sint%s%s_from\" % (prefix, bits, esuffix)] = unpack_from\n funcs[\"to_%s%s%s\" % (prefix, name, esuffix)] = pack\n funcs[\"to_%sint%s%s\" % (prefix, bits, esuffix)] = pack\n\n for key, func in iteritems(funcs):\n setattr(cls, key, staticmethod(func))", "def keyIndex(self, key):\n key ^= bsr(key, 33)\n key *= 0xff51afd7ed558ccdL\n key ^= bsr(key, 33)\n key *= 0xc4ceb9fe1a85ec53L\n key ^= bsr(key, 33)\n return key", "def __construct_attibute_values(tag: IppTag, value: Any) -> bytes:\n bs = b\"\"\n\n if tag in (IppTag.INTEGER, IppTag.ENUM):\n bs += struct.pack(\">h\", 4)\n bs += struct.pack(\">i\", value)\n elif tag == IppTag.BOOLEAN:\n bs += struct.pack(\">h\", 1)\n bs += struct.pack(\">?\", value)\n else:\n bs += struct.pack(\">h\", len(value))\n bs += value.encode(\"utf-8\")\n\n return bs", "def h2(self, key) -> int:\n idx: int = (self.b * self.encode(key)) % self.table_size\n return idx if idx != 0 else 1", "def estimate_map_output_materialized_bytes(num_words, 
num_reducers, key_num_bytes, value_num_bytes):\n SPILL_FILE_PARTITION_INDICATOR_NUM_BYTES = 6\n\n return (num_words * (zero_compress.size_of_zero_compressed_int64(key_num_bytes) +\n key_num_bytes +\n zero_compress.size_of_zero_compressed_int64(value_num_bytes) +\n value_num_bytes) +\n (SPILL_FILE_PARTITION_INDICATOR_NUM_BYTES * num_reducers))", "def encode_u32(value: int) -> bytes:\n return int_to_le_bytes(value, NUMERIC_CONSTRAINTS[CLTypeKey.U32].LENGTH, False)", "def chunk_type(self) -> global___Type.BytesType:", "def keyExp(key):\r\n def sub2Nib(b):\r\n \"\"\"Swap each nibble and substitute it using sBox\"\"\"\r\n return sBox[b >> 4] + (sBox[b & 0x0f] << 4)\r\n \r\n Rcon1, Rcon2 = 0b10000000, 0b00110000\r\n w[0] = (key & 0xff00) >> 8\r\n w[1] = key & 0x00ff\r\n w[2] = w[0] ^ Rcon1 ^ sub2Nib(w[1])\r\n w[3] = w[2] ^ w[1]\r\n w[4] = w[2] ^ Rcon2 ^ sub2Nib(w[3])\r\n w[5] = w[4] ^ w[3]", "def encode_length(value):\n if value == Length.INDEFINITE:\n return bytes([0b10000000])\n\n if value < 127:\n return bytes([value])\n\n output = []\n while value > 0:\n value, remainder = value // 256, value % 256\n output.insert(0, remainder)\n\n # prefix length information\n output = [0b10000000 | len(output)] + output\n return bytes(output)", "def _key(self):\n return (self.name, self.array_type.upper(), self.values)", "def unpack( self, key, data ) :\r\n\r\n return struct.unpack(self[key], data)", "def pack( self, key, *args ) :\r\n\r\n ## return struct.pack(self[key], arg)\r\n # packing and unpacking here are assymetrical,\r\n # as for packing we want to send a complete string :\r\n ## fmt = '=c' + self[key].lstrip('@=<>!') \r\n\r\n fmt = '=c' + self[key].lstrip('=') \r\n\r\n # or more strict ( though not tested ) : \r\n '''\r\n fmt_ = self[key].lstrip('=') \r\n prefix = '=' \r\n PREFIXES = '@=<>!' 
\r\n if fmt_[0] in PREFIXES : \r\n\r\n prefix = fmt_[0] \r\n\r\n fmt = prefix + 'c' + self[key].lstrip( PREFIXES ) \r\n ''' \r\n\r\n\r\n ## # debug \r\n ## print \"format string: '%s', args: \" % (fmt, ), args \r\n\r\n result = struct.pack(fmt, key, *args) \r\n\r\n ## # debug \r\n ## print result\r\n\r\n return result", "def gen_parameters(generator=2,key_size=2048,backend=backend):\n\treturn dh.generate_parameters(generator,key_size,backend)", "def gen_keys():", "def test_encode_pair():\n\tassert encode_pair(0, 0) == 0\n\tassert encode_pair(1, 0) == 1\n\tassert encode_pair(0, 1) == 2\n\tassert encode_pair(4, 6) == 207", "def make_map(\n name: str, key_type: int, keys: List[Any], values: SequenceProto\n) -> MapProto:\n map_proto = MapProto()\n valid_key_int_types = [\n TensorProto.INT8,\n TensorProto.INT16,\n TensorProto.INT32,\n TensorProto.INT64,\n TensorProto.UINT8,\n TensorProto.UINT16,\n TensorProto.UINT32,\n TensorProto.UINT64,\n ]\n map_proto.name = name\n map_proto.key_type = key_type\n if key_type == TensorProto.STRING:\n map_proto.string_keys.extend(keys)\n elif key_type in valid_key_int_types:\n map_proto.keys.extend(keys)\n map_proto.values.CopyFrom(values)\n return map_proto", "def two_x64_concat(data):\n storage_key = bytearray(xxhash.xxh64(data, seed=0).digest())\n storage_key.reverse()\n\n return storage_key + data", "def hash_function(s):\n\n # O(n) over the key length\n # O(1) over the HASH_DATA_SIZE\n\n bytes_list = s.encode()\n\n total = 0\n\n\n for b in bytes_list: # O(n) over the length of the key\n total += b\n\n\n total &= 0xffffffff # 32 bit (8 f's)\n\n return total", "def __hash__(self):\n hash_value = 0\n \n # required\n hash_value ^= self.required << 14\n \n # title\n hash_value ^= hash(self.title)\n \n # type\n hash_value ^= hash(self.type)\n \n # values\n values = self.values\n if (values is not None):\n hash_value ^= len(values)\n \n for value in values:\n hash_value ^= hash(value)\n \n return hash_value" ]
[ "0.5809457", "0.5597021", "0.55033654", "0.5478504", "0.5475797", "0.5431724", "0.54011375", "0.53711766", "0.535695", "0.53174025", "0.53174025", "0.5285025", "0.5257738", "0.52197987", "0.5217525", "0.52163196", "0.51897675", "0.51784354", "0.5164822", "0.5161085", "0.5133049", "0.5132771", "0.51233876", "0.5116999", "0.50814086", "0.5077301", "0.5075172", "0.50541294", "0.5045825", "0.50183356", "0.49607635", "0.49424416", "0.49257272", "0.49088335", "0.49065703", "0.49046287", "0.48981637", "0.48850304", "0.48770157", "0.48689777", "0.48673463", "0.48570126", "0.48528162", "0.48317724", "0.4830153", "0.48263958", "0.4824446", "0.48117653", "0.48034185", "0.48013982", "0.48013362", "0.47918406", "0.47891", "0.47864434", "0.4783491", "0.47709933", "0.47695124", "0.47545555", "0.47444972", "0.47403103", "0.47399265", "0.47394043", "0.47245452", "0.4720622", "0.4704623", "0.4704324", "0.4702176", "0.47003922", "0.46861464", "0.46854302", "0.46829802", "0.46791396", "0.4677447", "0.4666978", "0.46659872", "0.46658787", "0.46639386", "0.46624416", "0.46554172", "0.46544945", "0.4652583", "0.46434093", "0.46378738", "0.4634843", "0.46315935", "0.46279368", "0.4625642", "0.46241367", "0.46204442", "0.46193543", "0.46177632", "0.46168867", "0.46157554", "0.46121126", "0.46032542", "0.45942116", "0.45941442", "0.45936853", "0.45936197", "0.45912138" ]
0.48401326
43
Find all occurrences of val in list lo. Returns a list of indices of val in lo.
def findall(lo,val):
    u = []
    i = -1
    while( i < len(lo)-1):
        try:
            i = lo.index(val,i+1)
            u.append(i)
        except:
            i += 1
    return u
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]", "def getIndexes(self, val):\n # Find where this value is listed. \n valNdx = (self.values == val).nonzero()[0]\n \n # If this value is not actually in those listed, then we \n # must return empty indexes\n if len(valNdx) == 0:\n start = 0\n end = 0\n else:\n # The index into counts, etc. for this value. \n valNdx = valNdx[0]\n start = self.start[valNdx]\n end = self.end[valNdx]\n \n # Create a tuple of index arrays, one for each index of the original array. \n ndx = ()\n for i in range(self.nDims):\n ndx += (self.indexes[start:end, i], )\n return ndx", "def matchloc(alist,val): \n return [ilc for ilc,jlc in enumerate(alist) if jlc==val]", "def all_indices(haystack, needle):\n index = 0\n indices = list()\n while True:\n try:\n i = haystack.index(needle, index)\n except ValueError:\n break\n indices.append(i)\n index = i+1\n return indices", "def get_indexes(from_list, find_list):\n\n df_find = pd.DataFrame(find_list, columns=['value'])\n df_from = pd.DataFrame(list(zip(from_list, np.arange(len(from_list)))), columns=['value', 'index'])\n indexes = pd.merge(df_from, df_find, on='value', how='inner')['index'].values\n return indexes", "def find_value(lists, target):\n loc = []\n l = len(lists)\n for i in range(0, l, 1):\n if(lists[i] == target):\n loc.append(i)\n else:\n continue\n return loc", "def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices", "def getAllIndex(ldata, fldata):\n\treturn list(map(lambda e : fldata.index(e), ldata))", "def map_values_to_value_list(value_list, values):\n return [value_list.index(x) for x in values]", "def get_coincidence_indices(self, lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset+1)\n except ValueError:\n return result\n result.append(offset)", "def indexof(self, value, tag=WORD):\n match = lambda a, b: a.endswith(\"*\") and b.startswith(a[:-1]) or a==b\n indices = []\n for i in range(len(self.words)):\n if match(value, unicode(self.get(i, tag))):\n indices.append(i)\n return indices", "def indexAll(inputList=None, value=None):\r\n if not isinstance(inputList, list):\r\n raise TypeError('Input list must be a list object.')\r\n return [i for i, x in enumerate(inputList) if x == value]", "def find_index(vec_vals,target):\n target=np.atleast_1d(target) #turn scalar into iterable, no op if already array\n vec_vals=np.array(vec_vals)\n index_list=[]\n for item in target:\n first_index=np.argmin(np.abs(vec_vals - item))\n index_list.append(first_index)\n return index_list", "def findIndices(g):\r\n change = [0]\r\n seen = [g[0]]\r\n for i in range(1, len(g)):\r\n if not g[i] in seen:\r\n change.append(i)\r\n seen.append(g[i])\r\n return change", "def indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def linearSearch(values: list, target: int) -> int:\n for i in range(len(values)):\n if target == values[i]:\n return i\n \n return -1", "def coord_indices_of(self, v_list):\n return [self.coord_index_of(v) for v in v_list]", "def find_at(self, x, y):\n return list(self.ifind_at(x, y))", "def lc_index(*args):\n index = []\n x = check_lc_data(args[0])\n i = 0\n for line in args[0].Data.LCData.lc_data:\n i += 1\n if line != x[i - 1]:\n 
index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def int_to_indices(value: int, length: int, radix_bits: int) -> Iterable[int]:\n mask = (1 << radix_bits) - 1\n return ((value >> (i * radix_bits)) & mask for i in reversed(range(length)))", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def positions(self, searchstr: str):\n indices = []\n index = mybinsearch(self.sarray, searchstr, self.comp)\n if index >= 0:\n indices.append(index)\n return indices", "def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))", "def index(self,*val):\n if len(val): self._value = self.allele_set[val[0]]\n return self.allele_set.index(self.value())", "def get_indexes(self, x):\n indexes = []\n for index_hashes in self.hash_functions:\n combined_index = []\n for idx_spec, hash_func in zip(self.config.index_specs, index_hashes):\n combined_index.append(idx_spec.distribution.get_index(hash_func(x)))\n indexes.append(tuple(combined_index))\n return indexes", "def my_index(list_, element):\n pos = []\n for i in range(len(list_)):\n if list_[i] == element:\n pos.append(i)\n return pos", "def return_inds(arr, target):\n\n # Convert list to numpy array\n arr = np.array(arr)\n # Determine all possible combinations, excluding combinations of the same number\n arr_combs = list(combinations(arr, 2))\n \n # Determine the sum of each combination\n sum_arr = np.array(list((map(sum, arr_combs)))) \n \n # Determine the index where the sum is equal to our target\n vals = arr_combs[np.where(sum_arr == target)[0][0]]\n \n # Determine the two indices\n ind_1 = np.where(arr == vals[0])[0][0]\n ind_2 = np.where(arr == vals[1])[0][0]\n\n return ind_1, ind_2", "def return_indices(nums, target):\n indices = []\n i = 0\n number_found = False\n while not number_found:\n my_target = nums[i]\n \n for j in range(i+1,len(nums)):\n my_target += nums[j]\n if my_target == target:\n number_found = True\n indices = [i, j]\n break\n my_target = nums[i]\n \n i+=1\n return indices", "def get_row_indices(df, col, vals):\n\n return list(df[df[col].isin(vals)].index)", "def get_positions(token, docs):\n\n all_matches = [token]\n for doc in docs:\n matches = []\n if token in doc:\n indexes = [i for i, x in enumerate(doc) if x == token]\n matches += [docs.index(doc), len(indexes), indexes]\n if matches:\n all_matches.append(matches)\n return all_matches", "def scan(self) -> list[int]:", "def find_needle_in_haystack(self, needle, haystack): \n r = [] \n L = len(needle) \n for i in range(len(haystack)): \n if haystack[i:i+L] == needle: \n r.append(i)\n return r", "def search_linear(xs, target):\n for (i, v) in enumerate(xs):\n if v == target: # Is referred to as a probe.\n return i\n return -1", "def __getHints(self, p):\n st = bisect.bisect_left(self.index, (p[:self.ln], -1)) # binary search\n en = bisect.bisect_right(self.index, (p[:self.ln], sys.maxsize)) # binary search\n hits = self.index[st:en] # this range of elements corresponds to the hits\n return [h[1] for h in hits] # return just the offsets", "def scan(self) -> List[int]:", "def scan(self) -> List[int]:", "def positions(self, searchstr: str):\n out = []\n for x in range(0, len(self.sa)):\n 
sub = self.sa[x]\n if searchstr == sub[0:len(searchstr)]:\n out.append(x)\n return out\n \n pass", "def linear_search(lst, value):\n i = 0\n while i != len(lst) and lst[i] != value:\n i = i + 1\n if i == len(lst):\n return -1\n else:\n return i", "def indexer(list1, list2):\r\n\tindex_list = []\r\n\tfor x in list2:\r\n\t\tfor y in list1:\r\n\t\t\tif x == y:\r\n\t\t\t\tindex = list1.index(x)\r\n\t\t\t\tindex_list.append(index)\r\n\treturn index_list", "def get_indexes_of(number, int_list):\n\n index = 0\n result = []\n while True:\n if is_end_of_list(int_list, index):\n break\n if number in int_list[index:]: # if number is found in (the rest of) the int_list\n result.append(index + int_list[index:].index(number)) # result = [3]\n index = result[-1] + 1 # index = 4\n continue\n else: # cannot find the number in (the rest of) the int_list\n break\n return result # [3,7]", "def get_indexes(self, variable, *args):\n\n return [get_subset_idxs(data, min, max)\n for data, (min, max) in args]", "def occurence(main_seq,sub_seq):\n start= 0\n indices =[]\n while True:\n start = main_seq.find(sub_seq,start)\n if start > 0:\n indices.append(start)\n else:\n break\n start +=1\n return indices", "def getall(l, idx):\n return [l[i] for i in idx]", "def indices_of(self, col_name, value):\n return list(self._obj[self._obj[col_name] == value].index\n ) if col_name in self._obj.columns else None", "def get_indexes(self):\n return set(k.index for k in self if k.has_index)", "def indices(self, fit):\r\n lam = self.lam_reeval if self.lam_reeval else 2 + len(fit) / 20\r\n reev = int(lam) + ((lam % 1) > np.random.rand())\r\n return np.argsort(array(fit, copy=False)[:2 * (reev + 1)])[:reev]", "def findings_2_idx(findings, corner_2_idx, funcx, funcy):\n idx = []\n for finding in findings:\n x, y = finding\n mesh = np.array(np.meshgrid(funcx(x), funcy(y))).swapaxes(1,2).reshape(2,-1).T\n idx.extend([corner_2_idx(c) for c in mesh])\n\n return np.unique(idx)", "def getIndicesGlobCurrent(lons, lats):\n if np.size(lons) == 1:\n lon_0, lon_1 = int(np.floor(lons-5)), int(np.ceil(lons+5))\n else:\n lon_0, lon_1 = int(np.round(np.min(lons))), int(np.round(np.max(lons)))\n\n if np.size(lats) == 1:\n lat_0, lat_1 = int(np.floor(lats-5)), int(np.ceil(lats+5))\n else:\n lat_0, lat_1 = int(np.round(np.min(lats))), int(np.round(np.max(lats)))\n\n lon_range = range((lon_0-5+180)*4-1, (lon_1+5+180)*4+1)\n lat_range = range((lat_0-5+80)*4-1, (lat_1+5+80)*4+1)\n\n indices = {\"lon\": lon_range,\n \"lat\": lat_range}\n\n print \"getIndicesGlobCurrent(): Success! 
Indices created.\"\n return indices", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def get_lis(a):\n dp = [0]*len(a)\n aux = [inf]*(len(a)+1)\n aux[0] = -inf\n high = 0\n for i in range(len(a)):\n dp[i] = bisect_left(aux, a[i])\n aux[dp[i]] = min(aux[dp[i]], a[i])\n high = max(high, dp[i])\n return high", "def get_possible_indexes(self, factors_lock, random_state):\n factor_list = [[]]\n for i, lock_val in enumerate(factors_lock):\n ## if unlocked, number of possible factor-samples are multiplied by the number of possible unlocked factor values\n if lock_val == -1 : \n base = factor_list\n factor_list = []\n for b in base:\n bc = b.copy()\n for f in range(self.factor_sizes[i]):\n bc.append(f)\n factor_list.append(bc)\n bc = b.copy()\n \n else: ## if factor is locked, just append the locked factor value\n for i in range(len(factor_list)):\n factor_list[i].append(lock_val)\n #transform to index\n possible_indexes = self._features_to_state_space_index(np.asarray(factor_list)) \n return possible_indexes", "def FindIdxValues(X):\n data = X.select_dtypes(include=[\"float64\"])\n idx = np.argwhere(~np.isnan(data.values))\n idx[:, 1] += 4 # add ID variable columns\n StoE = pd.read_csv(\"msresist/data/MS/CPTAC/IDtoExperiment.csv\")\n assert all(StoE.iloc[:, 0] == data.columns), \"Sample labels don't match.\"\n StoE = StoE.iloc[:, 1].values\n tmt = [[StoE[idx[ii][1] - 4]] for ii in range(idx.shape[0])]\n return np.append(idx, tmt, axis=1)", "def list_item_indexes(list_arg: list, item: Any) -> Tuple[int, ...]:\n indexes = [index for index, value in enumerate(list_arg) if value == item]\n return indexes", "def get_os_indices_list(common_os_dict):\n\n indices_list = []\n os_values_list = common_os_dict.values()\n for os_entry in os_values_list:\n indices_list.append(os_entry)\n\n return indices_list", "def parameter_finder(target_list, search_list, msgflag=False, exact=False):\n target_list = [x.lower() for x in target_list]\n\n indexes = []\n\n if isinstance(search_list, str):\n cont = 0\n search_list = search_list.lower()\n for t in target_list:\n if exact == False and search_list in t:\n indexes.append(cont)\n elif exact == True and search_list == t:\n indexes.append(cont)\n cont += 1\n if isinstance(search_list, list):\n search_list = [x.lower() for x in search_list]\n\n for s in search_list:\n s = str(s)\n for cont, t in enumerate(target_list):\n if exact == False and s in t:\n print((s, t))\n indexes.append(cont)\n elif exact == True and s == t:\n print((s, t))\n indexes.append(cont)\n\n if msgflag == True:\n length = len(indexes)\n if length > 1: print(\"There were several ocurrences\")\n if length == 0: print(\"No ocurrences found\")\n\n return indexes", "def __valuesToIndices(self, mappings, values):\n indices = np.empty(0, dtype=np.int_)\n\n for key, _ in mappings.items():\n # Lookup the index of the value of the values in the map.\n index = mappings[key](values[key])\n\n indices = np.hstack((indices, index))\n\n return indices", "def find_all_lists(rij):\n\n langste_rij = [0]*len(rij)\n langste_rij[0] = 1\n\n for i in range(len(rij)):\n for j in range(i):\n #print \"*******\", i, rij[i], j, langste_rij[i]\n if ((rij[j] < rij[i]) and (langste_rij[i] < langste_rij[j]+1)):\n langste_rij[i] = langste_rij[j] + 1\n\n return langste_rij", "def get_indexes(self, items: Iterable[_T]) -> List[int]:\n return [self.get_index(item) for item in 
items]", "def find_indices(li, first_elt, second_elt):\r\n index1, index2 = li.index(first_elt), li.index(second_elt)\r\n if index1 == index2:\r\n index2 = index1 + 1 + li[index1+1:].index(second_elt)\r\n if index1 > index2:\r\n index1, index2 = index2, index1\r\n return (index1+1, index2+1)", "def indices_of_label(self, label_name):\n return self.indices_of('label', label_name)", "def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1", "def _get_indexes(self, participants):\n tr_idx = int(np.floor(self.tr_size*len(participants)))\n j = self.val_size + self.tr_size\n val_idx = int(np.floor(j*len(participants)))\n return tr_idx, val_idx", "def find_all_elements(grid, target):\n \n indices = []\n \n ### This pattern of iterating through row and col indices is very common\n for row_number in range(len(grid)):\n for col_number in range(len(grid[row_number])):\n \n if grid[row_number][col_number] == target:\n indices.append((row_number, col_number))\n \n return indices", "def fn(nums):\n ans, vals = [], []\n for i, x in enumerate(nums): \n k = bisect_left(vals, x)\n if k == len(vals): vals.append(x)\n else: vals[k] = x\n ans.append(k)\n return ans", "def arg_indices(concept, mentions, toks):\r\n indices = []\r\n for i, tok in enumerate(toks):\r\n for m in mentions:\r\n if 'start' not in m:\r\n logging.warning('%s', m)\r\n if m['id'] == concept and m['start'] <= tok['start'] and tok['end'] <= m['end']:\r\n indices.append(i)\r\n break\r\n return indices", "def index_two_v2(values):\n\n pairs = []\n for i in range(len(values)):\n pairs.append((values[i], i))\n pairs.sort()\n return pairs[0][1], pairs[1][1] # indices of the values are in location 1 of each pair", "def linear_search(vlist, srchval): # somewhat different from book\n#Look at each item in list. If it equals the value you are looking for, stop.\n # linear_search_2.py\n index = 0\n for item in vlist:\n if item == srchval:\n return index # implicit break\n index += 1\n \n return -1", "def get_idx_from_sent(sent, word_idx_map, max_l=50, filter_h=3):\n x = []\n x_mask = []\n pad = filter_h - 1\n for i in xrange(pad):\n x.append(0)\n x_mask.append(0)\n words = sent.split()\n for i, word in enumerate(words):\n if i >= max_l: break\n if word in word_idx_map:\n x.append(word_idx_map[word])\n x_mask.append(1)\n while len(x) < max_l+2*pad:\n x.append(0)\n x_mask.append(0)\n for e in x_mask:\n x.append(e)\n return x", "def min_indice(L):\n min_l = min(L)\n return [min_l,np.where(L==min_l)[0][0]]", "def find_indices(self,nr_frame,label,forward=True):\n if forward:\n index = nr_frame-self.first_frame\n label_list=[label]\n #Fetches the 10 first frames. 10 is arbitrary\n n_iterations = min(10,len(self.correspondance_lists)-index-1 )\n for i in range(n_iterations):\n corresp_list = self.correspondance_lists[index+i]\n match = [v for u,v in corresp_list if u==label_list[index+i]]\n match = match[0]\n if match==-1:\n break\n \n label_list.append(match)\n return label_list\n \n else:\n index = nr_frame-self.first_frame\n label_list=[label]\n #Fetches the 10 first frames. 
10 is arbitrary\n n_iterations = min(10,index )\n for i in range(n_iterations):\n corresp_list = self.correspondance_lists[index-i]\n match = [u for u,v in corresp_list if v==label_list[index-i]]\n match = match[0]\n if match==-1:\n break\n label_list.append(match)\n return label_list", "def get_pixel_indices(self, lats, lons):\n return self._hpx.get_pixel_indices(lats, lons)", "def index(liste, value):\n\n for ii in range(len(liste)):\n if liste[ii] == value:\n return ii\n return None", "def get_all_occurences(s, word):\n res = [0]\n print(\"Searching for \" + word)\n while True:\n try:\n idx = s.index(word, res[-1])\n res.append(idx+1)\n #print(\"found at \", idx)\n except ValueError:\n break\n return res[1:]", "def get_indexes_for_word (self,word):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,word,)\r\n db_cursor.execute(\"SELECT note_index\"\r\n +\" FROM word_to_indexes\"\r\n +\" WHERE notebook=? and word=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n\r\n return self.word_dict[word]", "def linear_search(self, num_lst, key):\r\n # Running time: O(n)\r\n for i in range(len(num_lst)):\r\n if num_lst[i] == key:\r\n return i\r\n \r\n return -1", "def value_to_idx(val_range, unique_values, run_idx):\n return np.where(unique_values == val_range[run_idx])[0]", "def linear_search(element, list_of_elements):\n for i, elem in enumerate(list_of_elements):\n if elem == element:\n return i\n return None", "def indices_of_specie(self, specie: Union[int, str]) -> List[int]:\n return [i for i, spec in enumerate(self.coded_species)\n if spec == specie]", "def get_antecedent_constant_indexes(rule):\n constant_indexes = dict()\n for antecedent_atom in extract_logic_predicates(rule.antecedent):\n predicate = antecedent_atom.functor.name\n indexes = {\n i\n for i, arg in enumerate(antecedent_atom.args)\n if isinstance(arg, Constant)\n }\n if len(indexes) > 0:\n constant_indexes[predicate] = indexes\n return constant_indexes", "def indsWithin(vals, extr, edges=True):\n assert np.ndim(vals) == 1, \"Only `ndim = 1` arrays allowed!\"\n bnds = minmax(extr)\n if(edges):\n inds = np.where((vals >= bnds[0]) & (vals <= bnds[1]))[0]\n else:\n inds = np.where((vals > bnds[0]) & (vals < bnds[1]))[0]\n\n return inds", "def _xy_locs(mask):\n y, x = mask.nonzero()\n return list(zip(x, y))", "def linear_search_sentinal(lst, value):\n\n lst.insert(0, value)\n\n i = len(lst) - 1\n\n while lst[i] != value:\n i = i - 1\n\n lst.pop(0)\n\n if i == 0:\n return -1\n else:\n return i - 1", "def __find_index(arr, val):\n if val is not None:\n return numpy.searchsorted(arr, val)\n else:\n return val", "def getindex(self,name,searchfrom='name'):\n name = name.replace(':','_').lower()\n pat = re.compile(name)\n result = []\n\n for (i,elem) in enumerate(self.lat):\n if pat.search(elem[searchby]):\n result.append(i)\n return result", "def findidx(X, v, tol=1e-3):\n\tloc = -1\n\tdiff = 1e15 # Take a big difference\n\tn = len(X)\n\n\tfor i in xrange(n):\n\t\tndiff = abs(X[i]-v)\n\t\tif ndiff <= tol and ndiff < diff:\n\t\t\tloc = i\n\t\t\tdiff = ndiff\n\t\n\treturn loc", "def _subset(lst: list, val_filter: str) -> int:\n \n for i, v in enumerate(lst):\n if v == val_filter:\n return i", "def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]", "def get_idx_from_sent(self, sent, word_idx_map, max_l=45, k=300, filter_h=5):\n x = []\n pad = filter_h - 1\n # for i in 
xrange(pad):\n # x.append(0)\n words = sent.split()\n for word in words:\n if word in word_idx_map:\n x.append(word_idx_map[word])\n if len(x)==max_l+pad:\n break\n while len(x) < max_l+2*pad:\n x.append(0)\n return x", "def find_offsets(haystack, needle):\n\toffs = -1\n\twhile True:\n\t\toffs = haystack.find(needle, offs+1)\n\t\tif offs == -1:\n\t\t\tbreak\n\t\telse:\n\t\t\tyield offs", "def binary_search(alist, target):\n index = binary_search_iterative(alist, target)\n return index", "def binary_search(self, num_lst, key):\r\n # Running time: O(log n) with O(n logn) overhead\r\n # get sorted list\r\n num_lst = sorted(num_lst)\r\n \r\n low, high, idx = 0, len(num_lst), -1\r\n \r\n while low < high:\r\n mid = int(math.floor((low+high) / 2.0))\r\n \r\n if key < num_lst[mid]: high = mid - 1\r\n elif key > num_lst[mid]: low = mid + 1\r\n elif key == num_lst[mid]: \r\n idx = mid\r\n return idx\r\n \r\n return idx", "def get_map_values(self, lons, lats, ibin=None):\n pix_idxs = self.get_pixel_indices(lons, lats, ibin)\n idxs = copy.copy(pix_idxs)\n\n m = np.empty_like(idxs[0], dtype=bool)\n m.fill(True)\n for i, p in enumerate(pix_idxs):\n m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i])\n idxs[i][~m] = 0\n\n vals = self.counts.T[idxs]\n vals[~m] = np.nan\n return vals", "def __twoSum(self, numbers, target):\n dic = {}\n for i, value in enumerate(numbers):\n complement = target - value\n if complement in dic:\n return [dic[complement], i]\n else:\n # index the new value\n dic[value] = i", "def get_idx_set(i, sets):\n idxs = []\n for j, set_j in enumerate(sets):\n if i in set_j: idxs.append(j)\n return idxs", "def _get_indices_from_iss(self, iss):\n iss = [iss] if type(iss) not in [np.ndarray, list] else iss\n if self.iss is not None:\n inds = []\n for i in iss:\n inds.append(list(self.iss).index(i))\n# else:\n# inds = iss\n return inds", "def find_position(self, val):\n edges = np.array(self.cell_edges)\n if val in edges:\n index = np.searchsorted(edges, val)\n return index, index\n else:\n edges -= val\n if edges[0] > 0:\n return -1, 0\n if edges[-1] < 0:\n return 0, -1\n index = 0\n for i, e in enumerate(edges):\n if e > 0:\n index = i\n break\n return index - 1, index", "def linear_search_iterative(alist, target):\n index_target = None\n found = False\n index_current = 0\n while index_current < len(alist) and found is False:\n if alist[index_current] == target:\n index_target = index_current\n found = True\n index_current += 1\n return index_target" ]
[ "0.70793176", "0.7071109", "0.66156113", "0.6319743", "0.6250533", "0.620535", "0.62024206", "0.619781", "0.6169729", "0.60985184", "0.6077737", "0.6067603", "0.5907215", "0.5842827", "0.58379203", "0.5817054", "0.58101517", "0.5786621", "0.577902", "0.5734879", "0.5654707", "0.5616727", "0.55868834", "0.5581693", "0.5579649", "0.5515418", "0.5482658", "0.5481943", "0.54432505", "0.54349846", "0.5433607", "0.541198", "0.540692", "0.53990185", "0.5387827", "0.5384745", "0.53846455", "0.53846455", "0.53340465", "0.532639", "0.5321772", "0.53210115", "0.53143036", "0.5314129", "0.5304778", "0.5303014", "0.5294939", "0.5279905", "0.5277896", "0.5274131", "0.52718896", "0.52718896", "0.52714247", "0.52589935", "0.5243768", "0.523001", "0.52261263", "0.5225127", "0.5199031", "0.51928705", "0.5179667", "0.5179497", "0.51731926", "0.51698726", "0.5160939", "0.5160316", "0.51357085", "0.5130136", "0.5127168", "0.5125203", "0.51217115", "0.5116338", "0.510652", "0.5101254", "0.5095237", "0.50888777", "0.50886786", "0.5088214", "0.50826013", "0.5080806", "0.5080801", "0.5071751", "0.507141", "0.5069089", "0.506789", "0.50531656", "0.50515014", "0.5047353", "0.5045569", "0.5045092", "0.5040066", "0.5038577", "0.5034531", "0.50313795", "0.5028644", "0.5016542", "0.50148255", "0.50136113", "0.5007277", "0.50035566" ]
0.8087934
0
Generate an identity key pair. Clients should only do this once, at install time. Returns the generated IdentityKeyPair.
def generateIdentityKeyPair(): keyPair = Curve.generateKeyPair() publicKey = IdentityKey(keyPair.getPublicKey()) serialized = '0a21056e8936e8367f768a7bba008ade7cf58407bdc7a6aae293e2c' \ 'b7c06668dcd7d5e12205011524f0c15467100dd603e0d6020f4d293' \ 'edfbcd82129b14a88791ac81365c' serialized = binascii.unhexlify(serialized.encode()) identityKeyPair = IdentityKeyPair(publicKey, keyPair.getPrivateKey()) return identityKeyPair # return IdentityKeyPair(serialized=serialized)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_key_pair(self, keysize, cb):\n\n def gen_key_pair_pub_cb(data, ctx):\n if not data:\n warning('keymanagement: Could not generate a key pair\\n')\n cb(None, None)\n else:\n cb(ctx, data)\n\n def gen_key_pair_priv_cb(data, ctx):\n if not data:\n warning('keymanagement: Could not generate a key pair\\n')\n cb(None, None)\n else:\n xrun([self.sslname, 'rsa', '-pubout'], gen_key_pair_pub_cb,\n data, data)\n\n return xrun([self.sslname, 'genrsa', str(keysize)],\n gen_key_pair_priv_cb, None)", "def gen_key_pair():\n sk = gen_secret_key(BITCOIN.gen.n)\n pk = PublicKey.from_sk(sk)\n return sk, pk", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key", "def generate_symmetric_key():\n return Fernet.generate_key()", "def generate_rsa_key_pair(self):\n\t\tprint \"Started rsa key generation\"\n\t\tkey = RSA.generate(self.key_size, randfunc=self.random_number_generator)\n\t\t\t\n\t\tpub_key = key.publickey().exportKey()\n\t\tprint pub_key\n\t\t\n\n\t\tpriv_key = key.exportKey()\n\t\tprint \"Private key\", priv_key \n\t\tprint \"Note: Normally, the private key should be protected. For the purposes of this demo, I'm printing it to terminal.\"", "def generate_key(self, **options):\n\n return security_utils_services.generate_rsa_key(**options)", "def create_key_pair(self) -> Keypair:\n res = self.context.post(\n \"/dsum/create_key_pair\", None, None, \"DSum: failed creating a Curve 25519 Keypair\")\n return Keypair(res['private_key_id'], res['public_key_id'])", "def create_keypair(self, username):\n msg = \"create_keypair not implemented\"\n raise NotImplementedError(msg)", "def generate_signing_keys():\n return SigningKey.generate(curve=SECP256k1)", "def create_key_pair(self, keypair, **kwargs):\n\n if not isinstance(keypair, models.CreateKeyPairReq):\n raise HuaweiCloudSDKException(\n message=\"The datatype of parameter(keypair) \"\n \"is not CreateKeyPairReq\")\n body_params = keypair.serialize()\n\n header_params = {}\n header_params['Accept'] = util.select_header_accept(\n ['application/xml', 'application/json'])\n\n header_params['Content-Type'] = util.select_header_content_type(\n ['application/json', 'application/xml'])\n\n return_code, return_data, _ = self.api_client.handle_raw_request(\n 'compute', 'POST',\n '/os-keypairs',\n headers=header_params,\n body=body_params,\n timeout=kwargs.get('_request_timeout', None),\n _preload_content=kwargs.get('_preload_content', True))\n\n if return_data is not None:\n return_data = json.loads(return_data)\n else:\n return_data = {}\n if return_code not in [200, 201]:\n raise HuaweiCloudSDKException(\n return_code,\n \"Run create_key_pair failed, \"\n \"message=%s\" % return_data.get(\"message\"))\n return models.CreateKeyPairResp().deserialize(return_data)", "def generate_key(self):\n key = rsa.generate_private_key(\n public_exponent=self.settings['key_public_exponent_size'],\n key_size=self.settings['key_size'],\n backend=default_backend()\n 
)\n return key", "def gen_key(self):\n\n if not self.private_key:\n self._gen_key()\n else:\n raise CryptoError(\"Private Key already existing\")", "def generate_key():\n key = crypto.Key.generate_key()\n click.echo('Private Key (len {}):: \\n{}'.format(\n len(key.get_privkey()),\n hexlify(key.get_privkey())))\n click.echo('Public Key (len {})::\\n{}'.format(\n len(key.get_pubkey()),\n hexlify(key.get_pubkey())))", "def test_generate_key_pair(self):\n with patch('iceit.crypto.gnupg.GPG') as mock_gpg:\n mock_key = Mock()\n mock_key.fingerprint = 'fake-fingerprint'\n mock_gpg.gen_key.return_value = mock_key\n\n mock_gpg.return_value = mock_gpg\n encryptor = self.test_init()\n fake_key = encryptor.generate_key_pair(key_type=\"RSA\", length=4096, options={\n 'name_real': 'Fake Name', 'name_email': '[email protected]', 'name_comment': 'Fake comment'})\n\n self.assertEqual(mock_gpg.gen_key_input.call_count, 1)\n self.assertEqual(fake_key, mock_key.fingerprint)", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def _gen_key(self):\n\n input_data = self._gpg.gen_key_input(key_type=\"RSA\",\n key_length=self.key_length, name_real=self.name,\n name_comment=self.comment, name_email=self.email)\n\n log.info(\"Generating key: (%s)\" % input_data)\n\n self.key = self._gpg.gen_key(input_data)", "def generate(self, force=False):\n if not self.check_force_generate(force):\n return False\n\n mkdirs(self.path)\n\n command = [openssl, 'ecparam', '-genkey', '-name', self.asn1_oid, '-out', self.key_file]\n\n self.log.info('Generating EC key')\n # Generate the keyfile with no password\n if not run_command(command):\n raise RuntimeError('EC key generation failed', self)\n\n # Now encrypt the key with a password, overwriting the original\n # passwordless key.\n if self.password:\n command = [\n openssl, 'ec',\n '-in', self.key_file,\n '-out', self.key_file,\n '-des3', '-passout', 'pass:{}'.format(self.password)\n ]\n self.log.info('Encrypting key with password')\n\n if not run_command(command):\n raise RuntimeError('EC key file password encryption failed')\n\n if not self.exists():\n raise RuntimeError(\n 'Key generation succeeded but key file does not exist. 
'\n 'This should not happen', self\n )", "def createKeyPair(type, bits):\n pkey = crypto.PKey()\n pkey.generate_key(type, bits)\n return pkey", "def create_key ():", "def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def new_key_pair(self):\n from plonevotecryptolib.KeyPair import KeyPair # avoids circular imports\n return KeyPair(self)", "def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def generate_keys() -> tuple:\n private_key = ecdsa.SigningKey.generate(curve=curve)\n public_key = private_key.get_verifying_key()\n\n private_key = encode_private_key(private_key)\n public_key = encode_public_key(public_key)\n\n return public_key, private_key", "def generate_ecdh_key_pair() -> tuple[X25519PrivateKey, bytes]:\n private_key = X25519PrivateKey.generate()\n public_key_raw = private_key.public_key().public_bytes(\n serialization.Encoding.Raw, serialization.PublicFormat.Raw\n )\n return private_key, public_key_raw", "def get_key_pair() -> typing.Tuple[bytes, bytes]: \n return _get_key_pair_from_sk(ecdsa.SigningKey.generate(curve=CURVE))", "def generate_key(self):\n self.key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(self.key)", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key %s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def generate_keypair(self, key_length: int = 2048) -> Tuple[bytes, bytes]:\n\n return None", "def generate_new_key(self, index):\n new_key = self.chain_key.subkey(index)\n 
self._key_generated(new_key, index)", "def create_key_pair(self, key_name):\r\n params = {'KeyName':key_name}\r\n return self.get_object('CreateKeyPair', params, KeyPair, verb='POST')", "def keygen():\n pk, pub = generate_signing_key()\n t = PrettyTable([\"Private (install on your witness node)\",\n \"Public (publish with 'conductor enable' command)\"])\n t.align = \"l\"\n t.add_row([pk, pub])\n\n output(t, '')", "def gen_temp_key(self, keysize=1024):\n self.temp_session_key = [None, None]\n self.key_exchange_gui.generating_temp_key()\n return self.gen_key_pair(keysize, self.gen_temp_key_cb)", "def create(self):\n self.initialize()\n\n if not self.__keypair:\n logger.info('Creating keypair %s...' % self.keypair_settings.name)\n\n if self.keypair_settings.public_filepath and os.path.isfile(\n self.keypair_settings.public_filepath):\n logger.info(\"Uploading existing keypair\")\n self.__keypair = nova_utils.upload_keypair_file(\n self._nova, self.keypair_settings.name,\n self.keypair_settings.public_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = False\n else:\n logger.info(\"Creating new keypair\")\n keys = nova_utils.create_keys(self.keypair_settings.key_size)\n self.__keypair = nova_utils.upload_keypair(\n self._nova, self.keypair_settings.name,\n nova_utils.public_key_openssh(keys))\n file_utils.save_keys_to_files(\n keys, self.keypair_settings.public_filepath,\n self.keypair_settings.private_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = True\n elif self.__keypair and not os.path.isfile(\n self.keypair_settings.private_filepath):\n logger.warn(\"The public key already exist in OpenStack \\\n but the private key file is not found ..\")\n\n return self.__keypair", "def CreateKeyFile():\n keyfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'genrsa',\n '-out', keyfile,\n '2048'\n ]\n _RunCommand(cmd)\n return keyfile", "def generate_key():\n return get_token_generator().generate_token()", "def get_keypair(ec2):\n # call the boto ec2 function to create a key pair\n key_pair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME)\n print(\"\\n===Created a new key pair in AWS.\")\n\n # capture the key and store it in a file\n KeyPairOut = str(key_pair.key_material)\n\n # create a file to store the key locally\n print(\"Saving the keypair.\")\n key_pair_path = KEY_PAIR_NAME + \".pem\"\n with open(key_pair_path, \"w\") as f:\n f.write(KeyPairOut)\n os.chmod(key_pair_path, 0o600)\n print(\"===Changed access permission to read-only.\")", "def GenerateKey(self):\n self.key_name = self.key_name or str(uuid.uuid4())\n if self.key is None or not self.key.id():\n self.key = ndb.Key(self._get_kind(), self.key_name)\n return True\n return False", "def generate_key_image(\n self, output: OutputInfo, private_view_key: bytes, private_spend_key: bytes\n ) -> bytes:", "def generate_key_pair(G):\r\n\r\n global random\r\n\r\n if random == None:\r\n random = hash_drbg.HashDRBG()\r\n\r\n if G.order == None:\r\n raise RuntimeError(\"Base point must have order.\")\r\n\r\n key_size = log(ec.leftmost_bit(G.order)) / log(2)\r\n key_size = int(ceil(key_size) / 2)\r\n private_key = 1\r\n\r\n while private_key <= 1:\r\n private_key = random(key_size) #generates a random number\r\n #with twice the 
required bits\r\n private_key %= G.order\r\n\r\n return (private_key, G * private_key)", "def generate(cls, params = None, quiet = False):\n\n if params is None:\n if not quiet:\n logger.debug(\"Generating new ECDSA key parameters\")\n params = KeyParams.generateEC()\n\n assert isinstance(params, KeyParams)\n\n if not quiet:\n logger.debug(\"Generating new ECDSA key\")\n\n return cls(POW = rpki.POW.Asymmetric.generateFromParams(params.get_POW()))", "def genKey(self, privateKey,otherKey):\n\t\tself.sharedSecret = self.genSecret(privateKey, otherKey)\n\n\t\t# Convert the shared secret (int) to an array of bytes in network order\n\t\t# Otherwise hashlib can't hash it.\n\t\ttry:\n\t\t\t_sharedSecretBytes = self.sharedSecret.to_bytes(\n\t\t\t\tself.sharedSecret.bit_length() // 8 + 1, byteorder=\"big\")\n\t\texcept AttributeError:\n\t\t\t_sharedSecretBytes = str(self.sharedSecret)\n\n\t\ts = hashlib.sha256()\n\t\ts.update(bytes(_sharedSecretBytes))\n\t\tself.key = s.digest()", "def generate(self, force=False):\n raise NotImplementedError(\n 'Cannot generate Key of unknown algorithm type. Use a subclass.', self\n )", "def generate_key(self):\n return str(uuid4())", "def get_identity_shared_key(self, identity, curve, their_pubkey, index=0):\n params = {'identity': identity, 'curve': curve, 'index': index,\n 'their_pubkey': their_pubkey}\n return self._jadeRpc('get_identity_shared_key', params)", "def generate_keys(self, password):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def create_key_pair(self, key_name):\n response = key_pair.create_key_pair(self.url, self.verb, self.headers,\n self.version, key_name)\n if response is not None :\n res = CreateKeyPairResponse.CreateKeyPairResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def genKey(self, otherKey):\n self.sharedSecret = self.genSecret(self.privateKey, otherKey)\n #print(\"Shared secret:\")\n #print(self.sharedSecret)\n s = hashlib.sha256()\n s.update(bytes(str(self.sharedSecret).encode()))\n self.key = s.digest()", "def generate_input_key(\n self, output: OutputInfo, private_view_key: bytes, private_spend_key: bytes\n ) -> bytes:", "def ex_create_keypair(self, name):\n params = {\n 'Action': 'CreateKeyPair',\n 'KeyName': name,\n }\n response = self.connection.request(self.path, params=params).object\n key_material = self._findtext(response, 'keyMaterial')\n key_fingerprint = self._findtext(response, 'keyFingerprint')\n return {\n 'keyMaterial': key_material,\n 'keyFingerprint': key_fingerprint,\n }", "def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")", "def gen_private_key():\n return DH.b2i(Random.new().read(DH_SIZE))", "def create(self, name, public_key=None):\n data = {\n \"keypair\": {\n \"name\": name\n }\n }\n if public_key is not None:\n data['keypair']['public_key'] = public_key\n \n path = '/os-keypairs'\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create/import openstack keypair: %s' % truncate(res))\n return res[0]['keypair']", "def generate_key():\r\n # 
generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def sym_key_gen(pairing_group=None, sym_key_size=None, debug=0):\n\n # If sym_key_size is not defined, set a default value\n if sym_key_size is None:\n sym_key_size = SYM_KEY_DEFAULT_SIZE\n\n # Clamp the size between SYM_KEY_MIN_SIZE and the system maximum possible value\n size = clamp(sym_key_size, SYM_KEY_MIN_SIZE, sys.maxsize)\n\n # Check if an error occurred during clamping\n if size is None:\n logging.error('sym_key_gen clamp size exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in sym_key_gen clamp size')\n raise Exception\n\n # Check if size is a power of 2\n if not math.log2(size).is_integer():\n logging.error('sym_key_gen size exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in sym_key_gen size')\n raise Exception\n\n # Generate and return a random symmetric key with the given size\n return random_string_gen(pairing_group, sym_key_size)", "def gen_public_key(n, e):\n\n # Assign key parameters\n key_params = (n, e)\n # Construct private key\n key = RSA.construct(key_params)\n\n return key.exportKey()", "def generate_key():\n return str(uuid.uuid4())", "def vsce_uokms_server_generate_server_key_pair(self, ctx, server_private_key, server_public_key):\n vsce_uokms_server_generate_server_key_pair = self._lib.vsce_uokms_server_generate_server_key_pair\n vsce_uokms_server_generate_server_key_pair.argtypes = [POINTER(vsce_uokms_server_t), POINTER(vsc_buffer_t), POINTER(vsc_buffer_t)]\n vsce_uokms_server_generate_server_key_pair.restype = c_int\n return vsce_uokms_server_generate_server_key_pair(ctx, server_private_key, server_public_key)", "def generate_keypair(bits):\n p = generate_prime(bits // 2)\n #print(p)\n q = generate_prime(bits // 2)\n #print(q)\n n = p * q\n return PrivateKey(p, q, n), PublicKey(n)", "def generate_keystream(self):", "def gen_key(app):\n\tos.system('lxc-attach -n %s -- ssh-keygen -t rsa -N \"\" -f key' % app)", "def generate(cls, keylength = 2048, quiet = False):\n\n if not quiet:\n logger.debug(\"Generating new %d-bit RSA key\", keylength)\n if generate_insecure_debug_only_rsa_key is not None:\n return cls(POW = generate_insecure_debug_only_rsa_key())\n else:\n return cls(POW = rpki.POW.Asymmetric.generateRSA(keylength))", "def generate_key(seed):\n private_key = sha256(seed)\n public_key = privtopub(private_key)\n return {\"private\": private_key, \"public\": public_key}", "def test_create_keypair_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)", "def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in range(block_size)])\n return AES.new(second_key, AES.MODE_ECB).encrypt(pad((key.encode('ascii')), block_size))", "def create_pem_keys(self):\n self.random_rsa()\n\n return self.keys", "def generate_key():\n return get_random_bytes(KEY_SIZE)", "def create_handshake_key_pair(cls) -> Tuple[bytes, bytes]:\n ...", "def generate_private_key(self):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key generation.\")\n return 
self.load_private_key(SigningKey.generate(curve=self.curve))", "def get_new_key() -> rsa.RSAPrivateKeyWithSerialization:\n\n return rsa.generate_private_key(\n backend=default_backend(), public_exponent=65537, key_size=2048\n )", "def generate_keys(attributes: List[AttributeName]) -> Tuple[SecretKey, PublicKey]:\n # Pick uniformly random variables\n x = G1.order().random()\n y = {a: G1.order().random() for a in attributes}\n\n # take generators of G1 and G2\n g1 = G1.generator()\n g2 = G2.generator()\n\n # Compute Xs and Ys\n X1 = g1 ** x\n X2 = g2 ** x\n Y1 = {a: g1 ** y_i for a, y_i in y.items()}\n Y2 = {a: g2 ** y_i for a, y_i in y.items()}\n\n # Output public and secret keys\n pk = PublicKey(attributes, g1, Y1, g2, X2, Y2) # type:ignore\n sk = SecretKey(x, X1, y)\n return sk, pk", "def generate_key(self):\n cmd = self.generate_key_cmd()\n self.show(cmd)\n if self.dryrun:\n return None\n s, _, _ = self.as_user(cmd)\n assert s == 0, ('failed to generate key', cmd)\n keyname = self.extract_key_name()\n return keyname", "def generate_key():\n key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(key)", "def generate_key_and_cert():\n signing_key = rsa.generate_private_key(backend=crypto_default_backend(), public_exponent=65537, key_size=2048)\n subject = issuer = x509.Name(\n [\n x509.NameAttribute(NameOID.COUNTRY_NAME, 'NO'),\n x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, 'OSLO'),\n x509.NameAttribute(NameOID.LOCALITY_NAME, 'OSLO'),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, 'Intility AS'),\n x509.NameAttribute(NameOID.COMMON_NAME, 'intility.no'),\n ]\n )\n signing_cert = (\n x509.CertificateBuilder()\n .subject_name(subject)\n .issuer_name(issuer)\n .public_key(signing_key.public_key())\n .serial_number(x509.random_serial_number())\n .not_valid_before(datetime.utcnow())\n .not_valid_after(\n # Our certificate will be valid for 10 days\n datetime.utcnow()\n + timedelta(days=10)\n # Sign our certificate with our private key\n )\n .sign(signing_key, hashes.SHA256(), crypto_default_backend())\n .public_bytes(crypto_serialization.Encoding.DER)\n )\n return signing_key, signing_cert", "def create_rsa_key_pair() -> Tuple[str, str]:\n key = RSA.generate(RSA_KEY_STRENGTH)\n public_key = key.publickey().export_key().decode()\n private_key = key.export_key().decode()\n return public_key, private_key", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def create_keypair(self, name=None, public_key=None):\n body = {}\n if name is not None:\n body.update({\"name\": name})\n if public_key is not None:\n body.update({\"public_key\": public_key})\n return self._create(_keypair.Keypair, **body)", "def generate_rsa_auxiliary_key_pair() -> AuxiliaryKeyPair:\n rsa_key_pair = rsa_keypair()\n return AuxiliaryKeyPair(rsa_key_pair.private_key, rsa_key_pair.public_key)", "def gnupg_keypair(\n gnupg_gen_key_conf: Path, gnupg_trust_store: GnuPGTrustStore\n) -> GnuPGKeypair:\n\n LOGGER.debug(\"Initializing GPG keypair ...\")\n environment = {\"HOME\": \"/dev/null\"}\n result = subprocess.run(\n [\n \"gpg\",\n \"--batch\",\n \"--homedir\",\n str(gnupg_trust_store.gnupg_home),\n \"--gen-key\",\n \"--keyid-format\",\n \"long\",\n str(gnupg_gen_key_conf),\n ],\n capture_output=True,\n check=True,\n env=environment,\n )\n keyid = re.findall(\n r\"gpg: key (\\w+) marked as ultimately trusted\", result.stderr.decode(\"utf-8\")\n )[0]\n # LOGGER.debug(\" keyid : %s\", keyid)\n\n result = 
subprocess.run(\n [\n \"gpg\",\n \"--fingerprint\",\n \"--fingerprint\", # Double --fingerprint needed for subkeys\n \"--homedir\",\n str(gnupg_trust_store.gnupg_home),\n \"--with-colons\",\n str(keyid),\n ],\n capture_output=True,\n check=True,\n env=environment,\n )\n # Fingerprint order: pubkey [, subkey ]...\n fingerprints = re.findall(r\"fpr:{9}(\\w+):\", result.stdout.decode(\"utf-8\"))\n LOGGER.debug(\" Fingerprints:\")\n for fingerprint in fingerprints:\n LOGGER.debug(\" %s\", fingerprint)\n\n yield GnuPGKeypair(\n fingerprints=fingerprints,\n gen_key_conf=gnupg_gen_key_conf,\n gnupg_home=gnupg_trust_store.gnupg_home,\n keyid=keyid,\n )", "def generate_key():\n key = Fernet.generate_key()\n with open(\"pass.key\", \"wb\") as key_file:\n key_file.write(key)", "def _create_pkey(self, commonname, serial):\n pkey = PKey()\n pkey.generate_key(crypto.TYPE_RSA, self.key_bits)\n private = crypto.dump_privatekey(crypto.FILETYPE_PEM,\n pkey).decode()\n key_path = self._get_key_path(commonname, serial)\n if os.path.exists(key_path):\n raise FileExistsError(key_path)\n with open(key_path, 'w') as private_file:\n private_file.writelines(private)\n\n key_link = self._get_key_link(commonname)\n if os.path.exists(key_link):\n os.unlink(key_link)\n os.symlink(os.path.basename(key_path), key_link)\n\n return pkey", "def keygen(\n args: argparse.Namespace,\n config: KSKMConfig,\n p11modules: KSKM_P11,\n logger: logging.Logger,\n) -> bool:\n logger.info(\"Generate key\")\n flags = FlagsDNSKEY.ZONE.value | FlagsDNSKEY.SEP.value\n dnssec_alg = AlgorithmDNSSEC[args.key_alg]\n if is_algorithm_rsa(dnssec_alg):\n if args.key_size is None:\n raise argparse.ArgumentError(\n args.key_size, \"RSA key generation requires key size\"\n )\n p11key = generate_rsa_key(\n flags, args.key_size, p11modules, label=args.key_label\n )\n elif is_algorithm_ecdsa(dnssec_alg):\n crv = algorithm_to_curve(dnssec_alg)\n p11key = generate_ec_key(flags, crv, p11modules, label=args.key_label)\n else:\n raise ValueError(f\"Unknown key algorithm {repr(args.key_alg)}\")\n\n if not p11key or not p11key.public_key:\n raise RuntimeError(\"No public key returned by key generation\")\n\n # Calculate the DNSSEC key tag of the new key and look for a collision in the configuration\n key_tags: List[int] = []\n _key = public_key_to_dnssec_key(\n key=p11key.public_key,\n key_identifier=p11key.label,\n algorithm=AlgorithmDNSSEC[args.key_alg],\n flags=FlagsDNSKEY.SEP.value | FlagsDNSKEY.ZONE.value,\n ttl=config.ksk_policy.ttl,\n )\n logger.info(\n f\"Generated key {p11key.label} has key tag {_key.key_tag} for algorithm={_key.algorithm}, \"\n f\"flags=0x{_key.flags:x}\"\n )\n key_tags += [_key.key_tag]\n _revoked_key = public_key_to_dnssec_key(\n key=p11key.public_key,\n key_identifier=p11key.label,\n algorithm=AlgorithmDNSSEC[args.key_alg],\n flags=FlagsDNSKEY.SEP.value | FlagsDNSKEY.ZONE.value | FlagsDNSKEY.REVOKE.value,\n ttl=config.ksk_policy.ttl,\n )\n logger.info(\n f\"Generated key {p11key.label} has key tag {_revoked_key.key_tag} with the REVOKE bit set \"\n f\"(flags 0x{_revoked_key.flags:x})\"\n )\n key_tags += [_revoked_key.key_tag]\n\n for _name, ksk in config.ksk_keys.items():\n if ksk.key_tag in key_tags:\n logger.error(\n f\"Generated key {p11key.label} has key tags {key_tags} matching \"\n f\"KSK key in configuration: {ksk}\"\n )\n raise RuntimeError(\"Key tag collision detected\")\n\n _now = datetime.utcnow()\n # create_trustanchor_keydigest wants an KSKKey, but it is not used in the digest calculation\n _temp_ksk = KSKKey(\n 
description=\"Newly generated key\",\n label=_now.isoformat(),\n key_tag=_key.key_tag,\n algorithm=_key.algorithm,\n valid_from=_now,\n valid_until=_now,\n )\n _domain = \".\"\n _ds = create_trustanchor_keydigest(_temp_ksk, _key, domain=_domain)\n digest = binascii.hexlify(_ds.digest).decode(\"UTF-8\").upper()\n _digest_type = \"2\" # create_trustanchor_keydigest always does SHA256\n logger.info(\n f\"DS record for generated key:\\n\"\n f\"{_domain} IN DS {_key.key_tag} {_key.algorithm.value} {_digest_type} {digest}\\n\"\n f\">> {' '.join(pgp_wordlist(_ds.digest))}\"\n )\n\n return True", "def generateKeys(self, keys_path, minion_id):\n #Change directory to keys path\n os.chdir(keys_path)\n #Give permission to the salt user\n self.console_manager.printRed(\"Giving permission to the salt user\")\n command = ['sudo', 'chmod', 'a+rwx', '.']\n self.console_manager.runCommandFromShell(command)\n #Generate keys\n self.console_manager.printRed(''.join([\"Generating keys for minion id: \", minion_id]))\n command = ['sudo', 'salt-key', ''.join(['--gen-keys=', minion_id])]\n self.console_manager.runCommandFromShell(command)\n #Give permission to the salt user\n self.console_manager.printRed(\"Allowing vagrant to handle private keys\")\n command = ['sudo', 'chmod', 'a+rwx', ''.join([minion_id, '.pub']), ''.join([minion_id, '.pem'])]\n self.console_manager.runCommandFromShell(command)\n #Add public key to the accepted minion folder\n self.console_manager.printRed(\"Copying the minion public key to the salt master public keys folder\")\n command = ['sudo', 'cp', ''.join([minion_id, '.pub']), ''.join(['/var/lib/salt/pki/master/minions/', minion_id])]\n self.console_manager.runCommandFromShell(command)\n command = ['sudo', 'cp', ''.join([minion_id, '.pub']), ''.join(['/etc/salt/pki/master/minions/', minion_id])]\n self.console_manager.runCommandFromShell(command)\n return", "def generate(self, force=False):\n if not self.check_force_generate(force):\n return False\n\n mkdirs(self.path)\n\n command = [openssl, 'genrsa', '-out', self.key_file]\n if self.password:\n command += ['-passout', 'pass:{}'.format(self.password)]\n command += [str(self.key_size)]\n\n self.log.info('Generating RSA key')\n if not run_command(command):\n raise RuntimeError('RSA key generation failed')\n\n if not self.exists():\n raise RuntimeError(\n 'Key generation succeeded but key file does not exist. '\n 'This should not happen', self\n )", "def generate_key(self, filename, size):\n if size != 16 and size != 24 and size != 32:\n raise ValueError(\"AES key size not valid.\")\n key = os.urandom(size)\n self.export_key(filename, key)\n return key", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def gen_pgp_key(name, email, comment=\"generated by sdata\"):\n\n # we can start by generating a primary key. For this example, we'll use RSA, but it could be DSA or ECDSA as well\n key = pgpy.PGPKey.new(PubKeyAlgorithm.RSAEncryptOrSign, 4096)\n\n # we now have some key material, but our new key doesn't have a user ID yet, and therefore is not yet usable!\n uid = pgpy.PGPUID.new(name, comment=comment, email=email)\n\n # now we must add the new user id to the key. 
We'll need to specify all of our preferences at this point\n # because PGPy doesn't have any built-in key preference defaults at this time\n # this example is similar to GnuPG 2.1.x defaults, with no expiration or preferred keyserver\n key.add_uid(uid, usage={KeyFlags.Sign, KeyFlags.EncryptCommunications, KeyFlags.EncryptStorage},\n hashes=[HashAlgorithm.SHA256, HashAlgorithm.SHA384, HashAlgorithm.SHA512, HashAlgorithm.SHA224],\n ciphers=[SymmetricKeyAlgorithm.AES256, SymmetricKeyAlgorithm.AES192, SymmetricKeyAlgorithm.AES128],\n compression=[CompressionAlgorithm.ZLIB, CompressionAlgorithm.BZ2, CompressionAlgorithm.ZIP,\n CompressionAlgorithm.Uncompressed])\n return key", "def gen_key(self, key):\n b_key = self._hash_digest(key)\n return self._hash_val(b_key, lambda x: x)", "def create_key() -> RSA.RsaKey:\n\n return RSA.generate(1024, Crypto.Random.new().read)", "def create_keypair(key_name):\n if os.path.isfile(SSH_FOLDER + key_name + \".pem\"):\n return # Key already created\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n key = ec2.create_key_pair(key_name)\n key.save(SSH_FOLDER)", "def create_rsa_keypair( config_path = config.CONFIG_PATH(), prefix = RSA_KEYPAIR_PREFIX(), keysize = RSA_KEYPAIR_SIZE(), force = False ) :\n\n public_key_path, private_key_path = valid_rsa_keypair_paths( config_path, prefix, force )\n utils.create_directory( config_path, 0700 )\n public_key, private_key = rsa.newkeys( keysize )\n with open( public_key_path, \"w\" ) as stream :\n stream.write( public_key.save_pkcs1() )\n os.chmod( public_key_path, 0644 )\n with open( private_key_path, \"w\" ) as stream :\n stream.write( private_key.save_pkcs1() )\n os.chmod( private_key_path, 0600 )\n return public_key_path, private_key_path", "def generate_key(self)->bytes:\n return os.urandom(32)", "def generate_key():\n # generate random key\n key = get_random_string()\n\n # if it's already taken, generate another\n if EmailManager.objects.filter(key=key).exists():\n return EmailManager.generate_key()\n\n # return it\n return key", "def generate_private_key():\n\treturn binascii.hexlify(os.urandom(32)).decode('utf-8').upper()", "def Generate(size=keyinfo.AES.default_size):\n key_bytes = util.RandBytes(size // 8)\n key_string = util.Base64WSEncode(key_bytes)\n hmac_key = HmacKey.Generate() # use default HMAC-SHA1 key size\n return AesKey(key_string, hmac_key, size)", "def _create_rsa_key_pair(self, length, public_exponent=65537):\n self.logger.info(\n \"Generating an RSA key pair with length: {0}, and \"\n \"public_exponent: {1}\".format(\n length, public_exponent\n )\n )\n try:\n private_key = rsa.generate_private_key(\n public_exponent=public_exponent,\n key_size=length,\n backend=default_backend())\n public_key = private_key.public_key()\n\n private_bytes = private_key.private_bytes(\n serialization.Encoding.DER,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption())\n public_bytes = public_key.public_bytes(\n serialization.Encoding.DER,\n serialization.PublicFormat.PKCS1)\n except Exception as e:\n self.logger.exception(e)\n raise exceptions.CryptographicFailure(\n \"An error occurred while generating the RSA key pair. 
\"\n \"See the server log for more information.\"\n )\n\n public_key = {\n 'value': public_bytes,\n 'format': enums.KeyFormatType.PKCS_1,\n 'public_exponent': public_exponent\n }\n private_key = {\n 'value': private_bytes,\n 'format': enums.KeyFormatType.PKCS_8,\n 'public_exponent': public_exponent\n }\n\n return public_key, private_key", "def generate_key_cmd(self, cfg_path=None):\n # TODO: use tempfile\n if cfg_path is None:\n cfg_path = '/tmp/gen-key.cfg'\n self.create_gen_key_cfg_file(cfg_path)\n return '/usr/bin/gpg --batch --gen-key {cfg_path}'.format(cfg_path=cfg_path)", "def generate_key(self, rand = random.SystemRandom()):\n k = rand.randrange(0, self.n - 1)\n return k, self.base_mul(k)" ]
[ "0.71120954", "0.68204904", "0.65850353", "0.6553852", "0.65218014", "0.6443924", "0.6378423", "0.63755655", "0.6358998", "0.6324435", "0.63147116", "0.6309187", "0.62952316", "0.6251726", "0.61868006", "0.61828184", "0.61474425", "0.6133282", "0.61287004", "0.60888094", "0.6040706", "0.60231555", "0.60195357", "0.6008617", "0.5979798", "0.597343", "0.5961485", "0.5926425", "0.5888555", "0.5860322", "0.5832551", "0.58271146", "0.5826103", "0.5816542", "0.5814549", "0.5814375", "0.5811282", "0.580594", "0.5791124", "0.5787104", "0.5783938", "0.5780605", "0.5772091", "0.577154", "0.57712805", "0.5761656", "0.5755106", "0.57416207", "0.5735575", "0.5733245", "0.5733074", "0.57320446", "0.5718721", "0.5714307", "0.5711257", "0.5708222", "0.5707401", "0.5701368", "0.57010907", "0.5681143", "0.5656023", "0.56471545", "0.56464636", "0.56459415", "0.56458044", "0.5638617", "0.5627469", "0.56126213", "0.5609052", "0.559526", "0.5586959", "0.5567825", "0.5563508", "0.5558474", "0.5553827", "0.5553125", "0.5548605", "0.5540845", "0.5537478", "0.5534268", "0.5534091", "0.55316025", "0.55031395", "0.5498549", "0.548971", "0.5485279", "0.547969", "0.5466015", "0.5443588", "0.5441804", "0.5425906", "0.54223436", "0.5415646", "0.54140556", "0.5407447", "0.5395988", "0.53800106", "0.5367588", "0.5365692", "0.5363728" ]
0.8180875
0
Test pointwise arithmetic with stencil offsets across a single function with a buffering dimension in indexed expression format
def test_indexed_buffered(self, expr, result): i, j, l = dimify('i j l') a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base fa = a.function eqn = eval(expr) Operator(eqn)(fa) assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def _evaluable_view(self, stencil, arr, offset=0):\n if self.dim == 1:\n if isinstance(stencil, Stencil):\n\n l = self.borders[0]-stencil.b[0][0]\n r = -(self.borders[1]-stencil.b[0][1])\n else:\n l = self.borders[0]-stencil[0][0]\n r = -(self.borders[1]-stencil[0][1])\n return arr[l+offset: r+offset]\n else:\n raise NotImplementedError(\"Another dimension than one \"\n \"is not supplied\")", "def test_indexed_open_loops(self, expr, result):\n i, j, l = dimify('i j l')\n pushed = [d.size for d in [j, l]]\n j.size = None\n l.size = None\n a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed\n fa = a.function\n fa.data[0, :, :] = 2.\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)\n j.size, l.size = pushed", "def test_elemwise_multiple_inputs_optimisation(self):\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n# (fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx+dy+dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx*dy*dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (dx*dy*(dx+dy+dz),(dx,dy,dz),(dxv,dyv,dzv),2,'float64'),\r\n# (fx*fy*(fx+fy+dz),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type add\r\n# (dz*fy*(fx+fy),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (2+fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (2*fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (fx*fy*2*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (fx*fy*(2+fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n (fx * fy * 2 * (fx + fy + fz+2), (fx, fy, fz), (fxv, fyv, fzv), 2, {\r\n 'custom': 'float32', 
'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n\r\n #check with broadcast of row\r\n# (fx+fy+fz+fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fz*fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv+fx+fy+fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv*fx*fy*fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fv*(fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*fv*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (dx+dy+dz+dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dz*dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv+dx+dy+dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv*dx*dy*dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dv*(dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*dv*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def test_elemwise1():\r\n\r\n shape = (3, 4)\r\n a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32') + 0.5, 'a')\r\n b = tensor.fmatrix()\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],\r\n mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)", "def bitwise_stats(traces, N, delta):\n \n X = np.zeros((N,),dtype=int)\n padded_traces = pad_traces(traces, N)\n num_traces = padded_traces.shape[0]\n \n p = np.zeros(N)\n for j in range(N):\n p[j] = np.sum(padded_traces[:,j])/num_traces\n \n c = np.concatenate((np.ones(N,), np.zeros(N,)), axis=None)\n bounds = (0,1)\n \n for i in range(N):\n A_ub = np.zeros((2*N,2*N))\n for j in range(N):\n for k in range(N):\n if j == k:\n A_ub[j,k] = -1\n A_ub[j+N,k] = -1\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n elif k > j:\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n \n b_ub = np.concatenate((p,-1*p), axis=None)\n \n A_eq0 = np.zeros((i+1,2*N))\n b_eq0 = np.zeros(i+1)\n for j in range(i+1):\n A_eq0[j,j+N] = 1\n if j==i:\n b_eq0[j] = 0\n else:\n b_eq0[j] = X[j]\n \n A_eq1 = np.zeros((i+1,2*N))\n b_eq1 = np.zeros(i+1)\n for j 
in range(i+1):\n A_eq1[j,j+N] = 1\n if j==i:\n b_eq1[j] = 1\n else:\n b_eq1[j] = X[j]\n \n res0 = linprog(c,A_ub,b_ub,A_eq0,b_eq0,bounds,method='interior-point')\n res1 = linprog(c,A_ub,b_ub,A_eq1,b_eq1,bounds,method='interior-point')\n if res0.fun < res1.fun:\n X[i] = 0\n else:\n X[i] = 1\n \n return X", "def test_var_idx_in_args(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"float array vars =\\n\\t0.5, 1\\n\\nMZgate(vars[0], vars[1]) | [0, 1]\"\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0.5, 1.0], 'kwargs': {}, 'modes': [0, 1]}\n ]", "def local_func(f, t, x, w):\n x_func = np.zeros_like(t, dtype='f')\n for i, jd in enumerate(t.jd):\n sel = (t.jd >= (jd - w)) & (t.jd <= (jd + w))\n x_func[i] = f(x[sel])\n return x_func", "def test_flat(self, expr, result, mode):\n i, j = dimify('i j')\n a = symbol(name='a', dimensions=(i, j), value=2., mode=mode)\n b = symbol(name='b', dimensions=(i, j), value=3., mode=mode)\n fa = a.base.function if mode == 'indexed' else a\n fb = b.base.function if mode == 'indexed' else b\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_elemwise_multiple_inputs_optimisation2(self):\r\n raise SkipTest(\"Current implementation of Canonizer does not \"\r\n \"implement all cases. Skip the corresponding test.\")\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx + dy + dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx * dy * dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * (fx + fy + fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (dx * dy * (dx + dy + dz), (dx, dy, dz), (dxv, dyv,\r\n dzv), 2, 'float64'),\r\n (fx * fy * (fx + fy + dz), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type add\r\n (dz * fy * (fx + fy), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 * fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (fx * fy * 2 * (fx+fy+fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (fx*fy*(2+fx+fy+fz), (fx, fy, fz), (fxv, fyv, fzv), 2, 'float32'),\r\n (fx*fy*2*(fx+fy+fz+2), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 
'float32'),\r\n\r\n #check with broadcast of row\r\n (fx+fy+fz+fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fz*fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv+fx+fy+fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv*fx*fy*fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fv*(fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*fv*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 2, 'float32'),\r\n (dx+dy+dz+dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dz*dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv+dx+dy+dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv*dx*dy*dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dv*(dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*dv*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 2, 'float64'),\r\n\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding('local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)", "def position_op(x, wfunc):\n return x*wfunc", "def test_jitable_funcs(self):\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = None", "def test_elemwise2():\r\n rng = numpy.random.RandomState(int(time.time()))\r\n shape = (3, 5)\r\n for pattern in [(0, 1), (1, 0)]:\r\n a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),\r\n dtype='float32'), name=None)\r\n b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()\r\n f = pfunc([b], [], updates=[(a, (a + b).dimshuffle(pattern))],\r\n mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(rng.rand(*shape), dtype='float32') * .3)\r\n\r\n shape = (3, 4, 5, 6)\r\n a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),\r\n 
dtype='float32'), 'a')\r\n b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()\r\n f = pfunc([b], [], updates=[(a, (a + b).dimshuffle([2, 0, 3, 1]) *\r\n tensor.exp(b ** a).dimshuffle([2, 0, 3, 1]))], mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(rng.rand(*shape), dtype='float32'))", "def test_grad_binary_int(func, motion, optimized, preserve_result, a, n):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, n)", "def test_2d_2d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.75)\n def func(point):\n x, y = point\n return [x**2, y]\n \n a = func((1, 1))\n self.assertEqual(len(self.storage), 1)\n self.assertEqual(a, [1**2, 1])\n \n a = func((2, 1))\n self.assertEqual(len(self.storage), 2)\n self.assertEqual(a, [2**2, 1])\n \n a = func((1, 2))\n self.assertEqual(len(self.storage), 3)\n self.assertEqual(a, [1**2, 2])\n \n a = func((2, 2))\n self.assertEqual(len(self.storage), 4)\n self.assertEqual(a, [2**2, 2])\n \n a = func((1.5, 1.5))\n self.assertEqual(len(self.storage), 4)\n self.assertAlmostEqual(a[0], 0.5*(1**2 + 2**2))\n self.assertAlmostEqual(a[1], 1.5)", "def test_var_idx_in_modes(self, arr, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"int array vars =\\n{}\\nMZgate(0, 1) | [vars[0], vars[1], vars[2]]\".format(arr)\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0, 1], 'kwargs': {}, 'modes': [1, 2, 3]}\n ]", "def test_1d_2d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.75)\n def func(x):\n return [x**2, x]\n \n a = func(1)\n self.assertEqual(len(self.storage), 1)\n self.assertAllClose(a, [1**2, 1])\n \n a = func(2)\n self.assertEqual(len(self.storage), 2)\n self.assertAllClose(a, [2**2, 2])\n \n a = func(1)\n self.assertEqual(len(self.storage), 2)\n self.assertAllClose(a, [1**2, 1])\n \n a = func(1.5)\n self.assertEqual(len(self.storage), 2)\n self.assertAlmostEqual(a[0], 0.5*(1**2 + 2**2))\n self.assertAlmostEqual(a[1], 1.5)", "def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = 
mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n ((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / 
fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, 
numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def run_2dtest(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n xlin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n ylin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n X,Y = np.meshgrid(xlin, ylin)\n\n # Store resulting LoS integrations in results\n results = X\n for i in range(0,num_sight_lines+1):\n for j in range(0,num_sight_lines+1): \n results[i,j] = testsph(X[i,j],Y[i,j],smoothing,dim=dim)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n\n # Have to integrate across x for every y\n Int_step = np.zeros( num_sight_lines+1 )\n for iy in range(0, num_sight_lines+1):\n isfin = np.isfinite(results[iy,:])\n Int_step[iy] = integrate.trapz(results[iy,isfin], xlin[isfin])\n # Now integrate across y\n isfin = np.isfinite(Int_step)\n particle_integral = integrate.trapz(Int_step[isfin], ylin[isfin])\n # \"All smoothing lengths should integrate to the same value of unity \"\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n isfin = np.isfinite(results[0,:])\n traces.append(go.Scatter(y=results[0,isfin], x=xlin[isfin]))\n\n # The integral of the entire particle should be unity, the trace of axis will not be however\n plot(traces)", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def test_fortran_frontend_view_test_3():\n test_name = \"view3_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\n\nCALL \"\"\" + test_name + \"\"\"_function(a,b,n)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,bb,n)\n\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\ninteger j,k\n\nj=1\n call viewlens(aa(:,:,j),bb(:,:,j),bb(:,:,j+1))\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,bb,cc)\n\nIMPLICIT NONE\n\ndouble 
precision :: aa(10,11),bb(10,11),cc(10,11) \n\nINTEGER :: JK, JL\n\nDO JK=1,10\n DO JL=1,11\n cc(JK,JL)=bb(JK,JL)+aa(JK,JL)\n ENDDO\nENDDO\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n\n b[0, 0, 0] = 1\n sdfg(aa=a, bb=b, n=10)\n assert (b[0, 0, 0] == 1)\n assert (b[0, 0, 1] == 43)", "def test_sum_pos_3() -> None:\n # 2nd step - 3rd square\n assert nth(sw.sum_walk(), 1) == 2", "def test_simple_2d(self):\r\n a = tt.dmatrix()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n\r\n for do_set in [False, True]:\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = numpy.ones((5, 5))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n expected_result = numpy.copy(val_a)\r\n if do_set:\r\n expected_result[:, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def test_indexed_increment(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=2., mode='indexed').base\n fa = a.function\n fa.data[1:, 1:] = 0\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_gradable_funcs(self):\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = None", "def test052_2d_numerical_comparison_on_vs_np_mp(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def test032_2d_numerical_comparison_on_vs_np(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n self.template_numerical_comparison_on_vs_np(\n 
np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def evaluable_view(self, stencil, offset=0):\n return self._evaluable_view(stencil, self.arr, offset)", "def test_fortran_frontend_view_test_2():\n test_name = \"view2_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ninteger, parameter :: n=10\ndouble precision a(n,11,12),b(n,11,12),c(n,11,12)\n\nCALL \"\"\" + test_name + \"\"\"_function(a,b,c,n)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,bb,cc,n)\n\ninteger, parameter :: n=10\ndouble precision a(n,11,12),b(n,11,12),c(n,11,12)\ninteger j,k\n\nj=1\n call viewlens(aa(:,:,j),bb(:,:,j),cc(:,:,j))\nk=2\n call viewlens(aa(:,:,k),bb(:,:,k),cc(:,:,k))\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,bb,cc)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11),bb(10,11),cc(10,11) \n\nINTEGER :: JK, JL\n\nDO JK=1,10\n DO JL=1,11\n cc(JK,JL)=bb(JK,JL)+aa(JK,JL)\n ENDDO\nENDDO\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n c = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n\n b[0, 0, 0] = 1\n sdfg(aa=a, bb=b, cc=c, n=10)\n assert (c[0, 0, 0] == 43)\n assert (c[1, 1, 1] == 84)", "def test_elemwise3():\r\n\r\n shape = (3, 4, 5, 6)\r\n a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'), 'a')\r\n b = tensor.fvector()\r\n new_val = (a + b).dimshuffle([2, 0, 3, 1])\r\n new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])\r\n f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(numpy.random.rand(6), dtype='float32'))", "def test_stencil_derivative(grid, shape, SymbolType, dim):\n i = dim(grid) # issue fixtures+parametrize: github.com/pytest-dev/pytest/issues/349\n u = SymbolType(name='u', grid=grid)\n u.data[:] = 66.6\n di = u.diff(i)\n dii = u.diff(i, i)\n # Check for sympy Derivative objects\n assert(isinstance(di, Derivative) and isinstance(dii, Derivative))\n s_di = di.as_finite_difference([i - i.spacing, i])\n s_dii = dii.as_finite_difference([i - i.spacing, i, i + i.spacing])\n # Check stencil length of first and second derivatives\n assert(len(s_di.args) == 2 and len(s_dii.args) == 3)\n u_di = s_di.args[0].args[1]\n u_dii = s_di.args[0].args[1]\n # Ensure that devito meta-data survived symbolic transformation\n assert(u_di.grid.shape == shape and u_dii.grid.shape == shape)\n assert(u_di.shape == u.shape and u_dii.shape == u.shape)\n assert(np.allclose(u_di.data, 66.6))\n assert(np.allclose(u_dii.data, 66.6))", "def test022_2d_numerical_comparison_on_fprop_vs_np(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=None,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def test_elemwise4():\r\n\r\n shape = (3, 4)\r\n a = 
tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'), 'a')\r\n b = tensor.fvector()\r\n c = tensor.fvector()\r\n f = pfunc([b, c], [],\r\n updates=[(a, (a + b.dimshuffle('x', 0) * c.dimshuffle(0, 'x')))],\r\n mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(numpy.random.rand(4), dtype='float32'),\r\n theano._asarray(numpy.random.rand(3), dtype='float32'))", "def test_generate_condition_function():\n masks = 4 # Always > 2\n vals = 15\n np_masks = np.random.randint(2, size=(masks, vals), dtype=bool)\n tf_masks = [tf.constant(i, dtype=tf.bool) for i in np_masks]\n # Generate the functions for and and or\n f_and = generate_condition_function(masks, \"and\")\n f_or = generate_condition_function(masks, \"or\")\n # Get the numpy and tf results\n np_ands = np.all(np_masks, axis=0)\n np_ors = np.any(np_masks, axis=0)\n tf_ands, idx_ands = f_and(*tf_masks)\n tf_ors, idx_ors = f_or(*tf_masks)\n # Check the values are the same\n util_check(np_ands, tf_ands, idx_ands)\n util_check(np_ors, tf_ors, idx_ors)\n # Check a combination\n f_comb = generate_condition_function(3, [\"and\", \"or\"])\n np_comb = np_masks[0] & np_masks[1] | np_masks[2]\n tf_comb, idx_comb = f_comb(*tf_masks[:3])\n util_check(np_comb, tf_comb, idx_comb)\n # Check failures\n with pytest.raises(ValueError):\n generate_condition_function(1, \"and\")\n with pytest.raises(ValueError):\n generate_condition_function(5, \"bad_condition\")\n with pytest.raises(ValueError):\n generate_condition_function(5, [\"or\", \"and\"])\n with pytest.raises(ValueError):\n generate_condition_function(3, [\"or\", \"bad_condition\"])", "def test_quad_flush_ind(self):", "def test_SMEB_args():\n testing_function('sme_bl', bilinear=True)", "def test_advanced_manipulations(free_alg):\n dr = free_alg\n p = dr.names\n i, j, k = p.i, p.j, p.k\n\n u = IndexedBase('u')\n v = IndexedBase('v')\n f = Vec('f')\n\n tensor = dr.einst(u[i, j] * f[j] + v[i, j] * f[j])\n assert tensor.n_terms == 2\n\n def has_u(term):\n \"\"\"Test if a term have u tensor.\"\"\"\n return term.amp.has(u)\n\n expect = dr.sum((j, p.R), u[i, j] * f[j])\n for res in [\n tensor.filter(has_u),\n tensor.bind(lambda x: [x] if has_u(x) else [])\n ]:\n assert res.n_terms == 1\n assert res == expect\n\n def subst_i(term):\n \"\"\"Substitute i index in the terms.\"\"\"\n return Term(term.sums, term.amp.xreplace({i: k}), term.vecs)\n\n expect = dr.sum((j, p.R), u[k, j] * f[j] + v[k, j] * f[j])\n for res in [\n tensor.map(subst_i),\n tensor.bind(lambda x: [subst_i(x)]),\n tensor.map2scalars(lambda x: x.xreplace({i: k}))\n ]:\n assert res.n_terms == 2\n assert res == expect\n\n alpha, beta = symbols('alpha beta')\n assert tensor.bind(\n lambda x: [Term(x.sums, x.amp * i_, x.vecs) for i_ in [alpha, beta]]\n ) == (tensor * alpha + tensor * beta)\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k})\n ) == dr.sum((j, p.R), u[i, k] * f[k] + v[i, k] * f[k])\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k}), skip_vecs=True\n ) == dr.sum((j, p.R), u[i, k] * f[j] + v[i, k] * f[j])", "def test_SMEL_args():\n testing_function('sme', bilinear=False)", "def test_sum_pos_4() -> None:\n # Third step, 4th square.\n assert nth(sw.sum_walk(), 2) == 4", "def test_add_op_jit():\n x = np.array([1, 2, 3, 4, 5, 6, 7])\n paddle_x = paddle.to_tensor(x).astype(\"float32\")\n 
paddle_x.stop_gradient = False\n print(paddle_x)\n a = 1\n b = 5\n out = custom_ops.slice_test(paddle_x, a, b)\n print(\"out: \", out)\n print(\"numpy out: \", x[a:b])\n assert np.allclose(out.numpy(), x[a:b])\n print(\"run success\")", "def inter_op_dp_inner_loop(\n n_layers, n_devices, submesh_sizes, valid_idxs_costs, max_n_succ_stages\n):\n F = np.full((n_layers + 1, n_layers + 1, n_devices + 1), np.inf, dtype=np.float32)\n F_stage_max = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1), 0.0, dtype=np.float32\n )\n F_argmin = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1, 3), -1, dtype=np.int32\n )\n F[0, n_layers, 0] = 0\n\n for d in range(1, n_devices + 1):\n for (\n l,\n i,\n submesh_shape_idx,\n sharding_config_idx,\n stage_cost,\n ) in valid_idxs_costs:\n l, i, submesh_shape_idx, sharding_config_idx = map(\n int, (l, i, submesh_shape_idx, sharding_config_idx)\n )\n\n n_submesh_devices = submesh_sizes[submesh_shape_idx]\n if n_submesh_devices <= d:\n for s in range(1, n_layers + 1):\n if (\n s - 1\n > max_n_succ_stages[\n l, i, submesh_shape_idx, sharding_config_idx\n ]\n ):\n continue\n\n new_cost = F[s - 1, i + 1, d - n_submesh_devices] + stage_cost\n if new_cost < F[s, l, d]:\n F[s, l, d] = new_cost\n F_argmin[s, l, d] = (\n i + 1,\n submesh_shape_idx,\n sharding_config_idx,\n )\n F_stage_max[s, l, d] = max(\n F_stage_max[s - 1, i + 1, d - n_submesh_devices], stage_cost\n )\n\n return F, F_stage_max, F_argmin", "def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def test_positional_convolution_backward():\n i = 1\n for num_batch in [1, 2, 4]:\n for num_channel in [4, 8, 12]:\n for input_height, input_width in itertools.product([10, 12, 18], [10, 12, 18]):\n for num_filter in [2, 4, 8]:\n for kernel in [(3, 3), (2, 2)]:\n for stride in [(1, 1), (2, 2)]:\n for pad in [(0, 0), (1, 1)]:\n for dilate in [(1, 1), (2, 2)]:\n # for num_group in [1, 2, 4]:\n grad_nodes = ['im_data', 'scale_data', 'weight', 'bias']\n output_height = np.floor(\n (input_height + 2 * pad[0] - dilate[0] * (kernel[0] - 1) - 1) * 1.0 / stride[0]\n ) + 1\n output_width = np.floor(\n (input_width + 2 * pad[1] - dilate[1] * (kernel[1] - 1) - 1) * 1.0 / stride[1]\n ) + 1\n im_data = np.random.rand(num_batch, num_channel, input_height, input_width)\n scale_data = \\\n np.random.rand(num_batch, num_channel, int(output_height), int(output_width))\\\n * 0.8 + 0.1\n\n weight = np.random.normal(0, 0.001, (num_filter, num_channel, kernel[0], kernel[1]))\n bias = np.random.rand(num_filter)\n\n im_data_var = mx.symbol.Variable(name=\"im_data\")\n scale_data_var = mx.symbol.Variable(name=\"scale_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n op = mx.sym.contrib.PositionalConvolution(name='test_op',\n data=im_data_var,\n scale=scale_data_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=num_filter,\n kernel=kernel, stride=stride, pad=pad,\n dilate=dilate\n )\n rtol, atol = 1e-4, 1e-3\n # absolute(a - b) <= (atol + rtol * absolute(b))\n check_numeric_gradient(op, [im_data, scale_data, weight, bias], rtol=rtol,\n atol=atol, grad_nodes=grad_nodes, ctx=mx.gpu(0))\n print(\"check numeric gradient successfully for the {} times\".format(i))\n i += 1", "def f_unc(xpts, offset, *params):\n res = 0\n for i, p in enumerate(coefficients):\n res += p*xpts**i\n return res", "def _evaluate(self, w, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n 
z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n beta = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[y_pos - 1][z_pos - 1](w, x)\n + (1 - alpha) * beta * self.wxInterpolators[y_pos - 1][z_pos](w, x)\n + alpha * (1 - beta) * self.wxInterpolators[y_pos][z_pos - 1](w, x)\n + alpha * beta * self.wxInterpolators[y_pos][z_pos](w, x)\n )\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n beta = (z[c] - self.z_list[j - 1]) / (\n self.z_list[j] - self.z_list[j - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[i - 1][j - 1](w[c], x[c])\n + (1 - alpha)\n * beta\n * self.wxInterpolators[i - 1][j](w[c], x[c])\n + alpha\n * (1 - beta)\n * self.wxInterpolators[i][j - 1](w[c], x[c])\n + alpha * beta * self.wxInterpolators[i][j](w[c], x[c])\n )\n return f", "def test_grad_binary(func, motion, optimized, preserve_result, a, b):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, b)", "def CalcGreenFunctions(x, y, z, x_src_l, y_src_l, alpha, dire, Lambda_y, \\\n gamma_l, c, omega, G_sen, dir_meas, dir_meas_deg, airloss_alpha, f, n):\n \n G = greens_fct(repmat(x_src_l, np.shape(x)[0],1), repmat(y_src_l, np.shape(y)[0],1), omega, c, \\\n np.transpose(repmat(x, np.shape(x_src_l)[0], 1)), np.transpose(repmat(y, np.shape(y_src_l)[0], 1)), z)\n\n G = G_sen * G\n \n beta = np.arcsin((np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1)) \\\n * np.sqrt((np.transpose(repmat(x, np.shape(x_src_l)[0], 1)) - \\\n repmat(x_src_l, np.shape(x)[0], 1))**2 + \\\n (np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1))**2)**(-1)) \\\n + repmat(gamma_l, np.shape(x)[0], 1)\n \n # air attenuation\n src_rec_dist = np.sqrt((np.transpose(repmat(x, np.shape(x_src_l)[0], 1)) - repmat(x_src_l, np.shape(x)[0], 1))**2 \\\n + (np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1))**2)\n \n air_att = airloss_alpha * src_rec_dist\n air_att = 10**(-air_att / 20)\n G = G * air_att\n \n H_post = calc_directivity(dire, alpha, Lambda_y, beta, omega, c, f, dir_meas, dir_meas_deg, n)\n\n G = G * H_post\n \n return G", "def border_function_generator(self, stencil):\n\n def is_on_border(indice):\n for i in range(self.dim):\n if indice[0] < stencil.b[0][0] or indice[0] >= self.mid.shape[0]+stencil.b[0][0]:\n return True\n return is_on_border", "def structure_function(f, index=0):\r\n\r\n def structured_function(*args):\r\n pattern = args[index]\r\n evaluated = f(*args)\r\n evaluated[pattern == 0] = 0\r\n return evaluated\r\n return structured_function", "def test_lifted_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n 
temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, -4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n parcel_prof = parcel_profile(pressure, temperature[0], dewpoint[0])\n li = lifted_index(pressure, temperature, parcel_prof)\n assert_almost_equal(li, -7.9115691 * units.delta_degree_Celsius, 2)", "def structure_function(f, index=0):\n\n def structured_function(*args):\n pattern = args[index]\n evaluated = f(*args)\n evaluated[pattern == 0] = 0\n return evaluated\n\n return structured_function", "def pyelemfunctions():\n for elemid in unique(top.idpyelem[:top.nppyelem]):\n ip = (top.idpyelem[:top.nppyelem] == elemid)\n x = top.xpyelem[:top.nppyelem][ip]\n y = top.ypyelem[:top.nppyelem][ip]\n z = top.zpyelem[:top.nppyelem][ip]\n # --- The conversion to int is needed since a numpy.int64 is different than an int.\n (ex,ey,ez,bx,by,bz) = pyelemfunctionsdict[int(elemid)](x,y,z)\n top.expyelem[:top.nppyelem][ip] = ex\n top.eypyelem[:top.nppyelem][ip] = ey\n top.ezpyelem[:top.nppyelem][ip] = ez\n top.bxpyelem[:top.nppyelem][ip] = bx\n top.bypyelem[:top.nppyelem][ip] = by\n top.bzpyelem[:top.nppyelem][ip] = bz", "def test042_2d_numerical_comparison_on_fprop_vs_np_mp(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=None,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def past_weight_grad_calculator2(xs, es, kp_x, kd_x, kp_e, kd_e, shapes):\n kp_x, kd_x, kp_e, kd_e = [as_floatx(k) for k in (kp_x, kd_x, kp_e, kd_e)]\n n_samples, n_in, n_out = shapes\n rx = kd_x/(kp_x+kd_x)\n re = kd_e/(kp_e+kd_e)\n\n xr = create_shared_variable(np.zeros((n_samples, n_in)))\n er = create_shared_variable(np.zeros((n_samples, n_out)))\n\n\n\n\n # xr_new = xr*rx + xs/(kp_x+kd_x)\n # er_new = er*re + es/(kp_e+kd_e)\n\n arr = rx*re/(1-rx*re)\n\n xr_new = xr*arr + xs/(kp_x+kd_x)\n er_new = er*arr + es/(kp_e+kd_e)\n\n xsum = create_shared_variable(np.zeros((n_samples, n_in)))\n esum = create_shared_variable(np.zeros((n_samples, n_out)))\n\n xsum_new = xsum+xr_new\n esum_new = esum+er_new\n\n x_nospikes = tt.eq(xs, 0)\n e_nospikes = tt.eq(es, 0)\n\n dw = xs.T.dot(esum_new) + xsum_new.T.dot(es)\n\n add_update(xr, xr_new)\n add_update(er, er_new)\n add_update(xsum, xsum_new*x_nospikes)\n add_update(esum, esum_new*e_nospikes)\n\n return xs.T.dot(er) + xr.T.dot(es)\n # return xr.T.dot(er)\n # return dw", "def test_cspad_xy_at_z():\n ## 'CxiDs1.0:Cspad.0)' or 'DscCsPad'\n basedir = '/reg/g/psdm/detector/alignment/cspad/calib-cxi-camera1-2014-09-24/'\n fname_geometry = basedir + '2016-06-03-geometry-cxi06216-r25-camera1-z175mm.txt'\n fname_data = basedir + '2016-06-03-chun-cxi06216-0025-DscCsPad-max.txt'\n\n geometry = GeometryAccess(fname_geometry, pbits=0o377)\n\n # get pixel coordinate index arrays:\n xyc = xc, yc = 1000, 
1000\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc)\n #rows, cols = geometry.get_pixel_coord_indexes(do_tilt=True)\n #rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=None, xy0_off_pix=xyc)\n rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=150000)\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n\n #logger.info('arr.shape=', arr.shape\n arr.shape= (32,185,388)\n\n #ave, rms = arr.mean(), arr.std()\n #amp_range = (ave-rms, ave+3*rms)\n amp_range = (0, 1000)\n logger.info('amp_range:' + str(amp_range))\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n img = img_from_pixel_arrays(rows,cols,W=arr)\n\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.move(500,10)\n gg.show()", "def test0521_2d_numerical_comparison_on_vs_np_batchsize1_mp(\n self,\n batch_size=1,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=16,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def test_window_funcs():\n # get a PSpecData\n uvd = UVData()\n uvd.read_miriad(\n os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA'),\n use_future_array_shapes=True\n )\n beam = pspecbeam.PSpecBeamUV(os.path.join(DATA_PATH, \"HERA_NF_dipole_power.beamfits\"))\n ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd)], beam=beam)\n ds.set_spw((0, 20))\n ds.set_taper('bh')\n bl = (37, 38)\n key = (0, bl, 'xx')\n d = uvd.get_data(bl)\n C = np.cov(d[:, :20].T).real\n iC = np.linalg.pinv(C)\n # iterate over various R and M matrices and ensure\n # normalization and dtype is consistent\n for data_weight in ['identity', 'iC']:\n ds.set_weighting(data_weight)\n for norm in ['H^-1', 'I', 'V^-1/2']:\n for exact_norm in [True, False]:\n if exact_norm and norm != 'I':\n # exact_norm only supported for norm == 'I'\n continue\n ds.clear_cache()\n if data_weight == 'iC':\n # fill R with iC\n ds._R[(0, (37, 38, 'xx'), 'iC', 'bh')] = iC\n # compute G and H\n Gv = ds.get_G(key, key, exact_norm=exact_norm, pol='xx')\n Hv = ds.get_H(key, key, exact_norm=exact_norm, pol='xx')\n Mv, Wv = ds.get_MW(Gv, Hv, mode=norm, exact_norm=exact_norm,\n band_covar=C)\n # assert row-sum is normalized to 1\n assert np.isclose(Wv.sum(axis=1).real, 1).all()\n # assert this is a real matrix, even though imag is populated\n assert np.isclose(Wv.imag, 0, atol=1e-6).all()", "def test_fortran_frontend_view_test():\n test_name = \"view_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ndouble precision a(10,11,12)\ndouble precision res(1,1,2) \n\nCALL \"\"\" + test_name + \"\"\"_function(a,res)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,res)\n\ndouble precision aa(10,11,12)\ndouble precision res(1,1,2) \n\ncall viewlens(aa(:,:,1),res)\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,res)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11,23) \ndouble precision :: res(1,1,2)\n\nINTEGER :: JK, JL\n\nres(1,1,1)=0.0\nDO JK=1,10\n DO JL=1,11\n res(1,1,1)=res(1,1,1)+aa(JK,JL)\n 
ENDDO\nENDDO\naa(1,1)=res(1,1,1)\n\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([1, 1, 2], 42, order=\"F\", dtype=np.float64)\n b[0, 0, 0] = 1\n sdfg(aa=a, res=b)\n assert (a[0, 0, 1] == 42)\n assert (a[0, 0, 0] == 4620)\n assert (b[0, 0, 0] == 4620)", "def test_offsets():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n sections2 = ((B, H, 12.435, E),)\n EI, top, bot = bm.EI(sections, E)\n EI2, top2, bot2 = bm.EI(sections2, E)\n assert 0.99 < EI / EI2 < 1.01\n assert 0.99 < top / top2 < 1.01\n assert 0.99 < bot / bot2 < 1.01", "def test_special_XYX(self, angexp):\n a, b, c, d = angexp[0]\n exp = {(\"rx\", \"ry\")[g]: angexp[1][g] for g in (0, 1) if angexp[1][g]}\n tgt = np.exp(1j * d) * RXGate(b).to_matrix() @ RYGate(a).to_matrix() @ RXGate(c).to_matrix()\n self.check_oneq_special_cases(tgt, \"XYX\", exp)", "def test_ex_2_5(self):\n\n compiler = Compiler()\n\n X = Variable()\n Y = Variable()\n query = Compound('p', Compound('f', X), Compound('h', Y, Compound('f', Atom('a'))), Y)\n query_reg_allocation = RegisterAllocation()\n query_instrs = compiler.compile_query(query, query_reg_allocation)\n\n W = Variable()\n Z = Variable()\n program = Compound('p', Z, Compound('h', Z, W), Compound('f', W))\n program_reg_allocation = RegisterAllocation()\n program_instrs = compiler.compile_program(program, program_reg_allocation)\n\n wam = WAM()\n wam.execute(query_instrs)\n wam.execute(program_instrs)\n aW = wam.deref_reg(program_reg_allocation[W])\n aX = wam.deref_reg(query_reg_allocation[X])\n aY = wam.deref_reg(query_reg_allocation[Y])\n aZ = wam.deref_reg(program_reg_allocation[Z])\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def test_make_efuncs(exprs, nfuncs, ntimeiters, nests):\n exprs = list(as_tuple(exprs))\n\n grid = Grid(shape=(10, 10))\n t = grid.stepping_dim # noqa\n x, y = grid.dimensions # noqa\n\n u = Function(name='u', grid=grid) # noqa\n v = TimeFunction(name='v', grid=grid) # noqa\n\n # List comprehension would need explicit locals/globals mappings to eval\n for i, e in enumerate(list(exprs)):\n exprs[i] = eval(e)\n\n op = Operator(exprs)\n\n # We create one ElementalFunction for each Iteration nest over space dimensions\n efuncs = []\n for n, tree in enumerate(retrieve_iteration_tree(op)):\n root = filter_iterations(tree, key=lambda i: i.dim.is_Space)[0]\n efuncs.append(make_efunc('f%d' % n, root))\n\n assert len(efuncs) == len(nfuncs) == len(ntimeiters) == len(nests)\n\n for efunc, nf, nt, nest in zip(efuncs, nfuncs, ntimeiters, nests):\n # Check the `efunc` parameters\n assert all(i in efunc.parameters for i in (x.symbolic_min, x.symbolic_max))\n assert all(i in efunc.parameters for i in (y.symbolic_min, y.symbolic_max))\n functions = FindSymbols().visit(efunc)\n assert len(functions) == nf\n assert all(i in efunc.parameters for i in functions)\n timeiters = [i for i in FindSymbols('basics').visit(efunc)\n if isinstance(i, Dimension) and i.is_Time]\n assert len(timeiters) == nt\n assert all(i in efunc.parameters for i in timeiters)\n assert len(efunc.parameters) == 4 + len(functions) + len(timeiters)\n\n # Check the loop nest structure\n trees = retrieve_iteration_tree(efunc)\n assert len(trees) == 1\n tree = trees[0]\n assert 
all(i.dim.name == j for i, j in zip(tree, nest))\n\n assert efunc.make_call()", "def _evaluate(self, w, x, y, z):\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n gamma = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos - 1](w)\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos](w)\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos][z_pos - 1](w)\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos][z_pos](w)\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos - 1](w)\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos][y_pos - 1][z_pos](w)\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos][z_pos - 1](w)\n + alpha * beta * gamma * self.wInterpolators[x_pos][y_pos][z_pos](w)\n )\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list, x)\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.x_n):\n for j in range(1, self.y_n):\n for k in range(1, self.z_n):\n c = np.logical_and(\n np.logical_and(i == x_pos, j == y_pos), k == z_pos\n )\n if np.any(c):\n alpha = (x[c] - self.x_list[i - 1]) / (\n self.x_list[i] - self.x_list[i - 1]\n )\n beta = (y[c] - self.y_list[j - 1]) / (\n self.y_list[j] - self.y_list[j - 1]\n )\n gamma = (z[c] - self.z_list[k - 1]) / (\n self.z_list[k] - self.z_list[k - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i - 1][j - 1][k - 1](w[c])\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[i - 1][j - 1][k](w[c])\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[i - 1][j][k - 1](w[c])\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[i - 1][j][k](w[c])\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i][j - 1][k - 1](w[c])\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[i][j - 1][k](w[c])\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[i][j][k - 1](w[c])\n + alpha\n * beta\n * gamma\n * self.wInterpolators[i][j][k](w[c])\n )\n return f", "def test_coord_preceding_fs(self):", "def TestFunc1(x):\r\n return 12*x[0]*x[0] + 4*x[1]*x[1] - 12*x[0]*x[1] + 2*x[1]", "def test_ex_2_9(self):\n\n compiler = Compiler()\n\n X = Variable()\n Y = Variable()\n query = Compound('p', Compound('f', X), Compound('h', Y, Compound('f', Atom('a'))), Y)\n query_reg_allocation = RegisterAllocation()\n query_instrs = compiler.compile_query_m1(query, query_reg_allocation)\n\n W = Variable()\n Z = Variable()\n program = Compound('p', Z, Compound('h', Z, W), Compound('f', W))\n # Because there is a shared register space, we reuse the query's register allocation to\n # force the program's registers into different slots.\n program_reg_allocation = query_reg_allocation 
# RegisterAllocation()\n program_instrs = compiler.compile_program_m1(program, program_reg_allocation)\n program_instrs = program_instrs[:-1] # last instruction is proceed; remove it\n\n wam = WAM()\n wam.load(None, query_instrs)\n wam.load(program.get_functor(), program_instrs)\n wam.run()\n\n aW = wam.deref_reg(program_reg_allocation[W])\n aX = wam.deref_reg(query_reg_allocation[X])\n aY = wam.deref_reg(query_reg_allocation[Y])\n aZ = wam.deref_reg(program_reg_allocation[Z])\n\n #print 'X reg:', query_reg_allocation.reg_allocation[X], 'X addr:', aX, 'X: ', wam.get_term_repr(aX)\n #print 'Y reg:', query_reg_allocation.reg_allocation[Y], 'Y addr:', aY, 'Y: ', wam.get_term_repr(aY)\n #print 'Z reg:', program_reg_allocation.reg_allocation[Z], 'Z addr:', aZ, 'Z: ', wam.get_term_repr(aZ)\n #print 'W reg:', program_reg_allocation.reg_allocation[W], 'W addr:', aW, 'W: ', wam.get_term_repr(aW)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def apply_symm_elems_to_index_xyz(symm_elems, index_xyz, points_abc):\n b_n_1, b_n_2, b_n_3 = symm_elems[0], symm_elems[1], symm_elems[2]\n b_d = symm_elems[3]\n r_11, r_12, r_13 = symm_elems[4], symm_elems[5], symm_elems[6]\n r_21, r_22, r_23 = symm_elems[7], symm_elems[8], symm_elems[9]\n r_31, r_32, r_33 = symm_elems[10], symm_elems[11], symm_elems[12]\n i_1, i_2, i_3 = index_xyz[0], index_xyz[1], index_xyz[2]\n \n n1, n2, n3 = points_abc[0], points_abc[1], points_abc[2]\n p_1, p_2, p_3 = n1//b_d, n2//b_d, n3//b_d\n \n ni_1 = numpy.mod(r_11*i_1 + r_12*i_2 + r_13*i_3 + b_n_1*p_1, n1)\n ni_2 = numpy.mod(r_21*i_1 + r_22*i_2 + r_23*i_3 + b_n_2*p_2, n2)\n ni_3 = numpy.mod(r_31*i_1 + r_32*i_2 + r_33*i_3 + b_n_3*p_3, n3)\n ni = numpy.stack([ni_1, ni_2, ni_3], axis=0)\n return ni", "def run_test(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n for ix in range(0, num_sight_lines+1):\n # Make impact parameters covering the full\n # particle in x\n x = ix / (1. 
* num_sight_lines) * smoothing\n \n pencilbeams.append(\n dict(x=x, y=0),\n )\n\n results = []\n for pencilbeam in pencilbeams:\n result = testsph(h=smoothing, dim=dim, **pencilbeam)\n results.append(result)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n particle_integral = integrate.trapz([x for x in results], [x['x'] for x in pencilbeams])\n \n # \"All smoothing lengths should integrate to the same value \"\n\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n traces.append(go.Scatter(y=[x for x in results], x=[y['x'] for y in pencilbeams]))\n\n # The mass of a particle should be the area under each of these curves(?)\n plot(traces)", "def test0421_2d_numerical_comparison_on_fprop_vs_np_batchsize1_mp(\n self,\n batch_size=1,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=16,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=None,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def test_get_debug_values_success():\r\n\r\n prev_value = config.compute_test_value\r\n for mode in ['ignore', 'warn', 'raise']:\r\n\r\n try:\r\n config.compute_test_value = mode\r\n\r\n x = T.vector()\r\n x.tag.test_value = numpy.zeros((4,), dtype=config.floatX)\r\n y = numpy.zeros((5, 5))\r\n\r\n iters = 0\r\n\r\n for x_val, y_val in op.get_debug_values(x, y):\r\n\r\n assert x_val.shape == (4,)\r\n assert y_val.shape == (5, 5)\r\n\r\n iters += 1\r\n\r\n assert iters == 1\r\n\r\n finally:\r\n config.compute_test_value = prev_value", "def test_apply(buffer, window, out):\n buffer = np.array(buffer, dtype=np.float32)\n windows.apply(buffer, window)\n assert_almost_equal(buffer, np.array(out))", "def test_advinc_subtensor1():\r\n for shp in [(3, 3), (3, 3, 3)]:\r\n shared = gpuarray_shared_constructor\r\n xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1\r\n yval = numpy.empty((2,) + shp[1:], dtype='float32')\r\n yval[:] = 10\r\n x = shared(xval, name='x')\r\n y = tensor.tensor(dtype='float32',\r\n broadcastable=(False,) * len(shp),\r\n name='y')\r\n expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])\r\n f = theano.function([y], expr, mode=mode_with_gpu)\r\n assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1)\r\n for node in f.maker.fgraph.toposort()]) == 1\r\n rval = f(yval)\r\n rep = xval.copy()\r\n rep[[0, 2]] += yval\r\n assert numpy.allclose(rval, rep)", "def test_sum_pos_5() -> None:\n # Fourth step, 5th square.\n assert nth(sw.sum_walk(), 3) == 5", "def test031_1d_numerical_comparison_on_vs_np(\n self,\n batch_size=8,\n num_features=16,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features) + .125\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def transfer_state_in_buffer(n, gate_matrix, bits, expr_buffer, gate_buffer):\n idx = calc_einsum_idx(bits, n)\n expr_buffer.append(idx)\n gate_buffer.append(gate_matrix)", "def test_member_input_flags(self):\r\n\r\n if 
config.mode == 'FAST_COMPILE':\r\n return\r\n\r\n M = Module()\r\n M.x = T.dvector()\r\n M.y = T.dvector()\r\n xval= numpy.asarray([0, 0.5])\r\n M.f = Method([io.In(M.x,\r\n mutable=True,\r\n update=(M.x - M.y),\r\n value=xval)], M.x + M.y)\r\n m = M.make()\r\n m.y = numpy.asarray([1, 2])\r\n\r\n assert numpy.all(m.f(xval) == [1, 2.5])\r\n assert numpy.all(xval == [-1, -1.5])", "def test_elemwise_composite_support_code():\r\n X = tcn.shared_constructor(value=numpy.zeros((100, 10), dtype=\"float32\"),\r\n name='X')\r\n W = tcn.shared_constructor(value=numpy.zeros((10, 1), dtype=\"float32\"),\r\n name='W')\r\n U = T.dot(X, W)\r\n Y = tcn.shared_constructor(value=numpy.zeros((100, 1), dtype=\"float32\"),\r\n name='Y')\r\n P = T.exp(-(Y - U) ** 2)\r\n epsilon = numpy.asarray(0.001, dtype=\"float32\")\r\n NLL = -T.mean(T.log(P + epsilon)) # SupportCodeError\r\n G = theano.gradient.grad(NLL, wrt=[W])\r\n\r\n backup = theano.config.warn.identify_1pexp_bug\r\n theano.config.warn.identify_1pexp_bug = False\r\n try:\r\n f_grad = theano.function(inputs=[], outputs=G, mode=mode_with_gpu)\r\n finally:\r\n theano.config.warn.identify_1pexp_bug = backup\r\n f_grad()\r\n\r\n topo = f_grad.maker.fgraph.toposort()\r\n assert sum([isinstance(node.op, T.Elemwise) for node in topo]) == 1\r\n #I suspect this was failing in the original branch too\r\n assert sum([isinstance(node.op, tcn.GpuElemwise) for node in topo]) == 1", "def test_1d_1d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.6)\n def func(x):\n return x**2\n \n a = func(1)\n self.assertEqual(len(self.storage), 1)\n self.assertEqual(a, 1**2)\n \n a = func(2)\n self.assertEqual(len(self.storage), 2)\n self.assertEqual(a, 2**2)\n \n a = func(1.5)\n self.assertEqual(len(self.storage), 2)\n self.assertAlmostEqual(a, 0.5*(1**2 + 2**2))", "def test_get_functions(sersic_2d_image,segm_and_cat):\n cat, segm, segm_deblend = segm_and_cat\n\n base_source = cat[0]\n\n source = base_source\n\n assert pf.get_source_position(source) == (base_source.maxval_xindex, base_source.maxval_yindex)\n assert pf.get_source_elong(source) == base_source.elongation.value\n assert pf.get_source_ellip(source) == base_source.ellipticity.value\n assert pf.get_source_theta(source) == base_source.orientation.to('rad').value\n\n x0, y0 = pf.get_source_position(source)\n ellip, theta = pf.get_source_ellip(source), pf.get_source_theta(source)\n\n assert np.round(pf.get_amplitude_at_r(200, sersic_2d_image, x0, y0 , ellip, theta), 6) == 0.036798", "def elemwise_checker(\n op, expected_f, gap=None, test_dtypes=None, grad_test=True, name=None, gap_grad=None\n):\n\n if test_dtypes is None:\n test_dtypes = sparse.all_dtypes\n\n class TestElemwise:\n def setup_method(self):\n super().setup_method()\n self.op = op\n self.expected_f = expected_f\n self.gap = gap\n if gap_grad is not None:\n self.gap_grad = gap_grad\n else:\n self.gap_grad = gap\n # Ensure the test's name is correct.\n assert eval(self.__class__.__name__) is self.__class__\n\n def test_op(self):\n for format in sparse.sparse_formats:\n for dtype in test_dtypes:\n if dtype == \"int8\" or dtype == \"uint8\":\n continue\n\n # When testing with unsigned integers,\n # we must check if the gap contains\n # negative numbers.\n if dtype.startswith(\"uint\"):\n if self.gap and len(self.gap) == 2 and self.gap[0] < 0:\n if self.gap[1] >= 1:\n self.gap = (0, self.gap[1])\n else:\n raise TypeError(\n \"Gap not suitable for\", dtype, self.__name__\n )\n\n variable, data = sparse_random_inputs(\n 
format, shape=(4, 7), out_dtype=dtype, gap=self.gap\n )\n\n f = aesara.function(variable, self.op(*variable))\n\n tested = f(*data)\n data = [m.toarray() for m in data]\n expected = self.expected_f(*data)\n\n assert tested.format == format\n tested = tested.toarray()\n\n try:\n utt.assert_allclose(expected, tested)\n except AssertionError:\n raise AssertionError(self.__name__)\n\n # Test with int8 as dtype\n # These tests are not in the loop for two reasons.\n # First, in recent version of numpy, when a numpy\n # function have int8 as input dtype, it returns a\n # float16 as output dtype. Since this does not provide\n # enough precision, we upcast the data before we apply the\n # function.\n # Second, the tolerance for the checkup in DebugMode\n # is too high.\n for dtype in [\"int8\", \"uint8\"]:\n if dtype in test_dtypes:\n if self.gap:\n domain = self.gap\n # When testing with unsigned integers,\n # we must check if the gap contains\n # negative numbers.\n if dtype == \"uint8\":\n if len(domain) == 2 and domain[0] < 0:\n if domain[1] >= 1:\n domain = (0, domain[1])\n else:\n raise TypeError(\n \"Gap not suitable for\", dtype, self.__name__\n )\n\n else:\n domain = (0, 5)\n\n variable, data = sparse_random_inputs(\n format, shape=(4, 7), out_dtype=dtype, gap=domain\n )\n\n f = aesara.function(variable, self.op(*variable))\n\n old_value = (\n tensor.math.float32_atol,\n tensor.math.float32_rtol,\n tensor.math.float64_atol,\n tensor.math.float64_rtol,\n )\n tensor.math.float32_atol = 1e-4\n tensor.math.float32_rtol = 1e-3\n tensor.math.float64_atol = 1e-3\n tensor.math.float64_rtol = 1e-4\n try:\n tested = f(*data)\n finally:\n (\n tensor.math.float32_atol,\n tensor.math.float32_rtol,\n tensor.math.float64_atol,\n tensor.math.float64_rtol,\n ) = old_value\n\n data = [m.toarray().astype(\"float32\") for m in data]\n expected = self.expected_f(*data)\n\n assert tested.format == format\n tested = tested.toarray()\n\n try:\n utt.assert_allclose(tested, expected, rtol=1e-2)\n except AssertionError:\n raise AssertionError(self.__name__)\n\n if grad_test:\n\n def test_grad(self):\n for format in sparse.sparse_formats:\n for dtype in sparse.float_dtypes:\n variable, data = sparse_random_inputs(\n format, shape=(4, 7), out_dtype=dtype, gap=self.gap_grad\n )\n\n verify_grad_sparse(self.op, data, structured=True)\n\n # Set proper class name to uniquely identify tests.\n # Note that it is important to run this code *outside* of the `Tester`\n # class itself, otherwise it will not work properly for some reason.\n if name is None:\n name = op.__name__.capitalize() + \"Tester\"\n TestElemwise.__name__ = name\n if hasattr(TestElemwise, \"__qualname__\"):\n TestElemwise.__qualname__ = name\n assert \"Roundhalftoeven\" not in TestElemwise.__name__\n\n return TestElemwise", "def structured_pow(x, y):\r\n # see decorator for function body\r", "def test_apply_flags():\n true_value = dqflags.pixel['HOT'] + dqflags.pixel['DO_NOT_USE']\n\n print(true_value)\n\n badmap = np.zeros((10, 10), dtype=np.int)\n true_map = np.zeros((10, 10), dtype=np.uint32)\n for i in range(10):\n badmap[i, i] = 1\n true_map[i, i] = true_value\n\n\n print(true_map)\n\n\n flag_names = ['HOT', 'DO_NOT_USE']\n pixmap = bpd.apply_flags(badmap, flag_names)\n\n\n print(pixmap)\n\n\n assert np.all(pixmap == true_map)", "def testNestedFunctionGradientCall(self):\n check_numerics_callback.enable_check_numerics()\n\n x = constant_op.constant(1.0 - 1e-8, dtype=dtypes.float32)\n\n @def_function.function\n def asinp1(x):\n # asin()'s gradient 
overflows at the value close to 1.0.\n return math_ops.asin(x) + 1.0\n\n @def_function.function\n def loss(x):\n return math_ops.square(asinp1(x))\n\n with backprop.GradientTape() as tape:\n tape.watch(x)\n y = loss(x)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: self.evaluate(tape.gradient(y, x)))\n self.assertTrue(re.search(r\"gradient\", message))", "def test_verify():\n Lx = 10; Ly = 10; c = 1.0\n\n def I(x, y):\n return exp(-pow(x-Lx/2.0,2)/2.0 -pow(y-Ly/2.0,2)/2.0)\n def f(x, y, t):\n return sin(2*x) + y\n def bc(x, y, t):\n return sin(t)\n\n # use string formulas instead so also weave can be tested:\n # (need to transfer globals() so that vectorized versions work)\n I = StringFunction('exp(-pow(x-Lx/2.0,2)/2.0 - pow(y-Ly/2.0,2)/2.0)',\n independent_variables=('x', 'y'),\n Lx=Lx, Ly=Ly, globals=globals())\n f = StringFunction('sin(2*x) + y',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n bc = StringFunction('sin(t)',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n\n #nx = 15; ny = 10; tstop = 2\n nx = 4; ny = 3; tstop = 16\n verify_implementations(I, f, c, bc, Lx, Ly, nx, ny, tstop)", "def _set_bounds(b, x, n):\n for j in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, j, 0, n)] = -x[index_of(i, j, 1, n)] if b == 3 else x[index_of(i, j, 1, n)]\n x[index_of(i, j, 0, n - 1)] = -x[index_of(i, j, 1, n - 2)] if b == 3 else x[index_of(i, j, 1, n - 2)]\n for k in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, 0, k, n)] = -x[index_of(i, 1, k, n)] if b == 2 else x[index_of(i, 1, k, n)]\n x[index_of(i, n - 1, 0, n - 1)] = -x[index_of(i, n - 2, k, n - 2)] if b == 2 else x[\n index_of(i, n - 2, k, n - 2)]\n for k in range(1, n - 1):\n for j in range(1, n - 1):\n x[index_of(0, j, k, n)] = -x[index_of(1, j, k, n)] if b == 1 else x[index_of(1, j, k, n)]\n x[index_of(n - 1, j, k, n - 1)] = -x[index_of(n - 2, j, k, n)] if b == 1 else x[\n index_of(n - 2, j, k, n)]\n\n x[index_of(0, 0, 0, n)] = 1 / 3 * (x[index_of(1, 0, 0, n)] + x[index_of(0, 1, 0, n)] + x[index_of(0, 0, 1, n)])\n x[index_of(0, n - 1, 0, n)] = 1 / 3 * (\n x[index_of(1, n - 1, 0, n)] + x[index_of(0, n - 2, 0, n)] + x[index_of(0, n - 1, 1, n)])\n x[index_of(0, 0, n - 1, n)] = 1 / 3 * (\n x[index_of(1, 0, n - 1, n)] + x[index_of(0, 1, n - 1, n)] + x[index_of(0, 0, n - 2, n)])\n x[index_of(0, n - 1, n - 1, n)] = 1 / 3 * (\n x[index_of(1, n - 1, n - 1, n)] + x[index_of(0, n - 2, n - 1, n)] + x[index_of(0, n - 1, n - 2, n)])\n x[index_of(n - 1, 0, 0, n)] = 1 / 3 * (\n x[index_of(n - 2, 0, 0, n)] + x[index_of(n - 1, 1, 0, n)] + x[index_of(n - 1, 0, 1, n)])\n x[index_of(n - 1, n - 1, 0, n)] = 1 / 3 * (\n x[index_of(n - 2, n - 1, 0, n)] + x[index_of(n - 1, n - 2, 0, n)] + x[index_of(n - 1, n - 1, 1, n)])\n x[index_of(n - 1, 0, n - 1, n)] = 1 / 3 * (\n x[index_of(n - 2, 0, n - 1, n)] + x[index_of(n - 1, 1, n - 1, n)] + x[index_of(n - 1, 0, n - 2, n)])\n x[index_of(n - 1, n - 1, n - 1, n)] = 1 / 3 * (\n x[index_of(n - 2, n - 1, n - 1, n)] + x[index_of(n - 1, n - 2, n - 1, n)] + x[\n index_of(n - 1, n - 1, n - 2, n)])", "def TestFunc2(x):\r\n return 10*(-0.02*x[0] + 0.5*x[0]*x[0] + x[1])**2 \\\r\n + 128*(-0.02*x[0] + 0.5*x[0]*x[0] - x[1]/4) \\\r\n - (8e-5)*x[0]", "def test0321_2d_numerical_comparison_on_vs_np_batchsize1(\n self,\n batch_size=1,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=16,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient 
at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def test_independent_parameter(self, mocker):\r\n spy = mocker.spy(qml.gradients.finite_difference, \"generate_shifted_tapes\")\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n tapes, fn = finite_diff(tape)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (1, 2)\r\n\r\n assert len(spy.call_args_list) == 1\r\n\r\n # only called for parameter 0\r\n assert spy.call_args[0][0:2] == (tape, 0)", "def test_deep(self, expr, result, mode):\n i, j, k, l = dimify('i j k l')\n a = symbol(name='a', dimensions=(i, j, k, l), value=2., mode=mode)\n b = symbol(name='b', dimensions=(j, k), value=3., mode=mode)\n fa = a.base.function if mode == 'indexed' else a\n fb = b.base.function if mode == 'indexed' else b\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_simple_3d(self):\r\n a = tt.dtensor3()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n sl3 = 2\r\n\r\n for do_set in [True, False]:\r\n print \"Set\", do_set\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl3, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl3, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = numpy.ones((5, 3, 4))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n expected_result = numpy.copy(val_a)\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n if do_set:\r\n expected_result[:, sl3, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, sl3, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def test_annular_fpm():\n\n # test some semi-random cases - is the array size as expected? \n assert masks.annular_fpm(3, 2, np.inf).shape == (3*2*2, 3*2*2)\n assert masks.annular_fpm(3, 5, np.inf).shape == (3*5*2, 3*5*2)\n assert masks.annular_fpm(3, 5, 10).shape == (3*10*2, 3*10*2)\n assert masks.annular_fpm(3, 5, 11).shape == (3*11*2, 3*11*2)\n\n # test some pixel values are as expected. 
\n mask = masks.annular_fpm(3, 2, 10)\n assert mask[0,0]==0 # corner is black\n assert mask[5*10, 5*10]==1 # in between is white\n assert mask[3*10, 3*10]==0 # center is black", "def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n1 = a.shape[dimension]\r\n n2 = b.shape[dimension]\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2) / float(df)\r\n zerodivproblem = N.equal(svar,0)\r\n svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place\r\n t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n \r\n if printit <> 0:\r\n if type(t) == N.ndarray:\r\n t = t[0]\r\n if type(probs) == N.ndarray:\r\n probs = probs[0]\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs", "def test_ex_2_3(self):\n\n wam = WAM()\n wam.execute(self.fig_2_3_instrs)\n aW = wam.deref_reg(5)\n aZ = wam.deref_reg(2)\n wam.execute(self.fig_2_4_instrs)\n aX = wam.deref_reg(5)\n aY = wam.deref_reg(4)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def test0221_2d_numerical_comparison_on_fprop_vs_np_batchsize1(\n self,\n batch_size=1,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=16,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=None,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def test_local_dot22_to_dot22scalar():\r\n A = T.dmatrix()\r\n mode = theano.compile.mode.get_default_mode()\r\n opt = theano.tensor.opt.in2out(\r\n theano.tensor.blas.local_dot22_to_dot22scalar)\r\n mode = mode.__class__(optimizer=opt)\r\n\r\n x = T.dscalar()\r\n y = T.dscalar()\r\n z = T.dscalar()\r\n # make sure to don't have dimshuffle as we don't opt those cases\r\n m = T.dmatrix()\r\n r = T.drow()\r\n for idx, node in enumerate([\r\n #Old working cases\r\n T.mul(_dot22(A, A), x),\r\n T.mul(_dot22(A, A), x, y),\r\n T.mul(_dot22(A, A), x, r),\r\n T.mul(_dot22(A, A), m, x),\r\n T.mul(_dot22(A, A), x, m),\r\n T.mul(_dot22(A, A), x, (m * y)),\r\n T.mul(_dot22(A, A), (m * y), x),\r\n T.mul(_dot22(A, A), x, (r * y)),\r\n T.mul(_dot22(A, A), (r * y), x),\r\n T.mul(_dot22(A, A), (x * y), (m * x)),\r\n T.mul(_dot22(A, A), (r * y), (y * x)),\r\n\r\n # Case that was raising an assert that is fixed in gh-1507\r\n T.mul(_dot22(A, A), (m * y), m),\r\n T.mul(_dot22(A, A), m, (m * y)),\r\n T.mul(_dot22(A, A), (r * y), (m * x)),\r\n\r\n # assert fixed in gh-1507 and opt case added in gh-1515\r\n T.mul(_dot22(A, A), (m * y * z), m),\r\n T.mul(_dot22(A, A), m, (m * y * z)),\r\n\r\n # Opt case added in gh-1515\r\n T.mul(_dot22(A, A), T.mul(m, y, z), 
m),\r\n T.mul(_dot22(A, A), m, T.mul(m, y, z)),\r\n\r\n #Case that opt later in gh-1515\r\n T.mul(_dot22(A, A), (r * m), (m * x)),\r\n ]):\r\n node2 = theano.tensor.blas.local_dot22_to_dot22scalar.transform(\r\n node.owner)\r\n assert node2\r\n f = theano.function([x, y, z, m, r, A], node,\r\n mode=mode, on_unused_input='ignore')\r\n f(.1, .2, .3, [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10]])", "def mem_update(ops, x, mem, spike):\n #print(ops(x).size())\n #print(spike.size())\n mem = mem * decay * (1. - spike) + ops(x)\n spike = act_fun(mem) # act_fun : approximation firing function\n return mem, spike", "def test_grad_vector(func, motion, optimized, preserve_result, x):\n utils.test_reverse_array(func, motion, optimized, preserve_result, x)", "def test07(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n cr = c[\"a + 2 * d - 3 > 0\"]\n nr = a[(a + 2 * b - 3) > 0]\n # print \"ca[expr] ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"carray[expr] does not work correctly\")", "def test_matrix22(gridsize=50):\n\n v1 = vec2(3,0)\n v2 = vec2(0,3)\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n # make a second matrix, also 45 degrees, should give us 90 total \n m22_2 = matrix22()\n m22_2.from_euler(45)\n m22 = m22_2 * m22\n\n # mutliply a vector by the matrix \n v3 = m22 * v2 \n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n \n pts = [ (0,0), (0,1), (2,1), (0,2) ]\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n\n vecs = [v2,v3]\n bloody_simple_2drender('2d_rotation.png', vecs=vecs, gridsize=50, pfb=fb)\n\n #rotate the points by matrix multiplication \n pts = m22.batch_mult_pts(pts) \n bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n fb.save('2d_rotation.png')", "def loc_eval(x, b):\r\n loc_est = 0\r\n for i in enumerate(b): loc_est+=i[1]*(x**i[0])\r\n return(loc_est)" ]
[ "0.70109046", "0.5731927", "0.5725207", "0.5534058", "0.5485873", "0.54508567", "0.5420271", "0.5402067", "0.53938526", "0.5361206", "0.53606236", "0.5359261", "0.5359202", "0.5320887", "0.5293664", "0.5291382", "0.5265721", "0.52385396", "0.523383", "0.52290106", "0.52282166", "0.52210116", "0.52201825", "0.52003807", "0.52002525", "0.51925", "0.5190279", "0.517582", "0.5171569", "0.5146713", "0.51125073", "0.51123774", "0.50978607", "0.50955325", "0.5090912", "0.50880325", "0.50740653", "0.5071987", "0.50706035", "0.5070082", "0.5057871", "0.50481606", "0.5044593", "0.50376034", "0.50371355", "0.5036854", "0.50213027", "0.50172997", "0.50147253", "0.50146693", "0.500944", "0.5008418", "0.5004359", "0.50037366", "0.50023204", "0.5002317", "0.5001583", "0.49806282", "0.49805528", "0.49785385", "0.4961971", "0.4958011", "0.49392173", "0.4914446", "0.49130878", "0.49093792", "0.4898081", "0.48907334", "0.48811746", "0.48791227", "0.48775607", "0.4875819", "0.48648772", "0.4864752", "0.48561007", "0.48539585", "0.48535743", "0.48447603", "0.48419723", "0.48402712", "0.48342207", "0.48335135", "0.4832914", "0.48265654", "0.4824387", "0.4822452", "0.48208398", "0.4820327", "0.48199952", "0.48177442", "0.48174328", "0.48074389", "0.4804056", "0.48031142", "0.48018813", "0.4797731", "0.4794664", "0.47879082", "0.4786367", "0.47737578" ]
0.5903518
1
You can override this method if you want to change the format of outputs (e.g., storing gradients)
def update_output(self, ):
    input_ids, outputs, grads, adv_tokens = self.batch_output
    probs = softmax(outputs, dim=-1)
    probs, labels = torch.max(probs, dim=-1)
    tokens = [
        self.tokenizer.convert_ids_to_tokens(input_ids_)
        for input_ids_ in input_ids
    ]
    embedding_grads = grads.sum(dim=2)
    # norm for each sequence
    norms = torch.norm(embedding_grads, dim=1, p=2)  # need check hyperparameter
    # normalizing
    for i, norm in enumerate(norms):
        embedding_grads[i] = torch.abs(embedding_grads[i]) / norm
    batch_output = []
    # check probs, labels shape
    labels = torch.reshape(labels, (1, -1))
    probs = torch.reshape(probs, (1, -1))
    iterator = zip(tokens, probs, embedding_grads, labels)
    for example_tokens, example_prob, example_grad, example_label in iterator:
        example_dict = dict()
        # as we do it by batches we has a padding so we need to remove it
        example_tokens = [t for t in example_tokens if t != self.tokenizer.pad_token]
        example_dict['tokens'] = example_tokens
        example_dict['grad'] = example_grad.cpu().tolist()[:len(example_tokens)]
        example_dict['label'] = example_label.cpu().tolist()[:len(example_tokens)]  # example_label.item()
        example_dict['prob'] = example_prob.cpu().tolist()[:len(example_tokens)]  # example_prob.item()
        batch_output.append(example_dict)
    return batch_output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def out(self, inputs):", "def _save_grad_output(self, mod, grad_input, grad_output):\n if mod.training:\n self.state[mod][\"gy\"] = grad_output[0] * grad_output[0].size(0)", "def _output_update(self):\n self._outputtype = self.inputs.outputtype", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def format_model_output(self, output, batch_size=1):\r\n return output", "def _model_output(inputs, data_format):\n if data_format == 'channels_first':\n return tf.transpose(a=inputs, perm=[0, 2, 3, 1])\n else:\n return inputs", "def output_shape(self):\n raise NotImplementedError", "def outputs(self):\n pass", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if self.incoming_shape == self.scale_size:\n self.out = incoming\n else:\n self.out = resize2d(incoming, size=self.scale_size, method=self.method,\n align_corners=self.align_corners)\n if self.method_name == 'AREA':\n self.out = tf.stop_gradient(self.out)\n \n return self.out", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [ output_grad / node.inputs[0] ]", "def gradient(self, node, output_grad):\r\n return [output_grad]\r\n \"\"\"higher accuracy notice notice here\"\"\"", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = tf.stop_gradient(incoming)\n return self.out", "def calculate_output(self):", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = incoming[..., 0::3] * 0.299 + incoming[..., 1::3] * 0.587 + incoming[..., 2::3] * 0.114\n return self.out", "def convert_outputs(self):\n self.out('relaxed_structure', self.ctx.workchain.outputs.output_structure)\n self.out('total_energy', get_total_energy(self.ctx.workchain.outputs.output_parameters))\n self.out('forces', get_forces_from_trajectory(self.ctx.workchain.outputs.output_trajectory))\n self.out('stress', get_stress_from_trajectory(self.ctx.workchain.outputs.output_trajectory))", "def outputs(self, inputs):\n return inputs", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = tf.reshape(incoming, self.shape)\n \n return self.out", "def gradient(self, node, output_grad):\r\n return [output_grad]", "def gradient(self, node, output_grad):\r\n return [output_grad]", "def get_output_shape(self):\n return []", "def _create_outputs(self) -> ComponentOutputs:\n raise NotImplementedError", "def __call__(self, gradient):\n audio_out = self.modem.convert_data_to_audio(gradient.flatten())\n decoded_gradients = self.modem.convert_audio_to_floats(audio_out)\n\n # if you want to regret being alive,\n # self.stream.write(audio_out.tobytes())\n\n return 
decoded_gradients.reshape(gradient.shape)", "def gradient(self, inputs):\n raise NotImplementedError", "def outputs_convert_hook(\n self,\n outputs: Any,\n ) -> Any:\n outputs = self._maybe_mod_outputs_dtype_transform(outputs)\n return outputs", "def gradient(self, node, output_grad):\n return [output_grad]", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def _update_output_type(self):\n pass", "def add_output_ops(self, graph, output):\n return output", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [output_grad * exp(node.inputs[0])]", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [output_grad / node.const_attr ]", "def output_data(self):\n pass", "def result(self) -> Dict[str, tf.Tensor]:\n return super().result()", "def result(self) -> Dict[str, tf.Tensor]:\n return super().result()", "def gradient(self, node, output_grad):\r\n return [ - output_grad]", "def _format(self, state):\n x = state\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x,\n device=self.device,\n dtype=torch.float32)\n x = x.unsqueeze(0)\n return x", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [output_grad * node.const_attr ]", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n hahaha233 = MatMulOp()\r\n return [ hahaha233( output_grad, node.inputs[1], False , True) , hahaha233( node.inputs[0] , output_grad , True , False ) ]\r\n #return [output_grad * node.inputs[1] , output_grad * node.inputs[0] ]\r", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = incoming * self.factor\n return self.out", "def interpret_output(self, batch_output):\n raise NotImplementedError", "def _generate_output(self):\n raise NotImplementedError()", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incomings = [incoming(prev_layers=prev_layers, **kwargs) for incoming in self.incomings]\n with tf.variable_scope(self.layer_scope):\n self.out = self.a(tf.concat(axis=len(self.incoming_shapes[0]) - 1, values=incomings))\n \n return self.out", "def __init__(self, inputs=[]):\n self.inputs = inputs # input_list <- C, Java <- 匈牙利命名法 -> Python 特别不建议\n # self.outputs = outputs # output_list\n self.value = None\n self.outputs = []\n self.gradients = {}\n\n for node in self.inputs:\n node.outputs.append(self) # build a connection relationship", "def gradient(self, node, output_grad):\r\n return [reshape_op(output_grad , get_shape_op(node.inputs[0]))]", "def gradient(self, node, output_grad):\n \"\"\"TODO: Your code here\"\"\"\n return [output_grad * node.const_attr]", "def gradients(self):\n return {}", "def output(self, inputs):\n self._in_j = self._input(inputs) #Previous weighted inputs\n return self._g(self._in_j)", "def _convert_raw_outputs(self, raw_output):\n outputs = [\n np.array(raw_output.getLayerFp16(self._output_layers[i]),\n dtype=np.float32).reshape((1, -1) + self._output_shape)\n for i in range(len(self._output_layers))\n ]\n return outputs", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [ - output_grad * 
node.const_attr / node.inputs[0] / node.inputs[0] ]", "def get_outputs(self):\n raise NotImplementedError", "def gradient(self, node, output_grad):\r\n return [reshape_op(output_grad , get_shape_op(node.inputs[0])), zeroslike_op(node.inputs[1])]", "def GraphFn(self, inp):\n tensor = inp * 2.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[1])\n tensor = tensor + 3.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[2])\n tensor = tensor * 4.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[3])\n tensor += tensor + 5.0\n return array_ops.identity(tensor, name='output_0')", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [auto_sum_op(output_grad * node.inputs[1] , get_shape_op(node.inputs[0])), auto_sum_op(output_grad * node.inputs[0] , get_shape_op(node.inputs[1]))]", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n \n if self not in prev_layers:\n prev_layers += [self]\n self.out = tf.tile(self.incoming, multiples=self.multiples)\n \n return self.out", "def set_output_shape(self):\n self.output_shape = (reduce(mul, self.input_shape),)", "def gradient(self, node, output_grad):\n return [output_grad, output_grad]", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [auto_sum_op(output_grad / node.inputs[1] ,get_shape_op(node.inputs[0])), auto_sum_op(-output_grad * node.inputs[0] / node.inputs[1] / node.inputs[1] , get_shape_op(node.inputs[1]) ) ]", "def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):\n raise Exception(\n \"currently_unsupported: layer_output method is not yet supported for \"\n + \"graph neural networks in ktrain\"\n )", "def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):\n raise Exception(\n \"currently_unsupported: layer_output method is not yet supported for \"\n + \"graph neural networks in ktrain\"\n )", "def get_output(self, prev_layers=None, **kwargs):\n \n noise = self.noisefct(shape=tf.shape(self.incoming()), **self.noiseparams)\n \n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if self.backprop_noise:\n out = incoming * noise\n else:\n out = incoming * tf.stop_gradient(noise)\n self.out = self.a(out)\n \n return self.out", "def output_tensors(self):\r\n return self._output_tensors", "def outputDataType(self):\n raise NotImplementedError()", "def output(self, input, in_features, out_features,reuse=False):\n # with tf.variable_scope(self.name):\n # print('f'*20,input.get_shape().as_list(),in_features,out_features)\n w=self._create_weight([self.cnn_size,self.cnn_size,in_features,out_features],name='Wfn')\n out=self._conv2d(input,w,[1, self.cnn_stride, self.cnn_stride, 1],pre_name='convfn')\n return out", "def gradient(self, node, output_grad):\r\n #return [output_grad]\r\n return [broadcast_to(output_grad,get_shape_op(node.inputs[0]),node.const_attr)]", "def forward(self, outputs: Dict[str, Any], labels: Dict[str, Any]):\n utils.check_condition(self.output_name in outputs,\n \"output '%s' not found. Loss requires this output key\" % self.output_name)\n utils.check_condition(self.label_name in labels,\n \"label '%s' not found. 
Loss requires this label key\" % self.output_name)\n output = outputs[self.output_name]\n label = labels[self.label_name]\n return super().forward(output.astype(label, copy=False), label)", "def set_output_shape(self):\n self.output_shape = ((self.input_shape[0] // self.stride[0],\n self.input_shape[1] // self.stride[1],\n self.input_shape[2]\n ))", "def compute_output(self):\n x, y = self.input_nodes\n print(x.name, y.name)\n self.output_value = backend.dot(x.output_value, y.output_value)\n return self.output_value", "def _store_feats(layer, inp, output):\n _model_feats.append(output.cpu().numpy())", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is not None:\n self.prev_layers.extend(prev_layers)\n \n if self not in self.prev_layers:\n self.prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = tf.layers.batch_normalization(incoming, axis=self.axis, training=self.training,\n name=self.name)\n \n return self.out", "def __set_outputs__(self):\n self.__set_in_out_var__(None, 1)", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if (len(incoming.shape) > 2 and self.flatten_input) or (len(incoming.shape) > 3):\n # Flatten all but first dimension (e.g. flat seq_pos and features)\n X = tf.reshape(incoming, self.incoming_shape)\n else:\n X = incoming\n net = dot_product(X, self.W)\n if self.b is not None:\n net += self.b\n self.out = self.a(net)\n \n return self.out", "def TensorRepresentations(self) -> tensor_adapter.TensorRepresentations:", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = self.a(incoming)\n \n return self.out", "def update_output(self, latent_mat, weight_mat, y_list):", "def to_tensor(self): \n raise NotImplementedError", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = self.a(conv2d_transpose2d(incoming, W=self.W, output_shape=self.output_shape,\n strides=self.strides, padding=self.padding,\n data_format=self.data_format) + self.b)\n return self.out", "def test_param_to_gradient(self):\n pass", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = avgpool2D(incoming, ksize=self.ksize, strides=self.strides, padding=self.padding,\n data_format=self.data_format)\n return self.out", "def inputs(self):\n pass", "def gradient(self, node, output_grad):\r\n return [relu_op(output_grad, node.inputs[1]) , zeroslike_op(node.inputs[1])]\r\n #assert True\r", "def outputs(self):\r\n return self._outputs", "def __generate_output_data(self):\n if not len(self.output_data) == 0:\n return\n try:\n self.output_data = s.load(open('output/output_data.p', 'rb'))\n self.class_indices = s.load(open('output/class_indices.p', 
'rb'))\n if not self.classes_to_visualise == None:\n self.__filter_output_data(self.classes_to_visualise)\n except:\n self.output_data = generate_output_for_test_data(image_data=self.image_data,\n binary_output=self.binary_output) if self.testing else generate_output_for_train_data(\n image_data=self.image_data, binary_output=self.binary_output)\n self.class_indices = get_all_class_indices(training=False) if self.testing else get_all_class_indices()\n if not self.classes_to_visualise == None:\n self.__filter_output_data(self.classes_to_visualise)\n s.dump([out.tolist() for out in self.output_data], open('output/output_data.p', 'wb'))\n s.dump(self.class_indices, open('output/class_indices.p', 'wb'))\n\n self.legend = get_class_names_for_class_indices(list(set(sorted(self.class_indices))))", "def gradient(self, node, output_grad):\r\n #return [output_grad]\r\n return [broadcast_mean_to(output_grad,get_shape_op(node.inputs[0]),node.const_attr)]", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n \n if self not in prev_layers:\n prev_layers += [self]\n incomings = [incoming(prev_layers=prev_layers, **kwargs) for incoming in self.incomings]\n with tf.variable_scope(self.layer_scope):\n if np.all([i_s == self.incoming_shapes[0] for i_s in self.incoming_shapes]):\n out = tf.add_n(incomings)\n else:\n out = incomings[0]\n shape_len = len(out.shape)\n for incoming in incomings[1:]:\n while len(incoming.shape) < shape_len:\n incoming = tf.expand_dims(incoming, axis=-2)\n out += incoming\n self.out = self.a(out)\n return self.out", "def inputs(self):\n return NotImplementedError", "def build_outputs(self, **inputs):\n print(\"Building all outputs, \", self.name)\n# invscale, _ = self.build_output('invscale', **inputs)\n# loc, _ = self.build_output('loc', invscale=invscale, **inputs)\n# samp, _ = self.build_output('main', invscale=invscale, loc=loc)\n self.build_output('invscale', **inputs)\n self.build_output('loc', **inputs)\n self.build_output('main', **inputs)", "def gradient(self, node, output_grad):\r\n return [auto_broadcast_op(output_grad, get_shape_op(node.inputs[0])) , zeroslike_op(node.inputs[1])]\r\n #assert True\r", "def model_output(model, t, s, i):\n return 0, 0, 0, 0", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n \n if self not in prev_layers:\n prev_layers += [self]\n incomings = [incoming(prev_layers=prev_layers, **kwargs) for incoming in self.incomings]\n with tf.variable_scope(self.layer_scope):\n self.out = self.a(tf.add_n(incomings) / len(incomings))\n \n return self.out", "def get_output(self):\n raise NotImplementedError", "def set_outputs_from_pairs(self, new_outputs: Iterable[Tuple[tf.DType,\n tf.shape]]):\n self._outputs = []\n i = 0\n for (dtype, shape) in new_outputs:\n self._outputs.append(tensor.Tensor(self, i, dtype, shape))\n i += 1\n self._graph.increment_version_counter() # Just in case", "def op_output_values(self):\n return self.solid_output_values", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def output(self):\n # print \"Neuron output\"\n\n if self.output_cache is not None:\n # print self, \"returning from cache\"\n return self.output_cache\n\n self.inputs_cache = []\n\n sum = 0\n for input_edge in self.inputs:\n 
input = input_edge.from_.output()\n self.inputs_cache.append(input)\n sum += input * input_edge.w\n\n self.output_cache = sigmoid(sum)\n # print \"node output:\", self.output_cache, sum\n return self.output_cache" ]
[ "0.6842611", "0.63906467", "0.62235004", "0.62234104", "0.62234104", "0.62234104", "0.62234104", "0.6200916", "0.61630815", "0.6138572", "0.6102943", "0.6075281", "0.6048927", "0.60099345", "0.60045785", "0.59973353", "0.5995878", "0.59723157", "0.5926936", "0.5917695", "0.5914432", "0.5914432", "0.58857656", "0.58500564", "0.5824264", "0.5800219", "0.57999325", "0.57863903", "0.5773613", "0.5773613", "0.5773016", "0.57678294", "0.57665503", "0.57539237", "0.5750291", "0.57384825", "0.57384825", "0.57310355", "0.5722749", "0.57198733", "0.5712609", "0.5706907", "0.5706432", "0.5700613", "0.56768346", "0.5665394", "0.5658785", "0.56586784", "0.5657553", "0.56450546", "0.5643102", "0.5642493", "0.56347245", "0.5632942", "0.5632138", "0.5629101", "0.5626614", "0.56186134", "0.5615739", "0.560224", "0.56013536", "0.56013536", "0.55942726", "0.55928427", "0.5584313", "0.5572084", "0.5567754", "0.5565766", "0.5564226", "0.5562288", "0.55591583", "0.55560386", "0.55517167", "0.5544731", "0.5535732", "0.55347335", "0.5533216", "0.5531994", "0.5528886", "0.5525778", "0.55193454", "0.55173814", "0.55162483", "0.5514704", "0.54962164", "0.54786855", "0.54748416", "0.5463207", "0.5457929", "0.54441845", "0.5443755", "0.544239", "0.5436623", "0.5427044", "0.542296", "0.5420061", "0.5420061", "0.5420061", "0.5420061", "0.5420061", "0.5409702" ]
0.0
-1
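A minimal sketch of the kind of override the query above describes — keeping the raw, unnormalized gradients alongside the formatted output — assuming a base class that provides update_output, batch_output and tokenizer exactly as in the document field above; the class name BaseInterpreter and the raw_grad key are illustrative assumptions, not part of the source.

class GradientStoringInterpreter(BaseInterpreter):  # BaseInterpreter is assumed to define update_output as above
    def update_output(self):
        # reuse the base formatting, then attach the unnormalized per-token gradients
        batch_output = super().update_output()
        _, _, grads, _ = self.batch_output
        raw_grads = grads.sum(dim=2)  # same reduction as the base method, without the norm scaling
        for example_dict, example_grad in zip(batch_output, raw_grads):
            n_tokens = len(example_dict['tokens'])
            example_dict['raw_grad'] = example_grad.cpu().tolist()[:n_tokens]
        return batch_output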
If USE_PATH is True rely on PATH to look for binaries. Otherwise ../src/ is used by default.
def binary_location(cmd, USE_PATH=False):
    if USE_PATH:
        return cmd
    else:
        return os.path.join(BIN_PREFIX, cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linkpath(srcdir, pkg):\n home = os.getenv('HOME')\n if srcdir:\n rval = '{}/{}'.format(srcdir, pkg)\n else:\n rval = '{}/bin/{}'.format(home, pkg)\n return rval", "def binary_location(cmd, USE_PATH=False):\n return os.path.join(BIN_PREFIX, cmd)", "def set_path():\n import os\n import sys\n\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), \"..\"))", "def set_proto_src(path):\n if sys.path.count(path) == 0:\n sys.path.append(path)", "def load_libsrc():\n import sys\n ops_dir = os.path.dirname(os.path.realpath(__file__))\n fst_package = ops_dir + '/../lib_src/fst_pipeline'\n sys.path.append(fst_package)\n return", "def test_relative_paths(self):\n command_line = self._MENU + [\n \"some_pool\",\n \"../dev\",\n \"./fake\",\n \"/abc\",\n ]\n TEST_RUNNER(command_line)", "def path_which(args):\n print(header(\"$PATH Lookup: {}\".format(args.look)))\n loop_fmt = \"{color}{path}\"\n\n cnt = 0\n for part in os.environ[\"PATH\"].split(\":\"):\n color = u\"\"\n if args.color:\n color = CODES[cnt]\n\n msg = check_exec(part, args.look, args.version)\n if msg:\n print(header(loop_fmt.format(color=color, path=part), '-'))\n print(msg)\n cnt = (cnt + 1) % len(CODES)", "def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'", "def main():\n arg0 = sys.argv[0]\n if not os.path.isfile(arg0):\n sys.exit(\"sys.argv[0] is not a path to a file: \\\"\" + str(arg0) + \"\\\". Exiting now.\")\n absolute_path_to_file = os.path.realpath(arg0) # realpath follows symlinks, which is what we want in this case.\n absolute_path_to_src = os.path.dirname(absolute_path_to_file)\n (absolute_path_to_repo, src_dirname) = os.path.split(absolute_path_to_src)\n if src_dirname != \"src\":\n sys.exit(\"The driver script should be located in directory \\\"src\\\". It is instead in \\\"\" + src_dirname + \"\\\". 
Exiting now.\")\n os.chdir(absolute_path_to_repo)", "def SearchPath(name, path=None):\n path = path or os.environ['PATH']\n for dir in path.split(os.pathsep):\n binpath = os.path.join(dir, name)\n if os.path.exists(binpath):\n return os.path.abspath(binpath)\n return None", "def srcdir(path):\n if not workflow.included_stack:\n return None\n return workflow.current_basedir.join(path).get_path_or_uri()", "def test_add_srcdirs_to_syspath(root_path: Path) -> None:\n add_srcdirs_to_syspath()\n\n # Test to see if runtime_syspath's 'src' directory in now in sys.path\n src_path: Path = root_path / \"src\"\n src_path_str: str = os.fspath(src_path)\n sys_paths: List[str] = list()\n found_src_path: bool = False\n syspath_member: str\n for syspath_member in sys.path:\n sys_paths.append(syspath_member)\n if src_path_str == syspath_member:\n found_src_path = True\n break\n\n if not found_src_path:\n msg: str = f\"{src_path.as_posix()} is not in:\"\n syspath_mem: str\n for syspath_mem in sorted(sys_paths):\n msg += f\"\\n\\t{Path(syspath_mem).as_posix()}\"\n pytest.fail(msg)", "def syspath():\n import sys\n pprint(sys.path)", "def activateLocalFastPath() -> None:\n global _FAST_PATH, _FAST_PATH_IS_TEMPORARY, APP_DATA\n\n # Try to fix pathing issues in Windows.\n if os.name == \"nt\":\n APP_DATA = APP_DATA.replace(\"/\", \"\\\\\")\n\n _FAST_PATH = os.path.join(\n APP_DATA,\n \"{}{}-{}\".format(\n MPI_RANK,\n os.environ.get(\"PYTEST_XDIST_WORKER\", \"\"), # for parallel unit testing,\n datetime.datetime.now().strftime(\"%Y%m%d%H%M%S%f\"),\n ),\n )\n\n _FAST_PATH_IS_TEMPORARY = True", "def getPythonPath():\n python_path = os.environ.get(\"PYTHONPATH\",\"\")\n \n if os.path.basename(os.path.abspath(os.curdir)) == \"Test\":\n new_python_path = os.path.pathsep.join([\n python_path,os.path.normpath(\"../Lib/external/SQLObject-compat\"),\n os.path.normpath(\"../Lib/external\"),\n os.path.normpath(\"../Lib\"),\n ])\n else:\n new_python_path = os.path.pathsep.join([\n python_path,os.path.normpath(\"./Lib/external/SQLObject-compat\"),\n os.path.normpath(\"./Lib/external\"),\n os.path.normpath(\"./Lib\"),\n ])\n \n return new_python_path", "def add_path(package):\n\n path_file_name = '../{0}/test/path.txt'.format(package)\n\n if os.path.exists(path_file_name):\n with open(path_file_name, 'r') as path_file:\n for directory in path_file.readlines():\n sys.path.insert(0, os.path.abspath(\n '../{0}/{1}'.format(package, directory.strip('\\n'))\n ))", "def _whicha(cmd, paths=None):\n import os\n if paths is None:\n paths = os.environ['PATH'].split(':')\n possibilities = [os.path.expanduser(os.path.join(p, cmd)) for p in paths]\n return filter(lambda bin: os.path.exists(bin), possibilities)", "def thepath = getProgramPath(theprog):\r\n\r\n theprog = lower(theprog);\r\n\r\n if strcmp(theprog,'POV-Ray')\r\n # install location for POV-Ray\r\n thepath = '/usr/local/bin';\r\n\r\n else if strcmp(theprog,'quietpov')\r\n # install location for the QuietPOV add-on\r\n thepath = 'C:\\Program Files\\POV-Ray for Windows v3.6\\guiext\\QuietPOV';\r\n\r\n else if strcmp(theprog,'imagemagick')\r\n # install location for ImageMagick\r\n thepath = '/home/kieran/Downloads/ImageMagick-6.8.5-8';\r\n\r\n else if strcmp(theprog,'ffmpeg')\r\n # install location for the ffmpeg library\r\n thepath = '/usr/bin/ffmpeg';\r\n\r\n else\r\n thepath = '';", "def FindBinary( binary, user_options ):\n\n def _FindPath():\n key = '{0}_binary_path'.format( binary )\n if user_options.get( key ):\n return user_options[ key ]\n return GO_BINARIES.get( binary 
)\n\n binary_path = _FindPath()\n if os.path.isfile( binary_path ):\n return binary_path\n return None", "def where(self, exe, path=None):\n if exe is None:\n return None\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n extlist = ['']\n\n def is_executable(path):\n return os.path.isfile(path) and os.access(path, os.X_OK)\n\n if sys.platform == 'win32':\n pathext = os.environ['PATHEXT'].lower().split(os.pathsep)\n (base, ext) = os.path.splitext(exe)\n if ext.lower() not in pathext:\n extlist = pathext\n for ext in extlist:\n exe_name = exe + ext\n for p in paths:\n exe_path = os.path.join(p, exe_name)\n if is_executable(exe_path):\n return exe_path\n\n return None", "def test_remote_sys_path(pytester: pytest.Pytester) -> None:\n pytester.makepyfile(\n \"\"\"\n import sys\n\n def test_sys_path():\n assert \"\" not in sys.path\n \"\"\"\n )\n result = pytester.runpytest(\"-n1\")\n assert result.ret == 0", "def shared_binary_location(cmd=\"shared\"):\n return os.path.join(BIN_PREFIX, cmd)\n return binary_location(cmd, SHARED_USE_PATH)", "def module_path() -> Path:\n if hasattr(sys, \"frozen\"):\n return Path(sys.executable).resolve().parent\n else:\n return (Path(__file__) / \"..\").resolve().parent", "def init_env_path(path=None) -> None:\n if path is None:\n sys.path.insert(1, file_dir_dir())\n else:\n sys.path.insert(1, path)", "def insert_package_path():\n sys.path.insert(0, ospdn(ospdn(ospdn(ospap(__file__)))))", "def check_module_path(pkg):\n src_dir_root = ''\n print(\"[root-get] DEBUG: Checking module path\")\n check_module_name = os.system('find %s -mindepth 2 -type d -name \"%s\" ! -path \"*tutorials*\" ! -path \"*dictpch*\"' % (ROOT_SOURCES, pkg))\n if check_module_name != 0:\n print(\"Not a ROOT package (we are working only with ROOT packages for now.)\")\n return False\n else:\n # if have such directory in root then we can try to get it's real path\n path = PathChecker()\n src_dir_root = path.path4module(pkg, ROOT_SOURCES)\n if src_dir_root != None:\n print(\"[root-get] We would use a module from {0:s}\".format(src_dir_root))\n else:\n print(\"Package not present in rootbase.\")\n print(\"Please provide manifest file path, else enter 'NA'\")\n p_manifest = raw_input()\n if p_manifest != 'NA':\n value = yaml_validator(p_manifest)\n if value == 1:\n print(\"Not a valid yml. Please provide valid yml. Exiting now.\")\n else:\n print(\"Downloading package using url.\")\n dn_path = downloader(p_manifest)\n #get path for downloaded directory\n filepath = Path(dn_path + \"/CMakeLists.txt\")\n if filepath.is_file():\n src_dir_root = dn_path\n else:\n print(\"No CMakeLists.txt present. Creating using manifest.\")\n rule_name = re.compile(\".*name:.*\")\n with open(p_manifest) as mn:\n read = mn.read()\n name = rule_name.findall(read)\n parc_name = [x.lstrip(' name: ') for x in name]\n cml = open(dn_path + \"/CMakeLists.txt\", 'a')\n cml.write(\"ROOT_STANDARD_LIBRARY_PACKAGE(\" + parc_name[0] + \" DEPENDENCIES RIO)\")\n src_dir_root = dn_path\n\n else:\n print(\"Can you provide package path..(if available)\")\n dir_path = raw_input()\n filepath = Path(dir_path + \"/CMakeLists.txt\")\n if filepath.is_file():\n src_dir_root = dir_path\n else:\n print(\"No CMakeLists.txt present. 
Creating using manifest.\")\n rule_name = re.compile(\".*name:.*\")\n with open(p_manifest) as mn:\n read = mn.read()\n name = rule_name.findall(read)\n parc_name = [x.lstrip(' name: ') for x in name]\n cml = open(dn_path + \"/CMakeLists.txt\", 'a')\n cml.write(\"ROOT_STANDARD_LIBRARY_PACKAGE(\" + parc_name[0] + \" DEPENDENCIES RIO)\")\n src_dir_root = dn_path\n\n print(\"[root-get] We would use a module from {0:s}\".format(src_dir_root))\n return src_dir_root", "def path(src, name='default'):\n try:\n return get_output(['hg', 'path', name], cwd=src).strip()\n except subprocess.CalledProcessError:\n return None", "def find_executable(binary):\n\n\tfor syspath in os.environ.get('PATH', default_path).split(':'):\n\t\tif os.path.exists(os.path.join(syspath, binary)):\n\t\t\treturn os.path.join(syspath, binary)\n\n\treturn None", "def main():\n if getattr(sys, 'frozen', False):\n folderCurrent = os.path.dirname(sys.executable)\n else:\n folderCurrent = os.path.abspath(os.path.dirname(__file__))\n\n replaceAll(folderCurrent)", "def get_golem_path():\r\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"../\"))", "def GetPath(path_from_src):\n path = os.path.join(os.path.dirname(__file__), '../..', path_from_src)\n if not os.path.isfile(path):\n print 'WARNING: %s does not exist. Maybe moved or renamed?' % path\n return path", "def module_path():\r\n if hasattr(sys, \"frozen\"):\r\n return os.path.dirname(sys.executable)\r\n return os.path.dirname(__file__)", "def which():\n\n location = None\n if os.path.basename(_git_path) != _git_path:\n if os.path.isfile(_git_path):\n location = _git_path\n else:\n paths = [x for x in os.environ[\"PATH\"].split(os.pathsep) if not x.isspace()]\n for path in paths:\n exe = os.path.join(path, _git_path)\n if os.path.isfile(exe):\n location = exe\n break\n return location", "def cwd_in_path():\n ...", "def get_pythonpath(working_set, buildout, prefixes):\n\n # get all paths available in the current working set\n paths = list(working_set.entries)\n\n if hasattr(zc.buildout.easy_install, 'distribute_loc'):\n prepend_path(zc.buildout.easy_install.distribute_loc, paths)\n elif hasattr(zc.buildout.easy_install, 'setuptools_loc'):\n prepend_path(zc.buildout.easy_install.setuptools_loc, paths)\n else:\n prepend_path(zc.buildout.easy_install.setuptools_path, paths)\n\n return [k for k in working_set.entries \\\n if os.path.realpath(k) not in site_paths(buildout, prefixes)]", "def get_lex_path(env, append_paths: bool=False) -> Optional[str]:\n for prog in BINS:\n bin_path = SCons.Tool.find_program_path(\n env,\n prog,\n default_paths=DEFAULT_PATHS,\n add_path=append_paths,\n )\n if bin_path:\n return bin_path\n\n SCons.Warnings.warn(\n SCons.Warnings.SConsWarning,\n 'lex tool requested, but lex or flex binary not found in ENV PATH'\n )", "def AddScriptDirToPath():\n path = os.path.abspath(__file__)\n\n for _ in range(3):\n path, _ = os.path.split(path)\n\n if not path in sys.path:\n sys.path.append(path)", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def sublime_haskell_package_path():\n return os.path.dirname(os.path.realpath(__file__))", "def GetSrc():\n return os.path.abspath(os.path.join(_THIS_DIR, os.pardir, os.pardir,\n os.pardir))", "def _env_with_python_module_search_path():\n e = os.environ\n module_search_path = os.path.join(vmcheckerpaths.root, 'bin')\n if 'PYTHONPATH' in e.keys():\n module_search_path = os.pathsep.join(\n e['PYTHONPATH'], module_search_path)\n 
e['PYTHONPATH'] = module_search_path\n return e", "def Which(binary, path=None):\n if path is None:\n path = os.environ.get('PATH', '')\n for p in path.split(':'):\n p = os.path.join(p, binary)\n if os.access(p, os.X_OK):\n return p\n return None", "def relative_path(__file__, path):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), path))", "def task_binary_location(cmd=\"task\"):\n return binary_location(cmd, TASK_USE_PATH)", "def find_executable(name, paths):\n for path in paths:\n full_path = os.path.join(path, name)\n if os.path.isfile(full_path):\n return full_path\n # If not found, just assume it's in the PATH.\n return name", "def test():\n # usage()\n path_obj = Env('PATH')\n path_obj.pclean()\n path_obj.padd('/home/mahmud/downloads///')\n path_obj.padd('/home/mahmud/apps//', -1)\n path_obj.premove('/abcd')\n path_obj.premove('/cad/tools/platform/lsf/7.0/linux2.6-glibc2.3-x86_64/etc')\n path_obj.premove('/cad/tools/platform/lsf/7.0/linux2.6-glibc2.3-x86_64/bin')\n path_obj.plist()\n cmd = 'add /usr/bin/'\n cmd = 'clean abcd'\n cmd = 'ld_clean'\n cmd = 'lic_add /bin /tmp'\n cmd = ''\n cmd = 'env_remove CADENCE_PATH /some/arbitrary/dir'\n cmd = 'env_list CADENCE_PATH'\n cmd = 'ld_remove /cad/tools/cliosoft/sos_5.31_linux/lib /cad/tools/cadence/soc/SOC71/tools/lib'\n (cmd, var, args) = process_options(cmd.split())\n print(\"Executing: \", cmd, var, args)\n execute (cmd, var, args)", "def find_binary_in_path(filename: str) -> str:\n if \"PATH\" not in os.environ:\n raise PATHNotFoundError\n for directory in os.environ[\"PATH\"].split(os.pathsep):\n binary = os.path.abspath(os.path.join(directory, filename))\n if os.path.isfile(binary) and os.access(binary, os.X_OK):\n return binary\n raise BinaryNotFoundError", "def patch_sys_path():\n this_dir = os.path.dirname(__file__)\n to_add = os.path.join(this_dir, \"..\")\n to_add = os.path.abspath(to_add)\n sys.path.insert(0, to_add)", "def _rel_path(fn):\n return os.path.join('./eng-edu/ml/cc/src', fn)", "def get_tools_path(work_dir=consts.WORK_DIR):\r\n return join_path(work_dir, consts.TOOLS_DIR)", "def fix_sys_path():\n sys.path = EXTRA_PATHS + sys.path", "def __find_tool_path(self):\n tool_path = Path(os.path.dirname(os.path.realpath(__file__)))\n # We asume the installion path is relative to our installation path\n tool_path = tool_path / '../../../bin'\n if os.name == 'posix':\n ret = tool_path / 'fast-discovery-server'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n elif os.name == 'nt':\n ret = tool_path / 'fast-discovery-server.exe'\n if not os.path.exists(ret):\n ret = tool_path / 'fast-discovery-server.bat'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n else:\n print(f'{os.name} not supported')\n sys.exit(1)\n\n return ret", "def get_exec_path(self):\n bin_name = 'test_hint_time'\n # Look for in place build\n script_dir = os.path.dirname(os.path.realpath(__file__))\n bin_path = os.path.join(script_dir, '.libs', bin_name)\n if not os.path.exists(bin_path):\n # Look for out of place build from using apps/build_func.sh\n int_dir = os.path.dirname(script_dir)\n bin_path_op = os.path.join(int_dir, 'build/integration/test/.libs', bin_name)\n if not os.path.exists(bin_path_op):\n msg = 'Could not find application binary, tried \\n \"{}\"\\n \"{}\"'.format(\n bin_path, bin_path_op)\n raise RuntimeError(msg)\n bin_path = bin_path_op\n return bin_path", "def localPython ( localPath ) :\r\n\r\n if not type( localPath ) == str : 
return\r\n\r\n if not localPath.endswith( os.sep ) : localPath = localPath + os.sep\r\n\r\n # reads the paths to add to sys.path\r\n \r\n try :\r\n\r\n handler = open( localPath + \"sysPath.txt\", \"r\" )\r\n\r\n text = handler.read()\r\n\r\n handler.close()\r\n\r\n items = text.splitlines()\r\n\r\n except Exception, exception :\r\n\r\n items = [ ]\r\n\r\n\r\n # places the local paths before the previous search paths. only those that exist\r\n\r\n sysPath = [ ]\r\n\r\n for item in items :\r\n\r\n item = item.strip().replace( \"\\\\\", os.sep ).replace( \"/\", os.sep )\r\n\r\n if len( item ) == 0 : continue\r\n\r\n item = item.strip( os.sep )\r\n\r\n item = localPath + item\r\n\r\n if item in sysPath : continue\r\n\r\n if not os.path.exists( item ) : continue\r\n\r\n sysPath.append( item )\r\n\r\n # places the previous paths. only those that exist\r\n\r\n\r\n for item in sys.path :\r\n\r\n if item in sysPath : continue\r\n\r\n if not os.path.exists( item ) : continue\r\n\r\n sysPath.append( item )\r\n\r\n sys.path = sysPath", "def which(file, env=os.environ):\n if file is None:\n return None\n for path in env.get('PATH', '').split(os.pathsep):\n if path:\n result = os.path.join(path, file)\n if os.path.exists(result):\n return os.path.realpath(result)\n return None", "def add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)", "def initLibPath():\n libHash = {\n 'Framework': 1,\n 'UserControlleLib': 1,\n 'CaseLib': 1\n }\n\n binPath = os.path.split(os.path.realpath(__file__))[0]\n\n for key in libHash:\n sys.path.append(os.path.join(__getLibAbsPath(binPath, libHash[key]), key))", "def path(self):\n if not self._path:\n logger.spam(\"Checking for helper executable %s\", self.name)\n self._path = distutils.spawn.find_executable(self.name)\n if self._path:\n logger.debug(\"%s is at %s\", self.name, self.path)\n self._installed = True\n else:\n logger.debug(\"No path to %s found\", self.name)\n return self._path", "def _which(self, program):\n\n def is_exe(path):\n return os.path.isfile(path) and os.access(path, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n basedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n os.environ[\"PATH\"] += os.pathsep + '%s/bin/' % basedir\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n print 'ishakesumd not found, build it or place it in the PATH before using this tool.'\n exit(1)", "def find_example_dir(python):\n # Replace %s with directory to check for shoebot menus.\n paths = [\n path.format(sys_prefix=sys.prefix, cwd=os.getcwd())\n for path in [\n 'share/shoebot/examples', # default\n 'examples/', # user installed shoebot with -e\n ]\n ]\n code = textwrap.dedent(\"\"\"\n from os.path import isdir\n from pkg_resources import resource_filename, Requirement, DistributionNotFound\n \n for path in {paths}:\n try:\n res_path = resource_filename(Requirement.parse('shoebot'), path)\n if isdir(res_path):\n print(res_path)\n break\n except DistributionNotFound:\n pass\n \"\"\".format(paths=paths))\n\n # Needs to run in same python env as shoebot (may be different to gedits)\n cmd = [python, \"-c\", code]\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, errors = p.communicate()\n if errors:\n sys.stderr.write('Shoebot experienced errors searching for install and examples.\\n')\n 
sys.stderr.write('Errors:\\n{0}'.format(errors.decode('utf-8')))\n return None\n else:\n examples_dir = output.decode('utf-8').strip()\n if os.path.isdir(examples_dir):\n return examples_dir\n\n if examples_dir:\n sys.stderr.write('Shoebot could not find examples at: {0}\\n'.format(examples_dir))\n else:\n sys.stderr.write('Shoebot could not find install dir and examples.\\n')", "def find_lib_path():\n curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n # make pythonpack hack: copy this directory one level upper for setup.py\n dll_path = [curr_path, os.path.join(curr_path, '../../lib/'),\n os.path.join(curr_path, './lib/'),\n os.path.join(sys.prefix, 'xlearn')]\n if sys.platform == 'win32':\n if platform.architecture()[0] == '64bit':\n dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))\n else:\n dll_path.append(os.path.join(curr_path, '../../windows/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/Release/'))\n dll_path = [os.path.join(p, 'xlearn_api.dll') for p in dll_path]\n elif sys.platform.startswith('linux'):\n dll_path = [os.path.join(p, 'libxlearn_api.so') for p in dll_path]\n elif sys.platform == 'darwin':\n dll_path = [os.path.join(p, 'libxlearn_api.dylib') for p in dll_path]\n\n lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]\n\n # From github issues, most of installation errors come from machines w/o compilers\n if not lib_path:\n raise XLearnLibraryNotFound(\n 'Cannot find xlearn Library in the candidate path'\n )\n return lib_path", "def setSysPath():\n c = os.path.abspath(os.path.dirname(__file__))\n\n add = [\n ['lib'],\n ]\n\n for item in add:\n p = os.path.join(c, *item)\n if not p in sys.path:\n sys.path[1:1] = [p]\n\n remove = ['django', 'simplejson']\n\n # Remove unwanted paths\n for item in sys.path:\n for r in remove:\n if item.find(r) > 0:\n sys.path.remove(item)", "def scan_path(executable=\"mongod\"):\n for path in os.environ.get(\"PATH\", \"\").split(\":\"):\n path = os.path.abspath(path)\n executable_path = os.path.join(path, executable)\n if os.path.exists(executable_path):\n return executable_path", "def get_pack_path():\r\n return get_package_path().replace(\"\\\\\", \"/\").replace(\"src\", \"\")", "def set_search_path(fname, *args):\n \n # Prepare\n args = [arg.lstrip('/') for arg in args if arg]\n args = [arg for arg in args if arg != '.'] # Because we add empty dir anyway\n args.append('') # make libs search next to themselves\n command = get_command_to_set_search_path()\n \n if sys.platform.startswith('linux'):\n # Create search path value\n rpath = ':'.join( ['$ORIGIN/'+arg for arg in args] )\n # Modify rpath using a call to patchelf utility\n cmd = [command, '--set-rpath', rpath, fname]\n subprocess.check_call(cmd)\n print('Set RPATH for %r' % os.path.basename(fname))\n #print('Set RPATH for %r: %r' % (os.path.basename(fname), rpath))\n \n elif sys.platform.startswith('darwin'):\n # ensure write permissions\n mode = os.stat(fname).st_mode\n if not (mode & stat.S_IWUSR):\n os.chmod(fname, mode | stat.S_IWUSR)\n # let the file itself know its place (simpyl on rpath)\n name = os.path.basename(fname)\n subprocess.call(('install_name_tool', '-id', '@rpath/'+name, fname))\n # find the references: call otool -L on the file\n otool = subprocess.Popen(('otool', 
'-L', fname),\n stdout = subprocess.PIPE)\n references = otool.stdout.readlines()[1:]\n \n # Replace each reference\n rereferencedlibs = []\n for reference in references:\n # find the actual referenced file name\n referencedFile = reference.decode().strip().split()[0]\n if referencedFile.startswith('@'):\n continue # the referencedFile is already a relative path\n # Get lib name\n _, name = os.path.split(referencedFile)\n if name.lower() == 'python':\n name = 'libpython' # Rename Python lib on Mac\n # see if we provided the referenced file\n potentiallibs = [os.path.join(os.path.dirname(fname), arg, name) \n for arg in args]\n # if so, change the reference and rpath\n if any([os.path.isfile(p) for p in potentiallibs]):\n subprocess.call(('install_name_tool', '-change',\n referencedFile, '@rpath/'+name, fname))\n for arg in args:\n mac_add_rpath(fname, '@loader_path/' + arg)\n mac_add_rpath(fname, '@executable_path/') # use libpython next to exe\n rereferencedlibs.append(name)\n if rereferencedlibs:\n print('Replaced refs for \"%s\": %s' % \n (os.path.basename(fname), ', '.join(rereferencedlibs)) )\n \n elif sys.platform.startswith('win'):\n raise RuntimeError('Windows has no way of setting the search path on a library or exe.')\n else:\n raise RuntimeError('Do not know how to set search path of library or exe on %s' % sys.platform)", "def test_get_pyrin_main_package_path():\n\n pyrin_root_path = application_services.get_pyrin_root_path()\n pyrin_main_package_path = os.path.abspath(os.path.join(pyrin_root_path, 'pyrin'))\n assert application_services.get_pyrin_main_package_path() == pyrin_main_package_path", "def find_binary(binary: str, paths=None, fallback=None) -> str:\n\n if os.path.isabs(binary):\n if not (os.path.isfile(binary) and access(binary, os.X_OK)):\n raise CommandNotFound(binary)\n return binary\n\n if paths is None:\n paths = os.environ.get(\"PATH\", \"\").split(\":\")\n\n for path in paths:\n filename = os.path.join(os.path.abspath(path), binary)\n if access(filename, os.X_OK) and os.path.isfile(filename):\n return filename\n\n if fallback is not None:\n return fallback\n\n raise CommandNotFound(binary)", "def windows_dll_path_setup():\n global WINDOWS_PATH_SET\n if IS_WINDOWS and not WINDOWS_PATH_SET:\n try:\n out = subprocess.run(\n [\"where.exe\", \"tbb.dll\"], check=True, capture_output=True\n )\n tbb_path = os.path.dirname(out.stdout.decode().splitlines()[0])\n os.add_dll_directory(tbb_path)\n except:\n try:\n tbb_path = os.path.abspath(\n os.path.join(\n get_bridgestan_path(), \"stan\", \"lib\", \"stan_math\", \"lib\", \"tbb\"\n )\n )\n os.environ[\"PATH\"] = tbb_path + \";\" + os.environ[\"PATH\"]\n os.add_dll_directory(tbb_path)\n WINDOWS_PATH_SET = True\n except:\n warnings.warn(\n \"Unable to set path to TBB's DLL. Loading BridgeStan models may fail. \"\n f\"Tried path '{tbb_path}'\",\n RuntimeWarning,\n )\n WINDOWS_PATH_SET = False\n try:\n out = subprocess.run(\n [\n \"where.exe\",\n \"libwinpthread-1.dll\",\n \"libgcc_s_seh-1.dll\",\n \"libstdc++-6.dll\",\n ],\n check=True,\n capture_output=True,\n )\n mingw_dir = os.path.abspath(\n os.path.dirname(out.stdout.decode().splitlines()[0])\n )\n os.add_dll_directory(mingw_dir)\n WINDOWS_PATH_SET &= True\n except:\n # no default location\n warnings.warn(\n \"Unable to find MinGW's DLL location. 
Loading BridgeStan models may fail.\",\n RuntimeWarning,\n )\n WINDOWS_PATH_SET = False", "def _Which(program, paths):\n if sys.platform == 'win32' and not program.lower().endswith('.exe'):\n program += '.exe'\n\n for path in paths:\n candidate = os.path.join(os.path.normpath(path), program)\n if os.path.isfile(candidate):\n return candidate\n\n return None", "def SetToolPaths(toolpaths):\n global tool_search_paths\n\n tool_search_paths = toolpaths", "def add_project_path() -> bool:\n project_path = Path('.')\n cur_path = Path(project_path.absolute())\n for parent in cur_path.parents:\n if 'Pipfile' in [obj.name for obj in parent.glob('*')]:\n project_path = Path(parent.absolute())\n break\n\n src_path = project_path.joinpath('src')\n\n if project_path == '.':\n LOGGER.warning(\"Can't find project_path\")\n return False\n\n if src_path not in sys.path:\n sys.path.append(str(src_path.absolute()))\n return project_path", "def get_python_path():\n\n return get_executable_path('python')", "def check_package_path(pkg):\n src_dir_root = ''\n print(\"[root-get] DEBUG: Checking package path\")\n check_package_name = os.system('find %s -maxdepth 1 -type d -name \"%s\" ! -path \"*tutorials*\" ! -path \"*dictpch*\"' % (ROOT_SOURCES, pkg))\n if check_package_name != 0:\n print(\"Not a ROOT package (we are working only with ROOT packages for now.)\")\n return False\n else:\n # if have such directory in root then we can try to get it's real path\n path = PathChecker()\n src_dir_root = path.path4pkg(pkg, ROOT_SOURCES)\n print(\"[root-get] We would use a package from {0:s}\".format(src_dir_root))\n return src_dir_root", "def test_find_in_current_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n result = steptest.find_project_directory(directory)\n self.assertEqual(directory, result)", "def get_package_dir():\n return Path(__file__).parent", "def modify_path():\r\n currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\r\n parentdir = os.path.dirname(currentdir)\r\n sys.path.insert(0,parentdir)", "def FindBinary(module_space, bin_name):\n if not bin_name:\n return None\n if bin_name.startswith(\"//\"):\n # Case 1: Path is a label. 
Not supported yet.\n raise AssertionError(\n \"Bazel does not support execution of Python interpreters via labels yet\"\n )\n elif os.path.isabs(bin_name):\n # Case 2: Absolute path.\n return bin_name\n # Use normpath() to convert slashes to os.sep on Windows.\n elif os.sep in os.path.normpath(bin_name):\n # Case 3: Path is relative to the repo root.\n return os.path.join(module_space, bin_name)\n else:\n # Case 4: Path has to be looked up in the search path.\n return SearchPath(bin_name)", "def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__", "def check_path():\n print('[GenHub] Checking PATH for executables and scripts.')\n\n execs = ['gt', 'cd-hit', 'tidygff3', 'locuspocus', 'xtractore',\n 'canon-gff3', 'pmrna', 'lpdriver.py', 'uloci.py', 'seq-reg.py']\n paths = list()\n for exe in execs:\n try:\n proc = subprocess.Popen(['which', exe], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)\n resultcode = proc.wait()\n if resultcode == 0:\n procpath = next(proc.stdout)\n procpath = str(procpath).rstrip()\n paths.append((exe, procpath))\n else:\n paths.append((exe, None))\n except subprocess.CalledProcessError:\n paths.append((exe, None))\n\n missing = False\n for exe, path in paths:\n char = '+'\n if path is None:\n char = '-'\n path = '???'\n missing = True\n print('%s %-20s: %s' % (char, exe, path))\n if missing:\n print('Executables / scripts cannot be found in your PATH.', end='')\n print(' Certain build commands will not work.')", "def constrain_path_relative_to(path):\n environ_backup = os.environ\n environ = os.environ\n\n if path:\n environ = os.environ.copy()\n environ[\"PATH\"] = path\n\n os.environ = environ\n\n try:\n yield\n finally:\n os.environ = environ_backup", "def update_path():\n\timport sys\n\tsys.path.append(directory_root())", "def fix_sys_path(extra_extra_paths=()):\n sys.path[1:1] = EXTRA_PATHS\n fix_google_path()", "def here(*args):\n return os.path.join(os.path.dirname(__file__), *args)", "def _fix_sys_path():\n global _fix_sys_path_done\n\n if _fix_sys_path_done:\n return\n _fix_sys_path_done = True\n if not (sys.argv and sys.path):\n # Not enough information\n return\n d = os.path.dirname(os.path.realpath(sys.argv[0]))\n if sys.path[0] == d:\n sys.path.pop(0)", "def set_include_path(space, paths):\n interp = space.ec.interpreter\n old = os.pathsep.join(interp.include_path)\n interp.include_path = []\n for p in paths.split(os.pathsep):\n interp.include_path.append(p)\n return space.newstr(old)", "def get_bin_dir():\n return os.path.abspath(os.path.join(get_root_dir(), 'bin/'))", "def dir_bin():\n return abspath('bin')", "def get_path_to(self, *args):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def find_executable(cls, name, cmd, dry_run=False):\n if cls.PATH is None:\n cls.PATH = os.environ[\"PATH\"].split(\":\")\n for pdir in cls.PATH:\n pcmd = os.path.join(pdir, cmd)\n if os.path.exists(pcmd):\n return pcmd\n if dry_run:\n return cmd\n raise SystemExit(\"%s '%s' does not exist\" % (name, cmd))", "def get_relative_source_path(self, source_path=None):\r\n if not source_path:\r\n source_path = self.source_path\r\n if source_path is None:\r\n return None\r\n\r\n return os.path.relpath(\r\n os.path.abspath(os.path.join(self.settings['PATH'], source_path)),\r\n os.path.abspath(self.settings['PATH'])\r\n )", "def get_relative_source_path(self, 
source_path=None):\n if not source_path:\n source_path = self.source_path\n if source_path is None:\n return None\n\n return posixize_path(\n os.path.relpath(\n os.path.abspath(os.path.join(\n self.settings['PATH'],\n source_path)),\n os.path.abspath(self.settings['PATH'])\n ))", "def _path(self):\n if self.target[-1] != \"/\":\n self.target += \"/\"\n\n if \"/\" in self.source:\n self.path = self.target + self.source.split(\"/\")[-1]\n else:\n raise NotImplementedError(\"This software is not done for Windows\")\n if self.method == \"git\":\n self.path = self.path.replace(\".git\", \"\")", "def absPath(path):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), path)", "def determine_python_path():\n if git_install_requested():\n projects_yaml = config('openstack-origin-git')\n projects_yaml = git_default_repos(projects_yaml)\n return os.path.join(git_pip_venv_dir(projects_yaml),\n 'lib/python2.7/site-packages')\n else:\n return None", "def run_import(path: Path) -> None:\n if not (path / \"__main__.py\").exists():\n return\n try:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"--no-input\", path.parent.as_posix()],\n stdout=subprocess.DEVNULL,\n )\n if (path / \"__main__.py\").exists():\n subprocess.check_call(\n [sys.executable, \"-c\", f\"import {path.name}\"],\n stdout=subprocess.DEVNULL,\n )\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"uninstall\", \"--no-input\", \"-y\", path.name],\n stdout=subprocess.DEVNULL,\n )\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f\"Path {path} cannot be imported: {e}\") from None", "def which(cmd, path=None):\n if path is None:\n path = os.environ[\"PATH\"].split(os.pathsep)\n\n for prefix in path:\n filename = os.path.join(prefix, cmd)\n executable = os.access(filename, os.X_OK)\n is_not_directory = os.path.isfile(filename)\n if executable and is_not_directory:\n return True\n\n return False", "def include_path():\n include_dir = os.path.dirname(os.path.dirname(numba.__file__))\n path = os.path.abspath(include_dir)\n return path", "def get_relative_path(path, start_path=\"\"):\r\n if start_path:\r\n rel_path = lib_path.relpath(path, start_path)\r\n else:\r\n rel_path = lib_path.relpath(path)\r\n return rel_path", "def set_output_path(path):\n\n if not os.path.exists(path):\n cmdline_main.message(\"Creating %s\",path)\n try:\n os.makedirs(path)\n except OSError, e:\n if e.errno != errno.EEXIST:\n cmdline_main.warning(\"Unable to set output path %s\",path)\n\n param.normalize_path.prefix=path\n\n if not path in param.resolve_path.search_paths:\n param.resolve_path.search_paths+=[path]" ]
[ "0.64583826", "0.6119704", "0.60500836", "0.5732794", "0.5658576", "0.56554246", "0.5512849", "0.5505444", "0.54935056", "0.5483672", "0.5481498", "0.5455168", "0.5439266", "0.5433778", "0.54287785", "0.5424196", "0.5423426", "0.5394256", "0.53781176", "0.5329684", "0.5325822", "0.52742755", "0.52715653", "0.52624583", "0.5246451", "0.5242646", "0.52409595", "0.52304184", "0.5229665", "0.5225051", "0.52033633", "0.5201149", "0.5197319", "0.519542", "0.51940525", "0.5182969", "0.5170419", "0.5169428", "0.51537764", "0.5149106", "0.5148469", "0.51409435", "0.5134104", "0.5123738", "0.5110238", "0.51027423", "0.5095024", "0.50935596", "0.5091986", "0.5090286", "0.50842994", "0.50825363", "0.5076648", "0.5071093", "0.506935", "0.5067634", "0.5065574", "0.5060455", "0.50602055", "0.50593334", "0.5059273", "0.5048375", "0.50456494", "0.5044838", "0.5043009", "0.5038083", "0.5030513", "0.50158584", "0.5011537", "0.5007856", "0.50030893", "0.5002455", "0.49968073", "0.49913317", "0.49834475", "0.49799818", "0.49773663", "0.49772424", "0.49733955", "0.496623", "0.49623707", "0.4952024", "0.49502504", "0.49447647", "0.49397078", "0.49392042", "0.4923048", "0.4923018", "0.49152303", "0.49084365", "0.49080262", "0.4903503", "0.49026972", "0.48919156", "0.48878804", "0.48876777", "0.48875", "0.48855394", "0.48821542", "0.4880285" ]
0.6302872
1
Remove the user from 'workers' or 'prospects', if applicable. user A TcsUser instance to remove from workers
def removeWorker(self, user): if user == self.owner: return None # Without these queries, there's no way to tell if anything actually gets removed. # Calling remove() on a user that is not in the set does not raise an error. if self.workers.filter(pk=user.id).exists(): self.workers.remove(user) return self if self.prospects.filter(pk=user.id).exists(): self.prospects.remove(user) return self return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_candidate(self, user):\n self.assignment_related_users.filter(user=user).delete()\n inform_changed_data(self)", "def remove(self, user):\n self.packet.send_room([\"rp\", user.get_int_id(self.rooms),\n user.data.id], user.room)\n self.rooms[user.room][\"users\"].remove(user)", "def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n self.blacklist.remove(user)\n return self\n return None", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "def remove(self, user_id):\n pass", "def delete(self, user: 'UserCondensed'):\n self._delete(entity=user)", "def remove_user(user):\n # user.confirmed = False\n # user = get_user_by_phone(phone_num)\n db.session.delete(user)\n db.session.commit()\n\n return user\n # DELETE FROM users WHERE user.phone_num == phone)", "async def removeuser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! Users can only be removed from a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await ctx.message.delete()", "def _remove(self, room, regex, user):\n regexes_for_room = self.notifications[room]\n users_for_regex = regexes_for_room[regex]\n\n # users may have been added multiple times in the past, so make sure\n # we remove them all.\n while user in users_for_regex:\n users_for_regex.remove(user)\n\n if not users_for_regex:\n # remove regex from room when there are no users left to notify\n del regexes_for_room[regex]", "def sipserver_user_remove(self, user: str) -> None:\n self.remove_endpoint_from_sipserver(endpoint=user)", "def remove_judge(contest, user):\n _remove_role(contest, user, pcm.Judge)", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "def _purge_user(self, user):\n self.user_order.remove(user)\n del self.user_queue[user]\n del self.user_skip[user]", "def test_teams_remove_user_from_team_v1(self):\n pass", "def remove_user(self, u: \"Node\") -> None:\n\n if u in self.users_:\n self.users_[u] -= 1\n if self.users_[u] == 0:\n del self.users_[u]", "def delete_user(network, user):\n if user in network:\n del network[user]\n for u in network:\n connections = get_connections(network, u)\n if user in connections:\n i = connections.index(user)\n del connections[i]\n return network", "def test_teams_remove_user_from_team_v2(self):\n pass", "def remove_user(users, curr_username, user_role, request_ip):\n #TODO: error checking\n log_connector.add_log('DELETE USER', \"Removed {} user(s)\".format(len(users)), curr_username, user_role, request_ip)\n user_connector.remove_user(users)", "def remove_user(self, user: discord.User) -> bool:\n\t\tif not self.user_has_entry(user):\n\t\t\treturn False\n\t\t\n\t\tdef data_interaction(cur: Cursor):\n\t\t\tsql = f\"DELETE FROM {StrikeConsts.STRIKE_TABLE} WHERE id=%s;\"\n\t\t\tcur.execute(sql, (user.id,))\n\t\t\t\n\t\t\treturn [True]\n\t\t\t\n\t\treturn self.connect_and_execute(data_interaction)[1][0]", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return 
http.Request('DELETE', url), parsers.parse_empty", "def delete_user(self, user):\n self.delete(user)", "def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def remove_user(self, user):\n\n data = user.to_json()\n key = \"%s:%s\" % (self.channel_id, user.username)\n\n logging.info(data)\n # remove our users timestamp\n affected = self.redis_server.zrem(ENVIRONMENT['REDIS_PREFIX'] + 'users_timestamp',key)\n logging.info(\"removed user timestamp(%d): %s\" % (affected, key))", "def _RemoveUsers(self, remove_users):\n for username in remove_users:\n self.utils.RemoveUser(username)\n self.user_ssh_keys.pop(username, None)\n self.invalid_users -= set(remove_users)", "async def kick(self, user: User):\n coro = self._state.remove_team_member(self.id, user.id)\n await coro", "def remove_users(self, *users):\r\n pass", "def removeUserId(self, user_id):\n self.__register_user_ids.discard(user_id)", "def view_remove_user(self, user, username):\r\n user.realm._checker.removeUser(username)", "def remove_user(self, user_id):\n if user_id in self:\n user = self[user_id]\n del self[user_id]\n return user", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def addToBlacklist(self, user):\n if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():\n self.blacklist.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n return None", "def delete_by(self, user):\n if user.is_superuser or user is self.added_by:\n self.delete()", "async def remove_user(app: web.Application, user_id: int) -> None:\n try:\n await delete_user(app, user_id)\n except Exception: # pylint: disable=broad-except\n logger.warning(\n \"User '%s' still has some projects, could not be deleted\", user_id\n )", "def rm_favoriting_user_id(self, circuit_id, user_id):\n key = ':'.join(\n [CIRCUIT_FAV_USRS_1, \n str(circuit_id), \n CIRCUIT_FAV_USRS_2]\n )\n self.RS.srem(key, user_id)", "def test_teams_remove_customer_from_workgroup_v1(self):\n pass", "def remove_ticket(self, user):\n # Get the first ticket that matches the query.\n ticket = RaffleTicket.objects.filter(raffle_prize=self, user=user)[0]\n ticket.delete()", "def remove_user_from_govern(self, request, pk=None, user_id=None):\n try:\n user = UserProfile.objects.get(id=user_id, organization__id=pk)\n except ObjectDoesNotExist:\n raise ResourceNotFound\n else:\n user.organization = None\n user.save()\n\n return Response(status=status.HTTP_204_NO_CONTENT)", "def delete_user(self):\n\n User.user_list.remove(self)", "def remove_user(self, login):\n\t\tif login in self.users_by_name:\n\t\t\tuser = self.users_by_name[login]\n\t\t\tif not user.system:\n\t\t\t\tself.users.pop(user.id, None)\n\t\t\t\tdel(self.users_by_name[login])\n\t\t\t\tself.sync()", "def remove_users(caller, role, *users):\r\n # can always remove self (at this layer)\r\n if not(len(users) == 1 and caller == users[0]):\r\n _check_caller_authority(caller, role)\r\n role.remove_users(*users)", "async def red_delete_data_for_user(self, *, requester, user_id):\n\t\tawait self.config.user_from_id(user_id).clear()", "def delete_user(self, user, instance_m):\n from resela.model.User import authenticate\n if user:\n mikrotik_m = MikrotikManager()\n lab_m = 
LabManager(current_user.session)\n group_m = GroupManager(current_user.session)\n user_m = UserManager(current_user.session)\n\n # Remove router conf\n mikrotik_m.unbind_vpn_to_vlan(user.email)\n mikrotik_m.delete_vpn_user(user.email)\n\n instance_list = instance_m.list(\n detailed=True,\n search_opts={'all_tenants': True, 'user_id': user.id}\n )\n\n for instance in instance_list:\n instance_name = instance.name.split('|')\n lab_name = instance_name[0] + '|' + instance_name[1]\n lab = lab_m.find(name=lab_name)\n instance_m.delete_instance(\n user_m=self,\n session=current_user.session,\n lab=lab,\n instance_id=instance.id\n )\n\n teacher_group = group_m.find(name='teachers')\n\n try:\n user_m.check_in_group(user=user, group=teacher_group)\n snapshot_factory = lab_m.find(\n name='snapshotFactory|{}'.format(user.email))\n\n session = authenticate(\n credentials=current_user.token,\n project_domain_name='snapshotFactory',\n project_name=snapshot_factory.name\n )\n\n security_handler = SecurityGroupHandler(session=session)\n\n for sec_group in security_handler.list()['security_groups']:\n if sec_group['tenant_id'] == snapshot_factory.id and \\\n 'internet' in sec_group['name']:\n security_handler.delete(sec_group['id'])\n\n lab_m.delete(snapshot_factory)\n\n except ksa_exceptions.NotFound:\n # Removing students will cause en exception as they are not found.\n # Does not need to be handled.\n pass\n\n # Remove user from db\n try:\n user_model = UserModel.query.get(user.id)\n DATABASE.session.delete(user_model)\n DATABASE.session.commit()\n except Exception:\n # Ignore user not in database\n pass\n\n # Remove user from openstack\n removed = self.delete(user)\n\n if not removed:\n print('User was not deleted:', user.id)\n raise Exception(' user not deleted')", "def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass", "def _remove(users, room_name):\n global users_removed\n users_removed = []\n\n try:\n\n for word in users['message']['text'].split():\n\n if word == 'myself':\n user = users['message']['sender']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n \n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append('Not found ->> ' + \"<\" + user + \">\")\n\n check_continue = 1\n text = '```User removed: %s ```' % (','.join(users_removed))\n\n for _item in range(len(users['message']['text'].split())):\n\n _item = _item + 1\n\n try:\n _type = users['message']['annotations'][_item]['userMention']['user']['type']\n user = users['message']['annotations'][_item]['userMention']['user']['name']\n \n if _type == 'BOT':\n\n if check_continue == 1:\n continue\n else:\n text = 'Please add user with @'\n continue\n \n user = users['message']['annotations'][_item]['userMention']['user']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n\n except:\n pass\n\n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append(\"Not found ->> \" + \"<\" + user + \">\")\n text = \"```Removed users: %s ```\" % (','.join(list(set(users_removed))))\n return text\n except:\n\n text = 'Please add user with @'\n return text", "def remove_user(self, team, params={}, **options):\n path = \"/teams/%s/removeUser\" % (team)\n return self.client.post(path, params, **options)", "def remove_admin(self, project_id, user_id):\n current_user = request.environ.get('repoze.who.identity')['user']\n user = 
controller_globals._get_user_from_email(current_user.email)\n\n # make sure we're actually the project lead\n if not self._current_user_leads_review(project_id):\n return \"<font color='red'>tsk, tsk. you're not the project lead, %s.</font>\" % user.fullname\n\n leader_to_remove = Session.query(model.User).filter_by(id=user_id).one()\n review = self._get_review_from_id(project_id)\n review.leaders.remove(leader_to_remove)\n Session.add(review)\n Session.commit()\n\n redirect(url(controller=\"review\", action=\"admin\", project_id=project_id))", "def del_co_worker(self, employee):\n self.co_worker_list.remove(employee)", "def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])", "def unfollowing_and_removing(self, user_id):\n if self.unfollowing(user_id):\n ind = [i for i, j in enumerate(self.monitored_users) if j.get('user', '') == user_id]\n if ind:\n self.monitored_users.remove(self.monitored_users[ind[0]])", "async def trainer_rm(ctx, user: discord.User):\r\n \r\n trainer_data = load_file(file_path_trainer)\r\n trainer = user.id \r\n #await bot.say(trainer) tester to see if user ID -> string for trainer variable\r\n if trainer not in trainer_data[\"Trainers\"]:\r\n await bot.say(\"This trainer is not registered or has already been removed.\")\r\n \r\n else:\r\n remove_trainer(user)\r\n await bot.say(user.mention + \" has been removed.\")", "def mutate(self, info, user_id):\n del info\n assert self is None, \"Root `self` expected to be `None`!\"\n\n OnChatMessageSent.unsubscribe(group=f\"user_{user_id}\")\n\n return KickOutUser(success=True)", "def delete_user():", "def remove_slaves(self, *, user: str, identity_file: str):\n self.load_manifest(user=user, identity_file=identity_file)\n\n partial_func = functools.partial(\n remove_slaves_node,\n user=user,\n identity_file=identity_file,\n services=self.services,\n cluster=self)\n hosts = [self.master_ip] + self.slave_ips\n\n run_against_hosts(partial_func=partial_func, hosts=hosts)", "def test_resource_user_resource_remove_user_from_user_groups_delete(self):\n pass", "def remove_users(users_to_remove: list, users_dict: dict,\n end_of_service: str) -> None:\n for reciever in users_to_remove:\n if reciever in users_dict:\n send_message(reciever,\n 'Subscription expired\\n',\n end_of_service,\n users_dict[reciever]['carrier'])\n del users_dict[reciever]", "def remove_users(self, *users):\r\n entries = CourseAccessRole.objects.filter(\r\n user__in=users, role=self._role_name, org=self.org, course_id=self.course_key\r\n )\r\n entries.delete()\r\n for user in users:\r\n if hasattr(user, '_roles'):\r\n del user._roles", "async def _clear_heist(self, ctx, user: discord.Member):\r\n author = ctx.message.author\r\n await self.thief.member_clear(user)\r\n await ctx.send(\"```{} administratively cleared {}```\".format(author.name, user.name))", "def on_removeuser(self, username):\n self.users.remove(username)\n print ('%s left the room.' 
% username)", "async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")", "def removeFriend(self, user):\n user = user if isinstance(user, MyPlexUser) else self.user(user)\n url = self.FRIENDUPDATE.format(userId=user.id)\n return self.query(url, self._session.delete)", "async def _kill_player(self, ctx: Context, *, user: discord.Member):\n\n guild = ctx.guild\n\n player_id = await self.config.guild(guild).player_id()\n player_role = discord.utils.get(guild.roles, id=player_id)\n\n if player_role not in user.roles:\n return await ctx.send(_(\"User doesn't have player role.\"))\n\n try:\n await user.remove_roles(player_role)\n except discord.Forbidden:\n return await ctx.send(\n _(\n \"I either don't have permissions to manage\"\n \" roles or the `{}` role is above my highest role!\"\n ).format(player_role.name)\n )\n\n dead_id = await self.config.guild(guild).dead_id()\n dead_role = discord.utils.get(guild.roles, id=dead_id)\n\n await user.add_roles(dead_role)\n\n await ctx.message.add_reaction(CHECK_MARK)", "def remove_user(self, username):\n del self.user_table[username]", "def cleanup_user(self, cleanup_request):\n user_name = cleanup_request.message.user_name\n self.logger.debug(\"Clean up after user %r\", user_name)\n\n self.logger.debug(\"Removing requests of user %r\", user_name)\n for request in self._requests[:]:\n if request.worker.name == user_name and not request.server_request:\n self._requests.remove(request)\n\n self.logger.debug(\"Releasing locked resources of user %r\", user_name)\n resources = ResourceData.objects.filter(owner=user_name)\n if resources.count() == 0:\n self.logger.debug(\"User %r didn't lock any resource\", user_name)\n\n else:\n resources.update(owner=\"\", owner_time=None)\n self.logger.debug(\"User %r was successfully cleaned\", user_name)\n\n return SuccessReply()", "def disconnect_user(self, user):\n\t\tis_user_removed = False\n\t\tif user in self.users.all():\n\t\t\tself.users.remove(user)\n\t\t\tself.save()\n\t\t\tis_user_removed = True\n\t\treturn is_user_removed", "def remove_user(self, workspace, params={}, **options):\n path = \"/workspaces/%s/removeUser\" % (workspace)\n return self.client.post(path, params, **options)", "def test_remove_user(self):\n pass", "def remove_users_from_team(team, users):\n team_member_list = []\n for user in users:\n member_list = TeamMember.objects.filter(team_fk=team, user_fk=user)\n if not member_list:\n raise Exception('Some users do not belong this team')\n team_member_list.append(member_list[0])\n \n if any([m.is_leader for m in team_member_list]):\n team.delete()\n else:\n for m in team_member_list:\n m.delete()", "async def remove_guest_user_with_all_its_resources(\n app: web.Application, user_id: int\n) -> None:\n logger.debug(\"Will try to remove resources for user '%s' if GUEST\", user_id)\n if not await is_user_guest(app, user_id):\n logger.debug(\"User is not GUEST, skipping cleanup\")\n return\n\n await remove_all_projects_for_user(app=app, user_id=user_id)\n await remove_user(app=app, user_id=user_id)", "def userPart(self, __userID):\n\n\t\tconnectedUsers = self.connectedUsers\n\t\tif (__userID in connectedUsers):\n\t\t\tconnectedUsers.remove(__userID)", "def delete_account(user):\n\n # first delete all 
owned categories and all the items in those\n # categories, including items that other users added to the category.\n for category in user.categories:\n for item in category.items:\n db.session.delete(item)\n db.session.delete(category)\n db.session.commit()\n\n # then delete all remaining owned items\n for item in user.items:\n db.session.delete(item)\n db.session.commit()\n\n # finally, delete the user\n db.session.delete(user)\n db.session.commit()", "async def ticket_remove(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n if user.id not in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is not added.\")\n return\n\n removing_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if removing_is_admin:\n await ctx.send(\"You cannot remove a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n\n try:\n await channel.set_permissions(user, send_messages=False, read_messages=False)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].remove(user.id)\n\n await ctx.send(f\"{user.mention} has been removed from the ticket.\")", "def delete_user(self, user):\n name = utils.get_name(user)\n self._user_manager.delete(name)", "def remove_registrar(contest, user):\n _remove_role(contest, user, pcm.Registrar)", "def remove_user_from_project(tas_project, user_ref):\n keycloak_client = KeycloakClient()\n user = get_user(user_ref)\n keycloak_client.update_membership(tas_project.chargeCode, user.username, \"delete\")\n\n return True", "def del_user(self, username):\n pass", "def delete_user():\n #TODO user delete\n pass", "async def admin_remove(self, ctx: MyContext, wormhole: str, user: discord.User):\n if not self.check_wh_exists(wormhole):\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-exists\", name=wormhole\n )\n )\n return\n if not self.check_is_admin(wormhole, ctx.author.id):\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-admin\"))\n return\n query = \"SELECT 1 FROM wormhole_admin WHERE name = ? AND admin = ?\"\n isAlready = len(self.bot.db_query(query, (wormhole, user.id))) > 0\n if isAlready:\n query = \"DELETE FROM wormhole_admin WHERE admin = ? AND name = ?\"\n self.bot.db_query(query, (user.id, wormhole))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.admin-removed\")\n )\n else:\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-admin\", user=user.name\n )\n )", "async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n await ctx.send(\"User is not blacklisted.\")\r\n else:\r\n self.settings['blacklist'].remove(user.id)\r\n await ctx.send(\"User removed from blacklist.\")", "def stop(self, user):\n self.logger.info(\"Stopping {}\".format(user))\n return self.director.stop(user)", "def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)", "def action_remove_from_group(self, kwargs):\n user = kwargs[\"user\"]\n group = kwargs[\"group\"]\n\n if self.engine.remove_user_from_group(user, group):\n info(f\"User {user} sucessfully removed from {group}\")\n else:\n error(f\"Unable to remove {user} from {group}, check privileges or dn\")", "def delete_user_collection(another_user_id, user_id):\n\n db_session.query(Collection_User).filter(and_(Collection_User.user_id ==\n user_id, Collection_User.another_user_id == another_user_id)).delete()\n db_session.commit()\n update_collection_num(user_id, another_user_id, False)", "async def unplonk(ctx, user: typing.Union[discord.Member, discord.User]):\n await bot.plonk.delete(user.id)\n await r(ctx, f'Unplonked **{user}**')", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def remove_member(self, id, user):\n request = self.request_builder('orgs.teams.remove_member',\n id=id, user=user)\n return self._delete(request)", "def _user_delete(sender, instance, using, **kwargs):\n Booking.objects.filter(requester=instance).update(\n requester=get_sentinel_user(instance.group)\n )", "def delete_user_by_id(user_id):\n return woo_request_helper().delete_details(wc_endpoint='customers/{}'.format(user_id))", "def clean(self):\n super().clean()\n if self.user2:\n self.orig_cloud.delete_user(self.user2.id)", "def del_user(self, name):\n del 
self.users[irc.strings.IRCFoldedCase(modules.trim_nick(name))]", "def remove_user(cloud_list, user_id, adminu, adminpw):\n url_success = ['Success', 'success']\n for cloud in cloud_list:\n try:\n resp = urllib2.urlopen('%s/services/users/%s?operation=delete&user=%s&password=%s' %\n (cloud, user_id, adminu, adminpw))\n contents = resp.read()\n except urllib2.HTTPError, error:\n contents = error.read()\n except urllib2.URLError:\n contents = 'failed'\n output(contents, cloud, user_id, url_success, '')", "def remove_user(self, username): # remove only users from json file\n return self._user(username=username, remove=True)", "def kill_user_processes(user):\n for pid in get_user_processes(user):\n kill(pid)", "async def removerole(self, ctx, rolename, user: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n\n role = self._role_from_string(server, rolename)\n if role is None:\n await self.bot.say(\"Role not found.\")\n return\n\n if user is None:\n user = author\n\n if role in user.roles:\n try:\n await self.bot.remove_roles(user, role)\n await self.bot.say(\"Role successfully removed.\")\n except discord.Forbidden:\n await self.bot.say(\"I don't have permissions to manage roles!\")\n else:\n await self.bot.say(\"User does not have that role.\")", "def wipe_user(user_name):\n user_name = urllib.unquote(user_name) # Username is coming straight from the url bar.\n user = User.query.filter(User.user_name==user_name).first()\n delete_user(user)", "def delusers(self, args):\n\n if len(args) < 2:\n print(self.addusers.__doc__)\n return\n\n gname = args[0]\n users = args[1:]\n\n g = sr.group(gname)\n\n if not g.in_db:\n print(\"Group '%s' not found.\" % ( gname ))\n return\n\n not_members = g.user_rm( users )\n g.save()\n\n for uname in not_members:\n print(\"Unable to remove non-member '%s' from '%s'\" % ( gname, uname ))", "def delete_users(project):\n for user_id in project.user_id.all():\n project.user_id.remove(user_id.pk)\n project.save()", "def removeOnUserCreate(call, args=(), kwargs={}, nodeClass='*'):\n pass" ]
[ "0.6894938", "0.68542784", "0.685336", "0.67998946", "0.6638343", "0.6393456", "0.63634795", "0.6293484", "0.6282741", "0.6280759", "0.6247314", "0.62332475", "0.6231962", "0.6227879", "0.61912465", "0.6178004", "0.6158775", "0.6148308", "0.6134449", "0.6126714", "0.6120114", "0.60983396", "0.6096013", "0.606499", "0.5955821", "0.5939528", "0.5925723", "0.5921348", "0.5916311", "0.59012145", "0.58971006", "0.58971006", "0.58971006", "0.58745545", "0.5860761", "0.58505446", "0.5838628", "0.5816247", "0.58034956", "0.5793691", "0.5789163", "0.5784681", "0.5779461", "0.5777171", "0.57746196", "0.5770486", "0.57687575", "0.5762873", "0.57434314", "0.5737735", "0.57268333", "0.57217485", "0.57199764", "0.5700944", "0.5692091", "0.56885487", "0.5688173", "0.56859475", "0.5681384", "0.56790423", "0.5678291", "0.5676155", "0.5672018", "0.566497", "0.5657416", "0.56469357", "0.5640149", "0.56293446", "0.561604", "0.5612923", "0.559429", "0.55890685", "0.5577992", "0.55743104", "0.55696183", "0.5563441", "0.5550521", "0.55462307", "0.55400854", "0.5536567", "0.55279297", "0.55246943", "0.5512064", "0.5499752", "0.5493575", "0.54835063", "0.5477116", "0.54745257", "0.5466946", "0.5466898", "0.5458567", "0.5428476", "0.5421473", "0.5419582", "0.5419546", "0.5412158", "0.5400838", "0.5396411", "0.53901035", "0.53863776" ]
0.7817377
0
Unfreeze a FrozenDict. Makes a mutable copy of a `FrozenDict` mutable by transforming it into (nested) dict.
def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]: if isinstance(x, FrozenDict): # deep copy internal state of a FrozenDict # the dict branch would also work here but # it is much less performant because jax.tree_util.tree_map # uses an optimized C implementation. return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore elif isinstance(x, dict): ys = {} for key, value in x.items(): ys[key] = unfreeze(value) return ys else: return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}", "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def unfreeze(cls, frozen):\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)", "def unfreeze(obj, ignore_types=[]):\n if obj is None:\n return obj\n\n to_process = [obj]\n while len(to_process) > 0:\n _obj = to_process.pop()\n\n for attr in dir(_obj):\n if attr.startswith(\"__\"):\n continue\n value = getattr(_obj, attr)\n if isinstance(value, FrozenDict):\n value = {k: v for k, v in value.items()}\n to_process.extend(value.values())\n elif isinstance(value, FrozenList):\n value = [x for x in value]\n to_process.extend(value)\n elif not callable(value) and not isinstance(value, tuple(ignore_types)):\n to_process.append(value)\n\n try:\n setattr(_obj, attr, value)\n except BaseException:\n pass\n\n return obj", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def pop(\n x: Union[FrozenDict, Dict[str, Any]], key: str\n) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]:\n\n if isinstance(x, FrozenDict):\n return x.pop(key)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n value = new_dict.pop(key)\n return new_dict, value\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def flat_dict(d):\n nd = {}\n for (key, value) in d.items():\n nd[key] = value.pop()\n\n return nd", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def dict2frozenset(d):\n return frozenset(d.items())", "def freeze(value):\n if isinstance(value, list):\n value = tuple(freeze(e) for e in value)\n elif isinstance(value, set):\n value = frozenset(freeze(e) for e in value)\n elif isinstance(value, dict):\n value = frozendict({k: freeze(v) for k, v in value.items()})\n elif isinstance(value, tuple):\n value = tuple(freeze(e) for e in value)\n else:\n pass\n return value", "def fl_unfreeze_form(ptr_flform):\n _fl_unfreeze_form = library.cfuncproto(\n library.load_so_libforms(), \"fl_unfreeze_form\", \\\n None, [cty.POINTER(xfdata.FL_FORM)], \\\n \"\"\"void fl_unfreeze_form(FL_FORM * form) \"\"\")\n library.check_if_flinitialized()\n library.verify_flformptr_type(ptr_flform)\n library.keep_elem_refs(ptr_flform)\n _fl_unfreeze_form(ptr_flform)", "def unflatten_dict(flat):\n 
unflattened = dict()\n\n for key, value in sorted(flat.items(), key=_key_order):\n if '__' in key:\n key, subkey = key.split('__', 1)\n subkey, name = subkey.rsplit('__', 1)\n\n if name.isdigit():\n column_index = int(name)\n row_index = int(subkey)\n\n array = unflattened.setdefault(key, list())\n\n if len(array) == row_index:\n row = list()\n array.append(row)\n elif len(array) == row_index + 1:\n row = array[row_index]\n else:\n # This should never happen\n raise ValueError('There was an error unflattening the extension.')\n\n if len(row) == column_index:\n row.append(value)\n else:\n # This should never happen\n raise ValueError('There was an error unflattening the extension.')\n\n else:\n subdict = unflattened.setdefault(key, dict())\n if subkey.isdigit():\n subkey = int(subkey)\n\n inner = subdict.setdefault(subkey, dict())\n inner[name] = value\n\n else:\n unflattened[key] = value\n\n return unflattened", "def _unparse_dict(d, strategies=None):\n\n def _unparse_val(val):\n for instance_type, func in strategies:\n if isinstance(val, instance_type):\n return func(val)\n else:\n return val\n\n strategies = strategies or []\n out = dict()\n for k, v in d.items():\n if isinstance(v, dict):\n v = _unparse_dict(v, strategies=strategies)\n elif isinstance(v, list):\n v = [_unparse_val(val) for val in v]\n elif isinstance(v, tuple):\n v = tuple(_unparse_val(val) for val in v)\n else:\n v = _unparse_val(v)\n out[k] = v\n return out", "def invert_dict(d):\r\n if isinstance(d, dict):\r\n temp = d\r\n else:\r\n temp = dict(d)\r\n result = {}\r\n for key, val in temp.iteritems():\r\n if val not in result:\r\n result[val] = []\r\n result[val].append(key)\r\n return result", "def clean_dict(d):\n if not isinstance(d, dict):\n return d\n return dict((clean_dict(k), v) for k, v in d.items() if k is not 'dates')", "def CleanUpDict(dct):\n SanityCheck.ValidateTypes(((dct, dict),))\n\n new_dct = {}\n for key in dct:\n if dct[key]:\n new_dct[key] = dct[key]\n\n return new_dct", "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def deep_update(d, u):\n for k, v in six.iteritems(u):\n dv = d.get(k, {})\n if not isinstance(dv, collections.abc.Mapping):\n d[k] = v\n elif isinstance(v, collections.abc.Mapping):\n d[k] = deep_update(dv, v)\n else:\n d[k] = v\n return d", "def revert_dictionary(dictionary):\n return {v: k for k, v in dictionary.items()}", "def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:\n value = self[key]\n new_dict = dict(self._dict)\n new_dict.pop(key)\n new_self = type(self)(new_dict)\n return new_self, value", "def fl_unfreeze_all_forms():\n _fl_unfreeze_all_forms = library.cfuncproto(\n library.load_so_libforms(), \"fl_unfreeze_all_forms\", \\\n None, [], \\\n \"\"\"void fl_unfreeze_all_forms() \"\"\")\n library.check_if_flinitialized()\n _fl_unfreeze_all_forms()", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n dict2 = copy.deepcopy(data)\n for item in dict2:\n for key in redundant_keys:\n item.pop(key)\n return dict2", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def unflatten(\n d: Dict[str, Any],\n base: Dict[str, Any] = None,\n) -> Dict[str, Any]:\n if base is None:\n base = {}\n\n for key, value in d.items():\n root = base\n\n ###\n # If a dotted path is encountered, create nested dicts for all but\n # the last level, then change root to that last level, and key to\n # the final key 
in the path. This allows one final setitem at the bottom\n # of the loop.\n if '.' in key:\n *parts, key = key.split('.')\n\n for part in parts:\n root.setdefault(part, {})\n root = root[part]\n\n if isinstance(value, dict):\n value = unflatten(value, root.get(key, {}))\n\n root[key] = value\n\n return base", "def rconvert(self):\n for k in self:\n if isinstance(self[k], dict):\n if not isinstance(self[k], AttrDict):\n self[k] = AttrDict(self[k])\n self[k].rconvert()\n return self", "def InvertDict(dict_in):\n return dict(zip(dict_in.values(), dict_in.keys()))", "def detachDict(dict, key1, key2):\n\n for key in dict.keys():\n if key == key1 or key == key2:\n del dict[key]\n else:\n for subkey in dict[key].keys():\n if subkey == key1 or subkey == key2:\n del dict[key][subkey]", "def _rev_dict(d):\n return {v: k for k, v in d.items()}", "def deepcopy(obj):\n if isinstance(obj, dict):\n return {deepcopy(key): deepcopy(value) for key, value in obj.items()}\n if hasattr(obj, '__iter__'):\n return type(obj)(deepcopy(item) for item in obj)\n return obj", "def clean_dict(to_clean):\n for k in list(to_clean.keys()):\n if not to_clean.get(k):\n to_clean.pop(k)", "def clear_dict(d: dict) -> dict:\n # TODO delete if not used\n return {k: v for k, v in d.items() if v is not None}", "def invert_dict(d):\n inv_d = {}\n for k, v in d.items():\n inv_d[v] = inv_d.get(v, [])\n inv_d[v].append(k)\n return inv_d", "def update(d, u):\n\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def clean_dict(d):\n\n if not isinstance(d, (dict, list)):\n return d\n if isinstance(d, list):\n return [v for v in (clean_dict(v) for v in d) if v]\n return OrderedDict([(k, v) for k, v in ((k, clean_dict(v)) for k, v in list(d.items())) if v])", "def update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def invert_dict(d):\n newd={}\n for k in d:\n newd[d[k]]=k\n return newd", "def compact_dict(source_dict):\n return {k: v for k, v in source_dict.items() if v is not None}", "def _flatten_dict(x: Dict) ->Dict:\n new_dict = {}\n for key, value in x.items():\n if isinstance(value, dict):\n for k, v in value.items():\n new_dict[k] = v\n else:\n new_dict[key] = value\n return new_dict", "def deepish_copy(org):\n out = dict().fromkeys(org)\n for k,v in org.items():\n try:\n out[k] = v.copy() # dicts, sets\n except AttributeError:\n try:\n out[k] = v[:] # lists, tuples, strings, unicode\n except TypeError:\n out[k] = v # ints\n \n return out", "def _update_loose (self, dict):\n self.__dict__.update(dict)", "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def unflatten(dictionary, sep=\".\"):\n unflattened_dictionary = {}\n for key, value in dictionary.items():\n parts = key.split(sep)\n sub_dictionary = unflattened_dictionary\n for part in parts[:-1]:\n if part not in sub_dictionary:\n sub_dictionary[part] = {}\n sub_dictionary = sub_dictionary[part]\n sub_dictionary[parts[-1]] = value\n return unflattened_dictionary", "def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))", "def unflatten(arg):\n if hasattr(arg, \"iteritems\"):\n items = arg.iteritems()\n elif hasattr(arg, \"items\"):\n items = arg.items()\n else:\n items = arg\n\n data = {}\n holders = []\n 
for flat_key, val in items:\n parsed_key = _parse_key(flat_key)\n obj = data\n for depth, (key, next_key) in enumerate(zip(parsed_key, parsed_key[1:]), 1):\n if isinstance(next_key, string_type):\n holder_type = _dict_holder\n else:\n holder_type = _list_holder\n\n if key not in obj:\n obj[key] = holder_type(_unparse_key(parsed_key[:depth]))\n holders.append((obj, key))\n elif not isinstance(obj[key], holder_type):\n raise ValueError(\n \"conflicting types %s and %s for key %r\"\n % (\n _node_type(obj[key]),\n holder_type.node_type,\n _unparse_key(parsed_key[:depth]),\n )\n )\n obj = obj[key]\n\n last_key = parsed_key[-1]\n if isinstance(obj.get(last_key), _holder):\n raise ValueError(\n \"conflicting types %s and terminal for key %r\"\n % (_node_type(obj[last_key]), flat_key)\n )\n obj[last_key] = val\n\n for obj, key in reversed(holders):\n obj[key] = obj[key].getvalue()\n\n return data", "def flatten_dict(d, separator=':', _parent_key=''):\n items = []\n for k, v in d.items():\n new_key = _parent_key + separator + k if _parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten_dict(v, separator=separator, _parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def invert_dict(invertible_dict):\n inverted = {}\n for k, v in six.iteritems(invertible_dict):\n if not isinstance(v, Hashable):\n raise TypeError(u'Expected an invertible dict, but value at key {} has type {}'.format(\n k, type(v).__name__))\n if v in inverted:\n raise TypeError(u'Expected an invertible dict, but keys '\n u'{} and {} map to the same value'.format(\n inverted[v], k))\n inverted[v] = k\n return inverted", "def recursive_squeeze(dictlike):\n out = {}\n for k, v in dictlike.items():\n if isinstance(v, dict):\n out[k] = recursive_squeeze(v)\n else:\n out[k] = np.squeeze(v)\n return out", "def invert_dict(d):\n inverse = dict()\n for key in d:\n val = d[key]\n # If val is in inverse, setdefault(val,[]) will just return\n # inverse[val], so this is like saying inverse[val].append(key).\n # If val is *not* in inverse, setdefault will create the key-value\n # pair {val: []}, then return inverse[val] (which is now []).\n # Then we call append(key) on this new inverse[val], which will yield\n # inverse[val]=[key]\n inverse.setdefault(val,[]).append(key)\n return inverse", "def recursive_mapping_update(d, u):\n if u is not None:\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = recursive_mapping_update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def unfrozen(self):\n archive_name = self.get_archive_name()\n\n # Decompilation for all .pyc files (inside of archive or binary)\n for pyc_file in self.unpack_archive(archive_name):\n self.decompilation(pyc_file)\n\n os.chdir(self.current_path)\n\n print(\"\\nWork is done.\")", "def _app_cache_deepcopy(obj):\n if isinstance(obj, dict):\n return dict((_app_cache_deepcopy(key), _app_cache_deepcopy(val))\n for key, val in obj.items())\n elif isinstance(obj, list):\n return list(_app_cache_deepcopy(val) for val in obj)\n elif isinstance(obj, SortedDict):\n return deepcopy(obj)\n return obj", "def flatten(dictionary, sep=\".\"):\n\n def _flatten(dictionary):\n if dictionary == {}:\n return dictionary\n\n key, value = dictionary.popitem()\n if not isinstance(value, dict) or not value:\n new_dictionary = {key: value}\n new_dictionary.update(flatten(dictionary, sep=sep))\n return new_dictionary\n\n flat_sub_dictionary = flatten(value, sep=sep)\n for flat_sub_key in 
list(flat_sub_dictionary.keys()):\n flat_key = key + sep + flat_sub_key\n flat_sub_dictionary[flat_key] = flat_sub_dictionary.pop(flat_sub_key)\n\n new_dictionary = flat_sub_dictionary\n new_dictionary.update(flatten(dictionary, sep=sep))\n return new_dictionary\n\n return _flatten(copy.deepcopy(dictionary))", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n continue\n # Put the attribute inside the group\n grpname, attr = key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def dictflip(dictionary):\n\n return {v: k for k, v in dictionary.items()}", "def del_dict_attrs(d, key):\n key_parts = key.split('.')\n if len(key_parts) > 1:\n d[key_parts[:1][0]] = del_dict_attrs(d[key_parts[:1][0]], '.'.join(key_parts[1:]))\n else:\n del d[key_parts[:1][0]]\n return d", "def remove_fc(state_dict):\n return {key: value for key, value in state_dict.items() if not key.startswith('fc.')}", "def dict_pop(d, key):\n return d.pop(key)", "def recursive_drop_falsy(d):\r\n if isinstance(d, dict):\r\n return dict((k, recursive_drop_falsy(v)) for k, v in d.items() if v)\r\n elif isinstance(d, list):\r\n return map(recursive_drop_falsy, d)\r\n elif isinstance(d, basestring):\r\n return force_bytes(d)\r\n else:\r\n return d", "def invertDictionary(input_dict):\n inverse_dict = {v: k for k, v in input_dict.items()}\n\n return inverse_dict", "def truncate_dict(dictionary: Dict, n: int) -> Dict:\n return {k: v for (k, v) in list(dictionary.items())[:n]}", "def dictkeyclean(d):\r\n new_d = {}\r\n for k, v in d.iteritems():\r\n new_d[str(k)] = v\r\n return new_d", "def flatten_dict(d):\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in flatten_dict(value).items():\n yield subkey, subvalue\n else:\n yield key, value\n\n return dict(items())", "def recursive_update_cfg(d, u):\n for k, v in u.iteritems():\n if isinstance(v, collections.Mapping):\n r = update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def flatten_dict(d):\n\n def expand(key, value):\n if isinstance(value, dict):\n return [(key + '.' 
+ k, v) for k, v in flatten_dict(value).items()]\n else:\n return [(key, value)]\n\n items = [item for k, v in d.items() for item in expand(k, v)]\n return dict(items)", "def unfreeze(name, path=None, use_vt=None):\n _ensure_exists(name, path=path)\n if state(name, path=path) == \"stopped\":\n raise CommandExecutionError(f\"Container '{name}' is stopped\")\n cmd = \"lxc-unfreeze\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n return _change_state(cmd, name, \"running\", path=path, use_vt=use_vt)", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def update_dict(d, u, omit_new=False):\n\n for k, v in u.items():\n if k not in d and omit_new:\n continue\n\n if isinstance(v, collections.abc.Mapping):\n d[k] = update_dict(d.get(k, {}), v, omit_new)\n elif isinstance(v, list):\n d[k] = [update_dict(i, j, omit_new) if None not in (i, j) else\n i if j is None else j\n for (i, j) in itertools.zip_longest(d.get(k, []), v)]\n else:\n d[k] = v\n return d", "def update(d, u):\n for k, v in u.items():\n if isinstance(v, Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def remove_keys(d, keys):\n pp = deepcopy(d)\n if isinstance(keys, (list, tuple)):\n for k in keys:\n pp.pop(k, None)\n else:\n pp.pop(keys, None)\n return pp", "def dict_normalization(dict_, nested=False):\n dict_norm = dict()\n if not nested:\n if dict_.values():\n d_max = max(dict_.values())\n d_min = min(dict_.values())\n if d_max - d_min == 0:\n dict_norm = {key: 1 for key in dict_}\n else:\n dict_norm = {key: (dict_[key] - d_min) / (d_max - d_min) for key in dict_}\n else:\n for key_1 in dict_:\n if dict_[key_1]:\n dict_norm[key_1] = dict()\n else: continue\n d_max = max(dict_[key_1].values())\n d_min = min(dict_[key_1].values())\n for key_2 in dict_[key_1]:\n if d_max - d_min == 0:\n dict_norm[key_1][key_2] = 1 / len(dict_[key_1])\n else:\n dict_norm[key_1][key_2] = (dict_[key_1][key_2] - d_min) / (d_max - d_min)\n return dict_norm", "def to_dict(self):\n return {k: v.to_dict() if isinstance(v, AttrDict) else v\n for k, v in self.__dict__.items() if not k.startswith('_')}", "def make_dict_unstructure_fn(\n cl: type[T],\n converter: BaseConverter,\n _cattrs_use_linecache: bool = True,\n **kwargs: AttributeOverride,\n) -> Callable[[T], dict[str, Any]]:\n origin = get_origin(cl)\n attrs = _adapted_fields(origin or cl) # type: ignore\n req_keys = _required_keys(origin or cl)\n\n mapping = {}\n if is_generic(cl):\n mapping = generate_mapping(cl, mapping)\n\n for base in getattr(origin, \"__orig_bases__\", ()):\n if is_generic(base) and not str(base).startswith(\"typing.Generic\"):\n mapping = generate_mapping(base, mapping)\n break\n\n # It's possible for origin to be None if this is a subclass\n # of a generic class.\n if origin is not None:\n cl = origin\n\n cl_name = cl.__name__\n fn_name = \"unstructure_typeddict_\" + cl_name\n globs = {}\n lines = []\n internal_arg_parts = {}\n\n # We keep track of what we're generating to help with recursive\n # class graphs.\n try:\n working_set = already_generating.working_set\n except AttributeError:\n working_set = set()\n already_generating.working_set = working_set\n if cl in working_set:\n raise RecursionError()\n working_set.add(cl)\n\n try:\n # We want to short-circuit in certain cases and return the identity\n # function.\n # We short-circuit if all of these are true:\n # * no attributes have been overridden\n # * all attributes resolve to `converter._unstructure_identity`\n for a in attrs:\n attr_name = 
a.name\n override = kwargs.get(attr_name, neutral)\n if override != neutral:\n break\n handler = None\n t = a.type\n nrb = get_notrequired_base(t)\n if nrb is not NOTHING:\n t = nrb\n\n if isinstance(t, TypeVar):\n if t.__name__ in mapping:\n t = mapping[t.__name__]\n else:\n handler = converter.unstructure\n elif is_generic(t) and not is_bare(t) and not is_annotated(t):\n t = deep_copy_with(t, mapping)\n\n if handler is None:\n try:\n handler = converter._unstructure_func.dispatch(t)\n except RecursionError:\n # There's a circular reference somewhere down the line\n handler = converter.unstructure\n is_identity = handler == converter._unstructure_identity\n if not is_identity:\n break\n else:\n # We've not broken the loop.\n return converter._unstructure_identity\n\n for ix, a in enumerate(attrs):\n attr_name = a.name\n override = kwargs.get(attr_name, neutral)\n if override.omit:\n lines.append(f\" res.pop('{attr_name}', None)\")\n continue\n if override.rename is not None:\n # We also need to pop when renaming, since we're copying\n # the original.\n lines.append(f\" res.pop('{attr_name}', None)\")\n kn = attr_name if override.rename is None else override.rename\n attr_required = attr_name in req_keys\n\n # For each attribute, we try resolving the type here and now.\n # If a type is manually overwritten, this function should be\n # regenerated.\n handler = None\n if override.unstruct_hook is not None:\n handler = override.unstruct_hook\n else:\n t = a.type\n nrb = get_notrequired_base(t)\n if nrb is not NOTHING:\n t = nrb\n\n if isinstance(t, TypeVar):\n if t.__name__ in mapping:\n t = mapping[t.__name__]\n else:\n handler = converter.unstructure\n elif is_generic(t) and not is_bare(t) and not is_annotated(t):\n t = deep_copy_with(t, mapping)\n\n if handler is None:\n try:\n handler = converter._unstructure_func.dispatch(t)\n except RecursionError:\n # There's a circular reference somewhere down the line\n handler = converter.unstructure\n\n is_identity = handler == converter._unstructure_identity\n\n if not is_identity:\n unstruct_handler_name = f\"__c_unstr_{ix}\"\n globs[unstruct_handler_name] = handler\n internal_arg_parts[unstruct_handler_name] = handler\n invoke = f\"{unstruct_handler_name}(instance['{attr_name}'])\"\n elif override.rename is None:\n # We're not doing anything to this attribute, so\n # it'll already be present in the input dict.\n continue\n else:\n # Probably renamed, we just fetch it.\n invoke = f\"instance['{attr_name}']\"\n\n if attr_required:\n # No default or no override.\n lines.append(f\" res['{kn}'] = {invoke}\")\n else:\n lines.append(f\" if '{kn}' in instance: res['{kn}'] = {invoke}\")\n\n internal_arg_line = \", \".join([f\"{i}={i}\" for i in internal_arg_parts])\n if internal_arg_line:\n internal_arg_line = f\", {internal_arg_line}\"\n for k, v in internal_arg_parts.items():\n globs[k] = v\n\n total_lines = [\n f\"def {fn_name}(instance{internal_arg_line}):\",\n \" res = instance.copy()\",\n *lines,\n \" return res\",\n ]\n script = \"\\n\".join(total_lines)\n\n fname = generate_unique_filename(\n cl, \"unstructure\", reserve=_cattrs_use_linecache\n )\n\n eval(compile(script, fname, \"exec\"), globs)\n\n fn = globs[fn_name]\n if _cattrs_use_linecache:\n linecache.cache[fname] = len(script), None, total_lines, fname\n finally:\n working_set.remove(cl)\n if not working_set:\n del already_generating.working_set\n\n return fn", "def deepupdate(original, update):\n for key, value in original.iteritems():\n if not key in update:\n update[key] = value\n elif 
isinstance(value, dict):\n deepupdate(value, update[key])\n return update", "def inverse(dict_):\n idict = dict([(value,key) for key,value in dict_.iteritems()])\n if len(idict)!=len(dict_):\n raise ValueError(\"Dictionary has no inverse (values not unique).\")\n return idict", "def swapdict(d):\n x = {}\n for k, v in d.iteritems():\n x[v] = k\n return x", "def to_dict(self):\n return {\n k: v.to_dict() if isinstance(v, AttrDict) else v\n for k, v in self.__dict__.items()\n if not k.startswith(\"_\")\n }", "def prune(bushy: dict) -> dict:\n pruned = dict()\n for key in bushy:\n if bushy[key]:\n pruned[key] = bushy[key]\n return pruned", "def repackage_state(self, state):\n state['hxs'] = state['hxs'].detach()\n state['cxs'] = state['cxs'].detach()\n return state", "def unflatten(self): \n self.assign(self.get_unflattened_circuit())\n self._expr_map = None", "def clean_dict(dictionary):\n return {k: v for k, v in dictionary.items() if v}", "def dict_deep_update(d, u, handlers=None):\n if handlers is None:\n handlers = {}\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = dict_deep_update(d.get(k, {}), v, handlers)\n d[k] = r\n elif k in d:\n h = handlers.get(type(v), None)\n if h is not None:\n d[k] = h(d[k], u[k])\n else:\n d[k] = u[k]\n else:\n d[k] = u[k]\n return d", "def to_dict(self):\r\n new_dict = {}\r\n for key, val in self.items():\r\n if isinstance(val, NestedDict):\r\n new_dict[key] = val.to_dict()\r\n else:\r\n new_dict[key] = val\r\n return new_dict", "def flatten(d: MutableMapping, sep: str = \".\", parent_key: str = \"\") -> dict:\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten(v, sep=sep, parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def flatten_dict(d, parent_key=\"\", sep=\"_\"):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def subtract(dict_a, dict_b, strict=False):\n if not strict:\n return subtract_by_key(dict_a, dict_b)\n\n difference_dict = {}\n for key in dict_a:\n if key not in dict_b or dict_b[key] != dict_a[key]:\n difference_dict[key] = dict_a[key]\n\n return difference_dict", "def safe_dict(d):\r\n if isinstance(d, dict):\r\n return dict([(k.encode('utf-8'), safe_dict(v)) for k, v in d.iteritems()])\r\n elif isinstance(d, list):\r\n return [safe_dict(x) for x in d]\r\n else:\r\n return d", "def safe_update(dict_to, dict_from):\n for key, val in dict(dict_from).iteritems():\n if key in dict_to:\n raise KeyError(key)\n dict_to[key] = val\n return dict_to", "def pivot_nested_dict(nested_dict):\r\n\r\n reverse_nest_dict = {} #Create an empty dictionary\r\n for k, v in nested_dict.items(): #Iterate through each pair of elements\r\n for k2, v2 in v.items(): #Iterate through pair of values\r\n try:\r\n reverse_nest_dict[k2][k] = v2\r\n except KeyError:\r\n reverse_nest_dict[k2] = { k : v2 }\r\n return reverse_nest_dict\r\n \r\n #Create a dictionary that produces a different nested dictionary which\r\n #contains the same values\r", "def flatten_dict(dict_input):\n flattened_dict = dict()\n\n for key, value in dict_input.items():\n if isinstance(value, dict):\n new_keys = sorted(value.keys())\n for new_key in new_keys:\n entry = {key + '_' + new_key: value[new_key]}\n flattened_dict.update(entry)\n else:\n 
entry = {key: value}\n flattened_dict.update(entry)\n\n return flattened_dict", "def copy_obs_dict(obs):\n return {k: np.copy(v) for k, v in obs.items()}", "def flatten_dict(nested):\n flattened = {}\n for key, value in nested.items():\n if isinstance(value, Mapping):\n for subkey, subval in value.items():\n newkey = '.'.join([key, subkey])\n flattened[newkey] = subval\n flatten_dict(flattened)\n else:\n flattened[key] = value\n mappings = [isinstance(value, Mapping) for key, value in flattened.items()]\n if len(set(mappings)) == 1 and set(mappings).pop() is False:\n return flattened\n else:\n return flatten_dict(flattened)", "def nest_dict(dct, keys):\n nested_dict = dct\n for key in reversed(keys):\n nested_dict = RecursiveDict({key: nested_dict})\n return nested_dict", "def flatten_dict(d, sep=' ', parent_key=''):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten_dict(v, sep=sep, parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def dictcopy(dic):\n keys = list(dic.keys())\n values = [list(i) for i in dic.values()]\n return dict(zip(keys,values))", "def delete_keys_from_dict(self, orig_dict, keys_whitelist):\n for k in list(orig_dict.keys()):\n if k not in keys_whitelist:\n del orig_dict[k]\n\n for v in orig_dict.values():\n if isinstance(v, dict):\n self.delete_keys_from_dict(v, keys_whitelist)\n\n return orig_dict", "def flatten(self):\n flat = {}\n for d in self.dicts:\n flat.update(d)\n return flat", "def decode_dict(d):\n\n new = {}\n for key, value in d.items():\n try:\n new_value = value.decode()\n except: new_value = value\n if isinstance(new_value, str) and new_value and new_value[0] == \"\\x00\":\n new_value = new_value.encode()\n if isinstance(new_value, bytes):\n new_value = parse_binary_field(new_value)\n if isinstance(new_value, list) and new_value:\n if isinstance(new_value[0], dict):\n new_value = [decode_dict(x) for x in new_value]\n elif isinstance(new_value[0], bytes):\n new_value = [x.decode() for x in new_value]\n new[key.decode() if isinstance(key, bytes) else key] = new_value\n return new", "def _unflatten_incr_state(\n self, flat_incr_state: Dict[str, torch.Tensor]\n ) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]:\n structured_incr_state = defaultdict(lambda: defaultdict(dict))\n for key, state in flat_incr_state.items():\n layer_idx_str, attn_type, state_type = key.split('__')\n structured_incr_state[int(layer_idx_str)][attn_type][state_type] = state\n return dict({k: dict(v) for k, v in structured_incr_state.items()})\n # Turn the nested defaultdicts back into regular dicts", "def filter_dict(fdict, mask):\n\n if fdict is None:\n fdict = dict()\n\n if mask is None:\n mask = []\n\n return {k: v for (k, v) in fdict.items() if k in mask}", "def ordered_dict_to_dict(d: OrderedDict) -> dict:\n return loads(dumps(d))" ]
[ "0.678866", "0.63871247", "0.63075334", "0.6243246", "0.61387753", "0.61324066", "0.5833921", "0.56678116", "0.5610028", "0.5581237", "0.547987", "0.528664", "0.5166779", "0.51488274", "0.5131935", "0.513004", "0.5127286", "0.51143354", "0.5108335", "0.5104038", "0.50658894", "0.5040167", "0.50181496", "0.4999344", "0.49814385", "0.49768424", "0.49741423", "0.4973419", "0.49233168", "0.491484", "0.49138203", "0.48874775", "0.4845291", "0.48338786", "0.48071483", "0.48070642", "0.4794826", "0.47861212", "0.47770166", "0.47766498", "0.47748724", "0.47518197", "0.47490755", "0.47308335", "0.472897", "0.47069323", "0.46998808", "0.46948013", "0.46934924", "0.46824077", "0.4673574", "0.46612746", "0.46547586", "0.46471268", "0.46331736", "0.46262282", "0.46069288", "0.46064824", "0.45996463", "0.4591553", "0.45708337", "0.45656824", "0.45608354", "0.45595428", "0.45564926", "0.45507422", "0.45342037", "0.4513698", "0.4507084", "0.44986528", "0.4494859", "0.44940928", "0.44844913", "0.4481367", "0.44805962", "0.44750804", "0.44679326", "0.44634515", "0.44545266", "0.4453122", "0.44472495", "0.44472328", "0.4442233", "0.4441484", "0.44407386", "0.44305772", "0.44304478", "0.44259986", "0.44226974", "0.44086206", "0.44000208", "0.4397135", "0.43933767", "0.43907094", "0.4386281", "0.4384898", "0.43798634", "0.43703642", "0.43494862", "0.43472135" ]
0.80311483
0
This method is used to postprocess the form data. By default, it returns the raw `form.data` dictionary.
def process_step(self, form):
    #print(form.data)
    #print(form.data)
    #print(self)
    institution = {}
    inst_list = []
    if self.steps.current == '1':
        institution['institution'] = form.data['1-0-institution']
        institution['date_from'] = form.data['1-0-date_from']
        institution['date_to'] = form.data['1-0-date_to']
        inst_list.append(institution)
        inst_keys = dict(form.data.lists())
        #Create dictionary dynamically for the other institutions incase more than two institutions are entered
        if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is list:
            inst_list2 = []
            #Add institutions
            for i, insti in enumerate(inst_keys.get('1-NaN-institution')):
                inst_i = {}
                #print(i)
                date_from = inst_keys['1-NaN-date_from'][i]
                date_to = inst_keys['1-NaN-date_to'][i]
                course_duration = inst_keys['1-NaN-course_duration'][i]
                inst_i['institution'] = insti
                inst_i['date_from'] = date_from
                inst_i['date_to'] = date_to
                inst_list2.append(inst_i)
            #print(inst_list2)
            inst_list.extend(inst_list2)
        #Create dictionary dynamically for the other institutions incase more than two institutions are entered
        if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is not list:
            inst_0 = {}
            inst_0['institution'] = form.data['1-NaN-institution']
            inst_0['date_from'] = form.data['1-NaN-date_from']
            inst_0['date_to'] = form.data['1-NaN-date_to']
            inst_0['course_duration'] = form.data['1-NaN-course_duration']
            #inst_0['achievements'] = ''
            inst_list.append(inst_0)
        #Add the entered information to a session object
        self.request.session['institution'] = inst_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_post_data(self):\n if not self._form_data:\n self._form_data = async_to_sync(self.request.form)()\n return self._form_data", "def get_form_data(self) -> dict:\n with logging.LogCall(__file__, \"get_form_data\", self.__class__):\n return self.serialize()", "def form_data(self) -> dict:\n return copy.deepcopy(self.data)", "def get_form_data(self, request):\n return request.session[self.id]['form_data']", "def get_formdata(self):\n formdata = request.json\n return wtforms_json.MultiDict(\n wtforms_json.flatten_json(self.__class__, formdata)\n ) if formdata else None", "def req_data(self):\n return (self.request.data if hasattr(self.request, 'data') else\n self.request.POST)", "async def read_parse_form_data(self):\n # TODO: Probably there is better solution how to handle\n # request body, at least for simple urlencoded forms - by processing\n # chunks instead of accumulating payload.\n gc.collect()\n if b'Content-Length' not in self.headers:\n return {}\n # Parse payload depending on content type\n if b'Content-Type' not in self.headers:\n # Unknown content type, return unparsed, raw data\n return {}\n size = int(self.headers[b'Content-Length'])\n if size > self.params['max_body_size'] or size < 0:\n raise HTTPException(413)\n data = await self.reader.readexactly(size)\n # Use only string before ';', e.g:\n # application/x-www-form-urlencoded; charset=UTF-8\n ct = self.headers[b'Content-Type'].split(b';', 1)[0]\n try:\n if ct == b'application/json':\n return json.loads(data)\n elif ct == b'application/x-www-form-urlencoded':\n return parse_query_string(data.decode())\n except ValueError:\n # Re-generate exception for malformed form data\n raise HTTPException(400)", "def form_data(self):\n from couchforms import XMLSyntaxError\n from .utils import convert_xform_to_json, adjust_datetimes\n from corehq.form_processor.utils.metadata import scrub_form_meta\n xml = self.get_xml()\n try:\n form_json = convert_xform_to_json(xml)\n except XMLSyntaxError:\n return {}\n # we can assume all sql domains are new timezone domains\n with force_phone_timezones_should_be_processed():\n adjust_datetimes(form_json)\n\n scrub_form_meta(self.form_id, form_json)\n return form_json", "def post_data(request):\n if is_post(request):\n return request.POST\n return None", "def get_post_data(self):\n json_data = ''\n\n # check if JSON is passed as a file or as a body of POST request\n if self.request.files:\n json_data = self.request.files['file'][0][\n 'body'] # pick up only first file (index 0)\n elif self.request.body:\n json_data = self.request.body\n\n try:\n data = json.loads(json_data)\n except ValueError:\n data = None\n return data", "def deserialize_form(data):\r\n return QueryDict(query_string=unicode(data).encode('utf-8'))", "def _prepare_multipart_form_data(data):\n output = dict()\n for key in data:\n output[key] = (None, data[key])\n return output", "def form(self):\n if self._form is None:\n if self.content_type is None:\n return None\n mime_type = self.content_type.split(';')[0]\n if mime_type != 'application/x-www-form-urlencoded':\n return None\n self._form = self._parse_urlencoded(self.body)\n return self._form", "def PostData(self):\n if self._postdata is None:\n return None\n if len(self._postdata) == 0:\n return None\n return self._postdata", "def from_form_data(self, data: dict = {}):\n with logging.LogCall(__file__, \"from_form_data\", self.__class__):\n pass", "def compress(self, data_list):\n data = {}\n if data_list:\n data = dict(\n (f.name, data_list[i]) for i, f in 
enumerate(self.form))\n\n f = self.form.__class__(data)\n f.is_valid()\n return f.cleaned_data\n return data", "def post_process(cls, data):\n return data", "def get_plugin_form_data(self):\n form = self.get_form()\n\n return self._get_plugin_form_data(form.plugin_data_fields)", "def process_form(request):\n raw_data = request.form\n data = raw_data.copy()\n data['resources'] = request.form.getlist('resources')\n if request.remote_addr == '127.0.0.1':\n data['ip'] = '100.7.27.72'\n else:\n data['ip'] = request.remote_addr\n data['user_agent'] = request.user_agent.string\n data['@timestamp'] = datetime.utcnow()\n latitude = float(data['latitude'])\n longitude = float(data['longitude'])\n data['location'] = [latitude, longitude]\n return data", "def _get_normalized_form_data(self, form, key):\n data = {} if form.data else form.initial\n prefix = 'gc{}-'.format(key)\n\n for field_name in form.data:\n normalized_field_name = field_name[len(prefix):]\n\n if field_name in form.data and field_name.startswith(prefix) and form.data[field_name]:\n data[normalized_field_name] = form.data[field_name]\n\n for field_name in data:\n if field_name == 'quantity':\n data[field_name] = str(data[field_name])\n\n return data", "def get_processed_form_data(form, form_element_entries):\n keys_to_remove = get_ignorable_form_fields(form_element_entries)\n values_to_remove = get_ignorable_form_values()\n\n field_name_to_label_map = \\\n get_field_name_to_label_map(form, keys_to_remove, values_to_remove)\n\n keys_to_remove = list(field_name_to_label_map.keys())\n\n return (\n field_name_to_label_map,\n get_cleaned_data(form, keys_to_remove, values_to_remove)\n )", "def get_raw_data_form(self, data, view, method, request):\n # See issue #2089 for refactoring this.\n serializer = getattr(data, 'serializer', None)\n if serializer and not getattr(serializer, 'many', False):\n instance = getattr(serializer, 'instance', None)\n if isinstance(instance, Page):\n instance = None\n else:\n instance = None\n\n with override_method(view, request, method) as request:\n # Check permissions\n if not self.show_form_for_method(view, method, request, instance):\n return\n\n # If possible, serialize the initial content for the generic form\n default_parser = view.parser_classes[0]\n renderer_class = getattr(default_parser, 'renderer_class', None)\n if hasattr(view, 'get_serializer') and renderer_class:\n # View has a serializer defined and parser class has a\n # corresponding renderer that can be used to render the data.\n\n if method in ('PUT', 'PATCH'):\n serializer = view.get_serializer(instance=instance)\n else:\n serializer = view.get_serializer()\n\n # Render the raw data content\n renderer = renderer_class()\n accepted = self.accepted_media_type\n context = self.renderer_context.copy()\n context['indent'] = 4\n\n # strip HiddenField from output\n data = serializer.data.copy()\n for name, field in serializer.fields.items():\n if isinstance(field, serializers.HiddenField):\n data.pop(name, None)\n content = renderer.render(data, accepted, context)\n # Renders returns bytes, but CharField expects a str.\n content = content.decode()\n else:\n content = None\n\n # Generate a generic form that includes a content type field,\n # and a content field.\n media_types = [parser.media_type for parser in view.parser_classes]\n choices = [(media_type, media_type) for media_type in media_types]\n initial = media_types[0]\n\n class GenericContentForm(forms.Form):\n _content_type = forms.ChoiceField(\n label='Media type',\n choices=choices,\n 
initial=initial,\n widget=forms.Select(attrs={'data-override': 'content-type'})\n )\n _content = forms.CharField(\n label='Content',\n widget=forms.Textarea(attrs={'data-override': 'content'}),\n initial=content,\n required=False\n )\n\n return GenericContentForm()", "def _parse_request(self):\n if len(self.request.body) > 0:\n try:\n return tornado.escape.json_decode(self.request.body)\n except Exception:\n #Not Json, Using Form data\n return self.request.arguments\n else:\n return self.request.arguments", "def _get_plugin_form_data(self, fields):\n form_data = {}\n for field, default_value in fields:\n try:\n form_data.update(\n {field: self.plugin_data.get(field, default_value)}\n )\n except Exception as err:\n logger.debug(\n \"Error in class %s. Details: %s\",\n self.__class__.__name__,\n str(err)\n )\n return form_data", "def extract_data_from_form(\n self, data: JobForm, many: bool, **kwargs\n ) -> Dict[str, Any]:\n\n def slugify(text: str) -> str:\n return text.lower().strip().replace(\" \", \"-\")\n\n return {\n \"experiment_name\": slugify(data.experiment_name.data),\n \"queue\": slugify(data.queue.data),\n \"timeout\": data.timeout.data or None,\n \"entry_point\": data.entry_point.data,\n \"entry_point_kwargs\": data.entry_point_kwargs.data or None,\n \"depends_on\": data.depends_on.data or None,\n \"workflow\": data.workflow.data,\n }", "def get_form_kwargs(self):\n kwargs = {'instance': self.object}\n if self.request.method in ('POST', 'PUT'):\n kwargs.update({\n 'data': self.request.POST,\n 'files': self.request.FILES,\n })\n return kwargs", "def ProcessFormData(self, mr, post_data):\n raise MethodNotSupportedError()", "def get_data(self):\n data = self.request.body\n\n try:\n data = json.loads(data)\n except ValueError:\n data = None\n return data", "def _form_data(self, response):\n SQFI_audit_type = response.xpath(self.filters[6]).extract_first()\n SQFI_audit_type_val = response.xpath(self.filters[7]).extract_first()\n food_sector_categories = response.xpath(self.filters[8]).extract_first()\n food_sector_categories_val = response.xpath(self.filters[9]).extract()\n audit_rating = response.xpath(self.filters[10]).extract_first()\n audit_rating_val = response.xpath(self.filters[11]).extract()\n country = response.xpath(self.filters[12]).extract_first()\n country_val = response.xpath(self.filters[13]).extract()\n form_data = {\n SQFI_audit_type: SQFI_audit_type_val,\n food_sector_categories: food_sector_categories_val,\n audit_rating: audit_rating_val,\n country: country_val,\n }\n return form_data", "def _parse_form_data():\n form_data = str(request.get_data())\n if form_data[:2] == \"b'\":\n # looks like b'A=1&B=2'\n form_data = form_data[2:-1]\n for key, values in parse_qs(form_data).items():\n yield (key, values[0])", "def decode_request_content(self, datafile):\n content_type = self.headers.get(\"Content-Type\", \"notype\").lower()\n if 'application/x-www-form-urlencoded' in content_type:\n # The data is provided in a urlencoded format. 
Unencode it into\n # cgi FieldStorage/MiniFieldStorage objects in a form container\n form = cgi.FieldStorage(\n fp=datafile,\n headers=self.headers,\n environ=dict(REQUEST_METHOD='POST',\n CONTENT_TYPE=self.headers['Content-Type'])\n )\n itemdict = {}\n for item in form.list:\n if item.name == 'data':\n itemdict['data'] = \\\n SimpleLogRequestHandler.extract_form_fields(item)\n elif item.name == 'layout':\n # http://log4javascript.org/docs/manual.html#layouts\n itemdict['layout'] = item.value\n return itemdict\n else:\n self.send_response(501,\n \"Content-Type %r not supported\" % content_type)\n self.send_header(\"Content-length\", \"0\")\n self.end_headers()\n return None", "def get_form_kwargs(self):\n kwargs = {'initial': self.get_initial()}\n if self.request.method in ('POST', 'PUT'):\n kwargs.update({\n 'data': self.request.POST,\n 'files': self.request.FILES,\n })\n kwargs.update(self.get_additional_form_args())\n return kwargs", "def get_form_kwargs(self) -> Dict[str, Any]:\n kwargs = super().get_form_kwargs()\n\n if self.request.GET:\n kwargs['data'] = deepcopy(self.request.GET)\n\n return kwargs", "def get_postprocess(self) -> Dict:\n raise NotImplementedError", "def post_dict(self):\r\n contents = self.request_content\r\n\r\n # The POST dict will contain a list of values for each key.\r\n # None of our parameters are lists, however, so we map [val] --> val\r\n # If the list contains multiple entries, we pick the first one\r\n try:\r\n post_dict = urlparse.parse_qs(contents, keep_blank_values=True)\r\n return {\r\n key: list_val[0]\r\n for key, list_val in post_dict.items()\r\n }\r\n\r\n except:\r\n return dict()", "def pop_form(env):\n if 'wsgi.input' not in env:\n return None\n post_env = env.copy()\n post_env['QUERY_STRING'] = ''\n form = cgi.FieldStorage(\n fp=env.pop('wsgi.input'),\n environ=post_env,\n keep_blank_values=True\n )\n return {k: form[k].value for k in form}", "def get_cleaned_data(self, request, step):\n return self._get_state(request).form_data.get(step.slug, None)", "def get_context_data(self):\n return {\"form\": self.get_form()}", "def parse_post(request):\n\n fp = StringIO(request.raw_body)\n\n headers = {}\n headers['content-type'] = request.message.get('content-type')\n headers['content-length'] = request.message.get('content-length')\n\n environ = {}\n environ['REQUEST_METHOD'] = request.method\n\n boundary = request.message.get('boundary')\n\n post = cgi.FieldStorage( fp = fp\n , headers = headers\n , outerboundary = boundary\n , environ = environ\n , keep_blank_values = True\n , strict_parsing = False\n )\n\n return post", "def initial_form_data(self, request, step, form):\n return None", "def get_form_kwargs(self):\n kwargs = {\n 'initial': self.get_initial(),\n 'prefix': self.get_prefix(),\n }\n\n if self.request.method in ('POST', 'PUT'):\n kwargs.update({\n 'data': self.request.POST,\n 'files': self.request.FILES,\n })\n #print('kwargs',kwargs)\n return kwargs", "def process(self, formdata=None, obj=None, data=None, **kwargs):\n formdata = self.meta.wrap_formdata(self, formdata)\n\n if data is not None:\n # XXX we want to eventually process 'data' as a new entity.\n # Temporarily, this can simply be merged with kwargs.\n kwargs = dict(data, **kwargs)\n\n for name, field, in iteritems(self._fields):\n if obj is not None and hasattr(obj, name):\n # This if statement is the only change made to the original\n # code for BaseForm.process() - Dawn\n if name == 'studies':\n field.process(formdata, list(obj.studies.values()))\n else:\n 
field.process(formdata, getattr(obj, name))\n elif name in kwargs:\n field.process(formdata, kwargs[name])\n else:\n field.process(formdata)", "def forms(self):\r\n forms = FormsDict()\r\n for name, item in self.POST.iterallitems():\r\n if not hasattr(item, 'filename'):\r\n forms[name] = item\r\n return forms", "def clean(self) -> Dict:\n # The form must be multipart\n if not self.is_multipart():\n self.add_error(\n None,\n _('CSV upload form is not multiform'),\n )\n return {}\n\n form_data = super().clean()\n\n if form_data['skip_lines_at_top'] < 0:\n self.add_error(\n 'skip_lines_at_top',\n _('This number has to be zero or positive'),\n )\n return form_data\n\n if form_data['skip_lines_at_bottom'] < 0:\n self.add_error(\n 'skip_lines_at_bottom',\n _('This number has to be zero or positive'),\n )\n return form_data\n\n # Process CSV file using pandas read_csv\n try:\n self.data_frame = services.load_df_from_csvfile(\n TextIOWrapper(\n self.files['data_file'].file,\n encoding=self.data.encoding),\n self.cleaned_data['skip_lines_at_top'],\n self.cleaned_data['skip_lines_at_bottom'])\n except Exception as exc:\n self.add_error(\n None,\n _('File could not be processed ({0})').format(str(exc)))\n return form_data\n\n # Check the validity of the data frame\n self.validate_data_frame()\n\n return form_data", "def _template_data(self):\n return {\"form\": self.form.render()}", "def json_post_to_dict(form):\n message = str(form.json_message.data)\n try:\n dict_post = json.loads(message)\n except json.decoder.JSONDecodeError as e:\n print(\"json_post_to_dict: json decoder failed to parse message\")\n print(e)\n return None\n return dict_post", "def get_form_kwargs(self):\n\n kwargs = super().get_form_kwargs()\n kwargs.update({\n 'data': self.request.GET,\n })\n\n return kwargs", "def minimal_form_data():\n\n form_data = { \n 'status': '0',\n 'title': 'Recurso de teste',\n 'description': 'Recurso para testes',\n 'abstract': 'Resumo',\n \n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0', \n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0', \n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0',\n }\n\n return form_data", "def get_form_kwargs(self):\n kwargs = {\n 'initial': self.get_initial(),\n 'prefix': self.get_prefix(),\n }\n\n if self.request.method in ('POST', 'PUT'):\n kwargs.update({\n 'data': self.request.POST,\n 'files': self.request.FILES,\n })\n return kwargs", "def post(self):\r\n data = request.form\r\n return create(data=data)", "def formdata_for(self, skip):\n formdata = self.formdata.copy()\n # skip=0 doesn't get put in the query\n if skip:\n formdata['skip'] = int(round(skip))\n return formdata", "def option_post_form(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionPostForm/')))", "def post(self):\n file_ = self.verify_param('file', cgi.FieldStorage)\n data, filemask = self.build_post_data(file_)\n return data, filemask", "def form_data(self) -> Optional[dict]:\n self.data[\"id\"] = self.id\n self.data[\"week\"] = self.week\n self.data[\"time\"] = self.time\n self.data[\"name\"] = self._service.filter(\"id\", self.id).first()[0].get(\"name\")\n\n return self.data", "def prepare_data_for_requests_post(username, password, company_id_form=\"aqn.accountsight.com\"):\n raise NotImplementedError\n # data = {\"MIME Type\": 
\"application/x-www-form-urlencoded\",\n # \"companyIdForm\": company_id_form,\n # \"userName\": username,\n # \"userPwd\": password}\n #\n # # format dictionary as JSON\n # data = json.dumps(data)\n # # Convert to String\n # data = str(data)\n # # Convert string to byte\n # data = data.encode('utf-8')\n # return data", "def _parse_input(self):\r\n def _convert(item):\r\n if isinstance(item, list):\r\n return [converters.to_unicode(i.value) for i in item]\r\n if item.filename:\r\n return MultipartFile(item)\r\n return converters.to_unicode(item.value)\r\n fs = cgi.FieldStorage(fp=self._environ['wsgi.input'], environ=self._environ, keep_blank_values=True)\r\n inputs = dict()\r\n for key in fs:\r\n inputs[key] = _convert(fs[key])\r\n return inputs", "def _process(self):\n self.output[\"data\"] = get_form_data(self.kwargs[\"collect\"].ona_scan_form_pk)", "def request_data():\n if request.method in ('POST', \"PUT\"):\n return request.get_json(force=True)\n else:\n return request.values", "def clean_request_data(self):\n clean_data = self.request.registry.get(CLEAN_DATA, {})\n return clean_data", "def form_data(self, clear=[], **kwargs):\n form_data = {\n 'payer_name': 'William Williams',\n 'billing_address': '1 Main Street',\n 'billing_city': 'Anytown',\n 'country': 'USA',\n 'billing_state': 'MD',\n 'billing_zip': '20852',\n 'payment_type': 'CreditCard',\n 'project_code': '15-4FF',\n 'payment_amount': '3000',\n 'information_consent': True,\n }\n for key in clear:\n del form_data[key]\n for key, value in kwargs.items():\n form_data[key] = value\n return form_data", "def process_formdata(self, valuelist):\n if valuelist:\n self.data = self.get_tags_from_string(valuelist[0])\n else:\n self.data = []", "def _clean(self):\n return self._cleaned_data", "def test_clean_returns_cleaned_data(self):\n original_data = self.form.data\n original_fields = self.form.fields\n original_computed_fields = self.form.computed_fields\n original_errors = getattr(self.form, '_errors', None)\n original_cleaned_data = getattr(self.form, 'cleaned_data', None)\n self.form.data = original_data.copy()\n self.form.fields = original_fields.copy()\n self.form.computed_fields = original_computed_fields.copy()\n self.form._errors = ErrorDict() if original_errors is None else original_errors.copy()\n new_cleaned_data = {self.form.name_for_user: 'test_value', self.form.name_for_email: 'test_value'}\n new_cleaned_data[self.form.USERNAME_FLAG_FIELD] = False\n self.form.cleaned_data = new_cleaned_data.copy()\n expected_fields = {**original_fields, **original_computed_fields}\n\n cleaned_data = self.form.clean()\n self.assertDictEqual(new_cleaned_data, cleaned_data)\n self.assertDictEqual(expected_fields, self.form.fields)\n\n self.form.data = original_data\n self.form.fields = original_fields\n self.form.computed_fields = original_computed_fields\n self.form._errors = original_errors\n self.form.cleaned_data = original_cleaned_data\n if original_errors is None:\n del self.form._errors\n if original_cleaned_data is None:\n del self.form.cleaned_data", "def extract(environ, empty=False, err=False):\n formdata = cgi.parse(environ[\"wsgi.input\"], environ, empty, err)\n # Remove single entries from lists\n for key, value in iter(formdata.items()):\n if len(value) == 1:\n formdata[key] = value[0]\n return formdata", "def post_data_parser(post_data):\n post_data_json = {}\n for parameter in post_data.rsplit(\"&\"):\n post_data_json[parameter.rsplit(\"=\")[0]] = parameter.rsplit(\"=\")[1]\n return post_data_json", "def _doProcess(self, 
form, write, request):\n args = request.args.copy()\n kw = {}\n for field in form:\n inputType, displayName, inputName, inputValue = field[:4]\n if inputType == 'checkbox':\n if request.args.has_key('__checkboxes__'):\n if inputName in request.args['__checkboxes__']:\n formData = 1\n else:\n formData = 0\n else:\n formData = 0\n elif inputType in ['checkgroup', 'radio', 'multimenu']:\n if args.has_key(inputName):\n formData = args[inputName]\n del args[inputName]\n else:\n formData = []\n else:\n if not args.has_key(inputName):\n raise FormInputError(\"missing field %s.\" % repr(inputName))\n formData = args[inputName]\n del args[inputName]\n if not len(formData) == 1:\n raise FormInputError(\"multiple values for field %s.\" %repr(inputName))\n formData = formData[0]\n method = self.formParse.get(inputType)\n if method:\n try:\n formData = method(formData)\n except:\n raise FormInputError(\"%s: %s\" % (displayName, \"error\"))\n kw[inputName] = formData\n submitAction = args.get('submit')\n if submitAction:\n submitAction = submitAction[0]\n for field in ['submit', '__formtype__', '__checkboxes__']:\n if args.has_key(field):\n del args[field]\n if args and not self.formAcceptExtraArgs:\n raise FormInputError(\"unknown fields: %s\" % repr(args))\n return apply(self.process, (write, request, submitAction), kw)", "def process_request(self, request):\n\n # Does the request contain a JSON payload?\n content_type = request.META.get('CONTENT_TYPE', '')\n if content_type != '' and 'application/json' in content_type:\n\n # Ignore empty payloads (e.g. for deletes)\n content_length = 0\n if request.META.get('CONTENT_LENGTH', '') != '':\n content_length = int(request.META.get('CONTENT_LENGTH', 0))\n if content_length > 0:\n try:\n # Replace request.POST with flattened dictionary from JSON\n decoded_dict = simplejson.loads(request.raw_post_data)\n request.POST = request.POST.copy()\n request.POST = self._flatten_dict(decoded_dict)\n except:\n return HttpResponse('Invalid JSON', status=400)", "def process_data(self, data):\n return data", "def post(self):\n data = request.json\n return save_new_post(data=data)", "def serialize_form(self, form):\n data = []\n for field_name, field in form.fields.items():\n if field_name in form.cleaned_data:\n form_value = form.cleaned_data[field_name]\n display_value = None\n if isinstance(form_value, models.Model):\n ctype = ContentType.objects.get_for_model(form_value)\n form_value = '{0}{1}.{2}:{3}'.format(\n _CONTENT_TYPE_PREFIX,\n ctype.app_label,\n ctype.model,\n form_value.pk\n )\n elif isinstance(form_value, UploadedFile):\n file_name = _fs.get_available_name(form_value.name)\n file_path = _fs.path(file_name)\n with open(file_path, 'wb+') as destination:\n for chunk in form_value.chunks():\n destination.write(chunk)\n form_value = file_path\n display_value = file_name\n data.append({\n 'name': field_name,\n 'label': force_text(field.label) if field.label else None,\n 'value': form_value,\n 'display_value': display_value,\n })\n return data", "def clean(self):\n print(self.data)\n print(self.errors)\n cleaned_data = self.cleaned_data\n print(cleaned_data)\n return cleaned_data", "def post_dict(self):\n\n if isinstance(self.request_content, bytes):\n contents = self.request_content.decode('utf-8')\n else:\n contents = self.request_content\n\n # The POST dict will contain a list of values for each key.\n # None of our parameters are lists, however, so we map [val] --> val\n # If the list contains multiple entries, we pick the first one\n try:\n post_dict = 
six.moves.urllib.parse.parse_qs(contents, keep_blank_values=True)\n return {\n key: list_val[0]\n for key, list_val in post_dict.items()\n }\n\n except: # lint-amnesty, pylint: disable=bare-except\n return dict()", "def getFormData(page):\n soup = BeautifulSoup(page, 'html.parser')\n viewstate = soup.find('input', {'id': '__VIEWSTATE' })['value']\n generator = soup.find('input', {'id': '__VIEWSTATEGENERATOR'})['value']\n validation = soup.find('input', {'id': '__EVENTVALIDATION' })['value']\n return (viewstate, generator, validation)", "def convert_for_form(data):\n if \"name\" in data:\n data[\"full_name\"] = data[\"name\"].get(\"value\")\n try:\n data[\"given_names\"] = data[\"name\"].get(\n \"value\").split(\",\")[1].strip()\n except IndexError:\n data[\"given_names\"] = \"\"\n data[\"family_name\"] = data[\"name\"].get(\"value\").split(\",\")[0].strip()\n data[\"display_name\"] = data[\"name\"].get(\"preferred_name\")\n data[\"status\"] = data[\"name\"].get(\"status\", \"\").lower()\n if \"urls\" in data:\n data[\"websites\"] = []\n for url in data[\"urls\"]:\n if \"description\" not in url:\n data[\"websites\"].append({\"webpage\": url[\"value\"]})\n else:\n if url[\"description\"].lower() == \"twitter\":\n data[\"twitter_url\"] = url[\"value\"]\n elif url[\"description\"].lower() == \"blog\":\n data[\"blog_url\"] = url[\"value\"]\n elif url[\"description\"].lower() == \"linkedin\":\n data[\"linkedin_url\"] = url[\"value\"]\n del data[\"urls\"]\n if \"field_categories\" in data:\n data[\"research_field\"] = data['field_categories']\n if \"positions\" in data:\n data[\"institution_history\"] = []\n for position in data[\"positions\"]:\n if not any(\n [\n key in position for key in ('name', 'rank',\n 'start_year', 'end_year')\n ]\n ):\n if 'email' in position:\n # Only email available, take as public_email\n data[\"public_email\"] = position.get(\"email\")\n continue\n pos = {}\n pos[\"name\"] = position.get(\"institution\", {}).get(\"name\")\n pos[\"rank\"] = position.get(\"rank\", \"\")\n pos[\"start_year\"] = position.get(\"start_date\", \"\")\n pos[\"end_year\"] = position.get(\"end_date\", \"\")\n pos[\"current\"] = True if position.get(\"status\") else False\n pos[\"old_email\"] = position.get(\"old_email\", \"\")\n if position.get(\"email\"):\n pos[\"email\"] = position.get(\"email\", \"\")\n if not data.get(\"public_email\"):\n data[\"public_email\"] = position.get(\"email\")\n data[\"institution_history\"].append(pos)\n data[\"institution_history\"].reverse()\n if 'advisors' in data:\n advisors = data['advisors']\n data['advisors'] = []\n for advisor in advisors:\n adv = {}\n adv[\"name\"] = advisor.get(\"name\", \"\")\n adv[\"degree_type\"] = advisor.get(\"degree_type\", \"\")\n data[\"advisors\"].append(adv)\n if \"ids\" in data:\n for id in data[\"ids\"]:\n try:\n if id[\"type\"] == \"ORCID\":\n data[\"orcid\"] = id[\"value\"]\n elif id[\"type\"] == \"BAI\":\n data[\"bai\"] = id[\"value\"]\n elif id[\"type\"] == \"INSPIRE\":\n data[\"inspireid\"] = id[\"value\"]\n except KeyError:\n # Protect against cases when there is no value in metadata\n pass", "def _get_data(self):\n data = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # skip for factories for now\n continue\n value = getattr(self, name)\n raw_value = field.to_raw(value)\n if isinstance(field, fields.Secret):\n data[f\"__{name}\"] = raw_value\n else:\n data[name] = raw_value\n\n return data", "def parse_post_request(request):\n # type: (django.http.HttpRequest) -> 
Dict[str, Any]\n request_data = json.loads(request.body)\n parameters_dict = {\n PARAMETER_MESSAGE: request_data.get('message'),\n PARAMETER_ENTITY_NAME: request_data.get('entity_name'),\n PARAMETER_STRUCTURED_VALUE: request_data.get('structured_value'),\n PARAMETER_FALLBACK_VALUE: request_data.get('fallback_value'),\n PARAMETER_BOT_MESSAGE: request_data.get('bot_message'),\n PARAMETER_TIMEZONE: request_data.get('timezone'),\n PARAMETER_LANGUAGE_SCRIPT: request_data.get('language_script', ENGLISH_LANG),\n PARAMETER_SOURCE_LANGUAGE: request_data.get('source_language', ENGLISH_LANG),\n PARAMETER_MIN_DIGITS: request_data.get('min_number_digits'),\n PARAMETER_MAX_DIGITS: request_data.get('max_number_digits'),\n PARAMETER_NUMBER_UNIT_TYPE: request_data.get('unit_type'),\n PARAMETER_LOCALE: request_data.get('locale'),\n PARAMETER_RANGE_ENABLED: request_data.get('range_enabled')\n }\n\n return parameters_dict", "def get_register_form_data(cls, pipeline_kwargs):\r\n # Details about the user sent back from the provider.\r\n details = pipeline_kwargs.get('details')\r\n\r\n # Get the username separately to take advantage of the de-duping logic\r\n # built into the pipeline. The provider cannot de-dupe because it can't\r\n # check the state of taken usernames in our system. Note that there is\r\n # technically a data race between the creation of this value and the\r\n # creation of the user object, so it is still possible for users to get\r\n # an error on submit.\r\n suggested_username = pipeline_kwargs.get('username')\r\n\r\n return {\r\n 'email': cls.get_email(details) or '',\r\n 'name': cls.get_name(details) or '',\r\n 'username': suggested_username,\r\n }", "def get_extra_payload(form):", "def prepare_pipeline_dict(request_body, user):\n form_data = json.loads(request_body)\n form_data[\"user\"] = user.pk\n return form_data", "def get_cleaned_data(form, keys_to_remove=[], values_to_remove=[]):\n if not values_to_remove:\n values_to_remove = get_ignorable_form_values()\n\n cleaned_data = copy.copy(form.cleaned_data)\n cleaned_data = clean_dict(\n cleaned_data,\n keys=list(set(cleaned_data.keys()) - set(keys_to_remove)),\n values=values_to_remove\n )\n\n ordered_cleaned_data = OrderedDict()\n for key in form.fields.keys():\n if key in cleaned_data:\n ordered_cleaned_data[key] = cleaned_data[key]\n\n return ordered_cleaned_data", "def clean(self) -> Dict:\n form_data = super().clean()\n\n # # Process Excel file using pandas read_excel\n try:\n self.data_frame = services.load_df_from_excelfile(\n self.files['data_file'],\n form_data['sheet'])\n except Exception as exc:\n self.add_error(\n None,\n _('File could not be processed: {0}').format(str(exc)))\n return form_data\n\n # Check the validity of the data frame\n self.validate_data_frame()\n\n return form_data", "def getform():\n form = cgi.FieldStorage()\n host = form.getvalue('host')\n user = form.getvalue('user')\n passwd = form.getvalue('passwd')\n cert = form.getvalue('cert')\n proxy = form.getvalue('proxy')\n name = form.getvalue('name')\n return (host, user, passwd, cert, proxy, name)", "def get_dynamic_form_fields(self) -> Mapping[str, Any] | None:\n form_fields: Mapping[str, Any] | list[Any] | None = self.data.get(\"dynamic_form_fields\")\n if not form_fields:\n return None\n\n # Although this can be done with dict comprehension, looping for clarity.\n if isinstance(form_fields, list):\n fields = {}\n for field in form_fields:\n if \"name\" in field:\n fields[field[\"name\"]] = field\n return fields\n return form_fields", "def 
deliver_post(data, access=None):\n\n schema = get_post_schema(data)\n return deliver_fields(schema, data, access)", "def form(self):\n\t\treturn self._form", "def postprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]:\n return inputs", "def post_contents(self):\r\n return self._post", "def payload_from_form(form, prefix='', delete=False):\n\n prefix = f'{prefix}-' if prefix else ''\n payload = {f'{prefix}{k}': form[k].value() for k, v in form.fields.items() if form[k].value()}\n if getattr(form.instance, 'id'):\n payload['id'] = form.instance.id\n\n if delete:\n payload['delete'] = True\n return payload", "def get_form_kwargs(self, form_key):\n kwargs = {\n \"initial\": {\"multiform_key\": form_key, **self.get_initial(form_key)},\n }\n if (\n self.request.method in [\"POST\", \"PUT\"]\n and self.request.POST[\"multiform_key\"] == form_key\n ):\n kwargs.update(\n {\"data\": self.request.POST, \"files\": self.request.FILES,}\n )\n return kwargs", "def postprocess(self, data, pagination):\n self.inject_data_hook(data)\n # Serialize ``data`` to python data structures\n python_data = self.serialize_to_python(data)\n # finalize any pending data processing\n self.finalize_pending(data)\n # Package the python_data to a dictionary\n return self.package(python_data, pagination)", "def __call__(self):\n \n form, error, appstruct = self.validate()\n \n data = None\n if error is not None:\n data = self.failure(error)\n if appstruct is not None:\n data = self.success(appstruct)\n return data if data is not None else {}", "def form_data(self, password1, password2=None):\n form_data = {'old_password': self.pwd}\n if password2 is None:\n password2 = password1\n form_data[self.password_field + '1'] = password1\n form_data[self.password_field + '2'] = password2\n return form_data", "def validate(self, data):\n # calling subserializer validate method (fields, and presets)\n data = super(FormidableSerializer, self).validate(data)\n # we check every field define in presets are define inside the form.\n if 'fields' in data and 'presets' in data:\n data = self.check_presets_cohesion(data)\n return data", "def get_form_kwargs(self):\n self.object = self.get_object()\n kwargs = super().get_form_kwargs()\n return kwargs", "def process_form_submission(self, form):\n return self.lookup_response_class.process_form_submission(self, form)", "def serialize_to_python(self, data):\n # NOTE: The request level field selection doesn not work if the\n # handler's ``template`` attribute uses ``django-preserialize``'s\n # pseudo selectors\n # See\n # <https://github.com/bruth/django-preserialize#my-model-has-a-ton-of-fields-and-i-dont-want-to-type-them-all-out-what-do-i-do>\n # It only works when the ``fields`` are defined one by one in a list.\n field_selection = set(self.request.GET.getlist('field'))\n if field_selection:\n intersection = field_selection.intersection(\n set(self.template['fields'])\n )\n template = {key: value for key, value in self.template.items()}\n template['fields'] = intersection\n return preserializer.serialize(data, **template)\n\n return preserializer.serialize(data, **self.template)", "def ingest_form_vars(request):\n data = {}\n for param, value in request.arguments.items():\n for i, item in enumerate(value):\n item = item.decode('utf-8')\n item = strings.as_numeric(item)\n value[i] = item\n data[param] = value[0] if len(value) == 1 else value\n return data", "def complete_form_data():\n\n missing_fields = {\n 'link' : 'http://bvsalud.org',\n 'originator' : 'BIREME',\n 'source_type': 
1,\n 'source_language': 1,\n 'originator_location' : 1,\n\n 'main-descriptor-content_type-object_id-TOTAL_FORMS' : '1',\n\n 'main-descriptor-content_type-object_id-0-id' : '',\n 'main-descriptor-content_type-object_id-0-text' : 'malaria',\n 'main-descriptor-content_type-object_id-0-code' : '^d8462',\n 'main-descriptor-content_type-object_id-0-status' : '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS' : '1',\n 'main-resourcethematic-content_type-object_id-0-thematic_area' : '1',\n 'main-resourcethematic-content_type-object_id-0-status' : '0',\n }\n\n complete_form_data = minimal_form_data()\n complete_form_data.update(missing_fields)\n\n return complete_form_data", "def decode_input_data(self, rawdata):\n return self.get_content_type().loads(rawdata, self)", "def _getPostData(data):\n\tparams = data.split(\"\\r\\n\\r\\n\")[1]\n\treturn params.split(\"&\")", "def get_session_form_data_as_dict(self, request, job_name):\n data_dict = OrderedDict()\n for value_dict in self.get_session_form_data_as_list(request, job_name):\n data_dict[value_dict['name']] = value_dict['value']\n return data_dict" ]
[ "0.79507184", "0.76475173", "0.738958", "0.73285085", "0.7258168", "0.6955681", "0.6792374", "0.65343493", "0.65144867", "0.6490603", "0.6459408", "0.6415936", "0.63819605", "0.63031137", "0.6298642", "0.6277921", "0.62373143", "0.6232082", "0.62192225", "0.60904664", "0.6056065", "0.6030949", "0.60233426", "0.59961677", "0.5961773", "0.5942794", "0.59405684", "0.5911468", "0.5902201", "0.58929735", "0.5868904", "0.5819991", "0.57929116", "0.5771258", "0.57638115", "0.5761413", "0.5756662", "0.5751492", "0.5738831", "0.5735935", "0.5733567", "0.57205945", "0.57038295", "0.56908286", "0.5674853", "0.56596625", "0.5657592", "0.5654229", "0.56441927", "0.56214845", "0.56173366", "0.5604944", "0.55999845", "0.55869734", "0.5584914", "0.5581603", "0.5567569", "0.5565684", "0.55569243", "0.5556009", "0.5545823", "0.5509508", "0.55063385", "0.547971", "0.54491097", "0.5447577", "0.5441229", "0.54289657", "0.54244065", "0.54194754", "0.5417856", "0.5408662", "0.5407582", "0.5394647", "0.5386529", "0.53654724", "0.535761", "0.535322", "0.5351853", "0.5345795", "0.53357124", "0.5326908", "0.5321672", "0.5315899", "0.530742", "0.53065944", "0.5304472", "0.5286497", "0.5285198", "0.5283171", "0.5274629", "0.5269422", "0.52693063", "0.52627575", "0.5259702", "0.52590364", "0.5253005", "0.52503073", "0.5234241", "0.52318424", "0.52236915" ]
0.0
-1
Ensure that awsmarketplace actions from all the different awsmarketplace SAR pages are present in the IAM definition.
def test_services_with_multiple_pages_aws_marketplace(self):
    # Overlap: AWS Marketplace, Marketplace Catalog, and AWS Marketplace Entitlement service, AWS Marketplace Image Building Service, AWS Marketplace Metering Service, AWS Marketplace Private Marketplace, and AWS Marketplace Procurement Systems
    # AWS Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplace.html
    self.assertTrue("aws-marketplace:AcceptAgreementApprovalRequest" in self.all_actions)
    # AWS Marketplace Catalog: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacecatalog.html
    self.assertTrue("aws-marketplace:CancelChangeSet" in self.all_actions)
    # AWS Marketplace Entitlement Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceentitlementservice.html
    self.assertTrue("aws-marketplace:GetEntitlements" in self.all_actions)
    # AWS Marketplace Image Building Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceimagebuildingservice.html
    self.assertTrue("aws-marketplace:DescribeBuilds" in self.all_actions)
    # AWS Marketplace Metering Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacemeteringservice.html
    self.assertTrue("aws-marketplace:BatchMeterUsage" in self.all_actions)
    # AWS Marketplace Private Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprivatemarketplace.html
    self.assertTrue("aws-marketplace:AssociateProductsWithPrivateMarketplace" in self.all_actions)
    # AWS Marketplace Procurement Systems: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprocurementsystemsintegration.html
    self.assertTrue("aws-marketplace:DescribeProcurementSystemConfiguration" in self.all_actions)

    results = get_actions_for_service("aws-marketplace")
    actions = [
        "aws-marketplace:AcceptAgreementApprovalRequest",
        "aws-marketplace:BatchMeterUsage",
        "aws-marketplace:CancelAgreementRequest",
        "aws-marketplace:CancelChangeSet",
        "aws-marketplace:CompleteTask",
        "aws-marketplace:DescribeAgreement",
        "aws-marketplace:DescribeBuilds",
        "aws-marketplace:DescribeChangeSet",
        "aws-marketplace:DescribeEntity",
        "aws-marketplace:DescribeProcurementSystemConfiguration",
        "aws-marketplace:DescribeTask",
        "aws-marketplace:GetAgreementApprovalRequest",
        "aws-marketplace:GetAgreementRequest",
        "aws-marketplace:GetAgreementTerms",
        "aws-marketplace:GetEntitlements",
        "aws-marketplace:ListAgreementApprovalRequests",
        "aws-marketplace:ListAgreementRequests",
        "aws-marketplace:ListBuilds",
        "aws-marketplace:ListChangeSets",
        "aws-marketplace:ListEntities",
        "aws-marketplace:ListTasks",
        "aws-marketplace:MeterUsage",
        "aws-marketplace:PutProcurementSystemConfiguration",
        "aws-marketplace:RegisterUsage",
        "aws-marketplace:RejectAgreementApprovalRequest",
        "aws-marketplace:ResolveCustomer",
        "aws-marketplace:SearchAgreements",
        "aws-marketplace:StartBuild",
        "aws-marketplace:StartChangeSet",
        "aws-marketplace:Subscribe",
        "aws-marketplace:Unsubscribe",
        "aws-marketplace:UpdateAgreementApprovalRequest",
        "aws-marketplace:UpdateTask",
        "aws-marketplace:ViewSubscriptions",
    ]
    for action in actions:
        self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_actions_with_arn_type_and_access_level_case_3(self):\n desired_output = [\n 's3:PutAccountPublicAccessBlock',\n 's3:PutAccessPointPublicAccessBlock'\n ]\n output = get_actions_with_arn_type_and_access_level(\n # \"ram\", \"resource-share\", \"Write\"\n \"s3\", \"*\", \"Permissions management\"\n )\n print(output)\n for item in desired_output:\n self.assertTrue(item in output)\n # self.assertListEqual(desired_output, output)", "def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)", "def test_services_with_multiple_pages_apigateway(self):\n # API Gateway Management V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html\n self.assertTrue(\"apigateway:AddCertificateToDomain\" in self.all_actions)\n self.assertTrue(\"apigateway:RemoveCertificateFromDomain\" in self.all_actions)\n self.assertTrue(\"apigateway:SetWebACL\" in self.all_actions)\n # API Gateway Management V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html\n # API Gateway V2 doesn't have any unique actions in but it does have some unique resource types. 
Let's make sure those resource types are in the IAM Definition.\n # Resource types unique to API Gateway V2:\n resource_types = get_arn_types_for_service(\"apigateway\")\n resource_types = list(resource_types.keys())\n self.assertTrue(\"AccessLogSettings\" in resource_types)\n # Resource types unique to API Gateway V1:\n self.assertTrue(\"RestApi\" in resource_types)", "def test_other_iam_data_fixes_in_GH_393(self):\n # Cassandra: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonkeyspacesforapachecassandra.html\n results = get_actions_for_service(\"cassandra\")\n self.assertTrue(\"cassandra:Restore\" in results)\n # Comprehend Medical: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncomprehendmedical.html\n results = get_actions_for_service(\"comprehendmedical\")\n # print(results)\n actions = [\n \"comprehendmedical:DescribeEntitiesDetectionV2Job\",\n \"comprehendmedical:DescribeICD10CMInferenceJob\",\n \"comprehendmedical:DescribePHIDetectionJob\",\n \"comprehendmedical:DescribeRxNormInferenceJob\",\n # \"comprehendmedical:DescribeSNOMEDCTInferenceJob\", # Not in SAR\n \"comprehendmedical:DetectEntitiesV2\",\n \"comprehendmedical:InferICD10CM\",\n \"comprehendmedical:InferRxNorm\",\n # \"comprehendmedical:InferSNOMEDCT\", # Not in SAR\n \"comprehendmedical:ListEntitiesDetectionV2Jobs\",\n \"comprehendmedical:ListICD10CMInferenceJobs\",\n \"comprehendmedical:ListPHIDetectionJobs\",\n \"comprehendmedical:ListRxNormInferenceJobs\",\n # \"comprehendmedical:ListSNOMEDCTInferenceJobs\", # Not in SAR\n \"comprehendmedical:StartEntitiesDetectionV2Job\",\n \"comprehendmedical:StartICD10CMInferenceJob\",\n \"comprehendmedical:StartPHIDetectionJob\",\n \"comprehendmedical:StartRxNormInferenceJob\",\n \"comprehendmedical:StopEntitiesDetectionV2Job\",\n \"comprehendmedical:StopICD10CMInferenceJob\",\n ]\n for action in actions:\n # if action not in results:\n # print(action)\n self.assertTrue(action in results)\n # Compute Optimizer\n results = get_actions_for_service(\"compute-optimizer\")\n actions = [\n \"compute-optimizer:DeleteRecommendationPreferences\",\n \"compute-optimizer:ExportEBSVolumeRecommendations\",\n \"compute-optimizer:ExportLambdaFunctionRecommendations\",\n \"compute-optimizer:GetEffectiveRecommendationPreferences\",\n \"compute-optimizer:GetEnrollmentStatusesForOrganization\",\n \"compute-optimizer:GetLambdaFunctionRecommendations\",\n \"compute-optimizer:GetRecommendationPreferences\",\n \"compute-optimizer:PutRecommendationPreferences\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # DataSync\n results = get_actions_for_service(\"datasync\")\n actions = [\n \"datasync:UpdateLocationNfs\",\n \"datasync:UpdateLocationObjectStorage\",\n \"datasync:UpdateLocationSmb\",\n \"datasync:UpdateTaskExecution\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Account Management\n results = get_actions_for_service(\"account\")\n actions = [\n \"account:DeleteAlternateContact\",\n \"account:GetAlternateContact\",\n \"account:PutAlternateContact\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS IAM Access Analyzer\n results = get_actions_for_service(\"access-analyzer\")\n actions = [\n \"access-analyzer:CancelPolicyGeneration\",\n \"access-analyzer:CreateAccessPreview\",\n \"access-analyzer:GetAccessPreview\",\n \"access-analyzer:GetGeneratedPolicy\",\n \"access-analyzer:ListAccessPreviewFindings\",\n \"access-analyzer:ListAccessPreviews\",\n 
\"access-analyzer:ListPolicyGenerations\",\n \"access-analyzer:StartPolicyGeneration\",\n \"access-analyzer:ValidatePolicy\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Elemental Activations\n results = get_actions_for_service(\"elemental-activations\")\n actions = [\n \"elemental-activations:CompleteAccountRegistration\",\n \"elemental-activations:StartAccountRegistration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # OpenSearch\n results = get_actions_for_service(\"es\")\n actions = [\n \"es:DescribeDomainChangeProgress\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Location\n results = get_actions_for_service(\"geo\")\n actions = [\n \"geo:CalculateRouteMatrix\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Amazon Managed Grafana\n results = get_actions_for_service(\"grafana\")\n actions = [\n \"grafana:DescribeWorkspaceAuthentication\",\n \"grafana:UpdateWorkspaceAuthentication\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # EC2 Image Builder\n results = get_actions_for_service(\"imagebuilder\")\n actions = [\n \"imagebuilder:ImportVmImage\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Timestream\n results = get_actions_for_service(\"timestream\")\n actions = [\n \"timestream:CreateScheduledQuery\",\n \"timestream:DeleteScheduledQuery\",\n \"timestream:DescribeScheduledQuery\",\n \"timestream:ExecuteScheduledQuery\",\n \"timestream:ListScheduledQueries\",\n \"timestream:UpdateScheduledQuery\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS Transfer Family\n results = get_actions_for_service(\"transfer\")\n actions = [\n \"transfer:CreateAccess\",\n \"transfer:CreateWorkflow\",\n \"transfer:DeleteAccess\",\n \"transfer:DeleteWorkflow\",\n \"transfer:DescribeAccess\",\n \"transfer:DescribeExecution\",\n \"transfer:DescribeWorkflow\",\n \"transfer:ListAccesses\",\n \"transfer:ListExecutions\",\n \"transfer:ListWorkflows\",\n \"transfer:SendWorkflowStepState\",\n \"transfer:UpdateAccess\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_get_actions_with_arn_type_and_access_level_case_5(self):\n\n output = get_actions_with_arn_type_and_access_level(\n \"s3\", \"object\", \"List\"\n )\n self.assertTrue(\"s3:ListMultipartUploadParts\" in output)", "def test_get_actions_with_arn_type_and_access_level_case_2(self):\n desired_output = [\n 'ssm:DeleteParameter',\n 'ssm:DeleteParameters',\n 'ssm:LabelParameterVersion',\n 'ssm:PutParameter'\n]\n output = get_actions_with_arn_type_and_access_level(\n \"ssm\", \"parameter\", \"Write\"\n )\n for item in desired_output:\n self.assertTrue(item in output)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action_spaces(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_template_action_spaces(self):\n pass", "def legal_actions(self):\n raise NotImplementedError", "def get_legal_actions(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action_spaces(self):\n pass", "def test_actions(self, actions):\n try:\n for action in actions:\n self.get_action(action['type'])(**action)\n except Exception as e:\n print('Exception: {}'.format(str(e)))", "def test_get_actions_with_arn_type_and_access_level_case_4(self):\n desired_output = [\n 'secretsmanager:ListSecrets'\n ]\n output = 
get_actions_with_arn_type_and_access_level(\n \"secretsmanager\", \"*\", \"List\"\n )\n self.assertListEqual(desired_output, output)", "def _get_legal_actions(self):\n raise NotImplementedError", "def decide_place(self, action):\n pass", "def action_space(self, curr_state):\n # Action space - allowed (position, value) combinations for the agent and environment given the current state\n\n agent_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0]))\n env_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1]))\n return (agent_actions, env_actions)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_preview_action_spaces(self):\n pass", "def valid_actions(self) -> List[str]:\n return list(self.action_map().keys())", "def test_services_with_multiple_pages_kinesis_analytics(self):\n # Kinesis Analytics V1\n results = get_actions_for_service(\"kinesisanalytics\")\n actions = [\n \"kinesisanalytics:GetApplicationState\", # Only in v1, not v2\n \"kinesisanalytics:ListApplications\", # In both\n ]\n for action in actions:\n self.assertTrue(action in results)", "def action_space(self, curr_state):\n\n agent_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0])\n env_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1])\n return (agent_actions, env_actions)", "def action_space(self, curr_state):\n\n agent_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0])\n env_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1])\n return (agent_actions, env_actions)", "def get_actions(self, request):\n return super(OrganizationAdmin, self).get_actions(request)", "def get_legal_actions(self, block_=None):\n return self._get_move_actions(block_) + self._get_mine_actions() + \\\n self._get_placement_actions(block_)", "def get_action_meanings(self) -> list[str]:\n keys = ale_py.Action.__members__.values()\n values = ale_py.Action.__members__.keys()\n mapping = dict(zip(keys, values))\n return [mapping[action] for action in self._action_set]", "def _get_placement_actions(self, exclude=None):\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({\n 'func': '_place',\n 'args': (pos,),\n 'kwargs': {'exclude': exclude}\n })\n\n return rtn", "def test_autocomplete_locations_urls(self):\n r = self.base_check_request(\"get\", \"autocomplete/locations/\")\n self.assertIsInstance(r, list)\n self.assertEqual(len(r), 10, \"Invalid default count\")\n\n ac_keys = ['ancestors', 'id', 'is_region', 'name', 'prepositional_name',\n 'slug', 'text_for_apartments_search',\n 'text_for_complexes_search', 'type_name']\n # ac_keys_full = ac_keys + [\"metro_stations\"]\n for ac in r:\n # check response objects structure\n self.assertListEqual(sorted(list(ac.keys())), ac_keys)\n\n # check response types\n # self.check_list_item_keys(ac[\"ancestors\"], ac_keys_full)\n self.assertIsInstance(ac['id'], int)\n self.assertIsInstance(ac['is_region'], bool)\n self.assertIsInstance(ac['name'], str)\n self.assertIsInstance(ac['prepositional_name'], str)\n self.assertIsInstance(ac['slug'], str)\n 
self.assertIsInstance(ac['text_for_apartments_search'], (str, type(None)))\n self.assertIsInstance(ac['text_for_complexes_search'], (str, type(None)))\n self.assertIsInstance(ac['type_name'], str)", "def test_intent_support(self):\n dispatcher = self.get_dispatcher()\n for intent in self.get_intents():\n self.assertIsNot(dispatcher(intent), None)", "def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # -------------------------------\n # S3 Bucket for Manifests\n # -------------------------------\n\n qs_gov_bucket = s3.Bucket(\n self,\n id=f\"{cf.PROJECT}-ManifestBucket\",\n )\n bucket_name = qs_gov_bucket.bucket_name\n\n # -------------------------------\n # IAM\n # -------------------------------\n\n list_roles_policy = iam.ManagedPolicy(\n self,\n id=f\"{cf.PROJECT}-ListRolesPolicy\",\n description=None,\n managed_policy_name=None,\n path=\"/\",\n statements=[\n iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n resources=[\"*\"],\n actions=[\"iam:ListRoles\", \"iam:ListAccountAliases\"],\n )\n ],\n )\n\n federated_quicksight_policy = iam.ManagedPolicy(\n self,\n id=f\"{cf.PROJECT}-FederatedQuickSightPolicy\",\n managed_policy_name=f\"{cf.PROJECT}-FederatedQuickSightPolicy\",\n statements=[\n iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n resources=[\n f\"arn:aws:iam::{cf.ACCOUNT}:saml-provider/{cf.OKTA_IDP_NAME}\"\n ],\n actions=[\"sts:AssumeRoleWithSAML\"],\n conditions={\n \"StringEquals\": {\n \"saml:aud\": \"https://signin.aws.amazon.com/saml\"\n }\n },\n )\n ],\n )\n\n okta_federated_principal = iam.FederatedPrincipal(\n federated=f\"arn:aws:iam::{cf.ACCOUNT}:saml-provider/{cf.OKTA_IDP_NAME}\",\n assume_role_action=\"sts:AssumeRoleWithSAML\",\n conditions={\n \"StringEquals\": {\"SAML:aud\": \"https://signin.aws.amazon.com/saml\"}\n },\n )\n\n federated_quicksight_role = iam.Role(\n self,\n id=f\"{cf.PROJECT}-{cf.OKTA_ROLE_NAME}\",\n role_name=f\"{cf.PROJECT}-{cf.OKTA_ROLE_NAME}\",\n assumed_by=okta_federated_principal,\n description=\"Allow Okta to Federate Login & User Creation to QuickSight\",\n managed_policies=[federated_quicksight_policy],\n )\n\n\n iam.User(\n self,\n id=f\"{cf.PROJECT}-OktaSSOUser\",\n user_name=f\"{cf.PROJECT}-OktaSSOUser\",\n managed_policies=[list_roles_policy],\n )\n\n\n # -------------------------------\n # Lambda Functions\n # -------------------------------\n\n # iam role for Lambdas\n\n qs_governance_policy = iam.ManagedPolicy(\n self,\n id=f\"{cf.PROJECT}-QuickSightGovernancePolicy\",\n managed_policy_name=f\"{cf.PROJECT}-QuickSightGovernancePolicy\",\n statements=[\n iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n resources=[\n f\"arn:aws:secretsmanager:{cf.REGION}:{cf.ACCOUNT}:secret:{cf.OKTA_SECRET}*\"\n ],\n actions=[\n \"secretsmanager:GetSecretValue\",\n ],\n ),\n iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n resources=[\"*\"],\n actions=[\"quicksight:*\", \"ds:*\"],\n ),\n iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n resources=[f\"arn:aws:s3:::{bucket_name}/*\"],\n actions=[\"s3:Get*\", \"s3:Put*\"],\n ),\n ],\n )\n\n quicksight_permission_mapping_role = iam.Role(\n self,\n id=f\"{cf.PROJECT}-QuickSightPermissionMappingRole\",\n assumed_by=iam.ServicePrincipal(\"lambda.amazonaws.com\"),\n managed_policies=[\n iam.ManagedPolicy.from_aws_managed_policy_name(\n \"service-role/AWSLambdaBasicExecutionRole\"\n ),\n qs_governance_policy,\n ],\n )\n\n # Lambdas\n\n get_okta_info_lambda = _lambda.Function(\n self,\n id=f\"{cf.PROJECT}-GetOktaInfo\",\n 
handler=\"get_okta_info.handler\",\n role=quicksight_permission_mapping_role,\n runtime=_lambda.Runtime.PYTHON_3_8,\n code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, \"pkg\")),\n function_name=f\"{cf.PROJECT}-GetOktaInfo\",\n environment={\n \"OKTA_SECRET\": cf.OKTA_SECRET,\n \"OKTA_ROLE_NAME\": cf.OKTA_ROLE_NAME,\n \"QS_GOVERNANCE_BUCKET\": bucket_name,\n \"QS_USER_GOVERNANCE_KEY\": cf.QS_USER_GOVERNANCE_KEY,\n },\n memory_size=256,\n timeout=core.Duration.seconds(180),\n )\n\n # Lamda Okta to QuickSight Mappers\n\n qs_user_governance_lambda = _lambda.Function(\n self,\n id=f\"{cf.PROJECT}-QSUserGovernance\",\n handler=\"qs_user_gov.handler\",\n role=quicksight_permission_mapping_role,\n runtime=_lambda.Runtime.PYTHON_3_8,\n code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, \"pkg\")),\n function_name=f\"{cf.PROJECT}-QSUserGovernance\",\n environment={\n \"OKTA_ROLE_NAME\": f\"{cf.PROJECT}-{cf.OKTA_ROLE_NAME}\",\n \"QS_GOVERNANCE_BUCKET\": bucket_name,\n \"QS_USER_GOVERNANCE_KEY\": cf.QS_USER_GOVERNANCE_KEY,\n \"OKTA_GROUP_QS_PREFIX\": cf.OKTA_GROUP_QS_PREFIX,\n \"QS_ADMIN_OKTA_GROUP\": cf.QS_ADMIN_OKTA_GROUP,\n \"QS_AUTHOR_OKTA_GROUP\": cf.QS_AUTHOR_OKTA_GROUP,\n \"QS_READER_OKTA_GROUP\": cf.QS_READER_OKTA_GROUP\n },\n memory_size=256,\n timeout=core.Duration.seconds(180),\n )\n\n qs_asset_governance_lambda = _lambda.Function(\n self,\n id=f\"{cf.PROJECT}-QSAssetGovernance\",\n handler=\"qs_asset_gov.handler\",\n role=quicksight_permission_mapping_role,\n runtime=_lambda.Runtime.PYTHON_3_8,\n code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, \"pkg\")),\n function_name=f\"{cf.PROJECT}-QSAssetGovernance\",\n environment={\n \"QS_GOVERNANCE_BUCKET\": bucket_name,\n \"QS_ASSET_GOVERNANCE_KEY\": cf.QS_ASSET_GOVERNANCE_KEY,\n },\n memory_size=256,\n timeout=core.Duration.seconds(180),\n )\n\n # -------------------------------\n # Events\n # -------------------------------\n\n qs_user_governance_lambda.add_event_source(\n lambda_event_sources.S3EventSource(\n bucket=qs_gov_bucket,\n events=[s3.EventType.OBJECT_CREATED],\n filters=[s3.NotificationKeyFilter(prefix=cf.QS_USER_GOVERNANCE_KEY)],\n )\n )\n\n qs_asset_governance_lambda.add_event_source(\n lambda_event_sources.S3EventSource(\n bucket=qs_gov_bucket,\n events=[s3.EventType.OBJECT_CREATED],\n filters=[s3.NotificationKeyFilter(prefix=cf.QS_ASSET_GOVERNANCE_KEY)],\n )\n )\n\n lambda_schedule = events.Schedule.rate(core.Duration.days(1))\n get_okta_info_target = events_targets.LambdaFunction(\n handler=get_okta_info_lambda\n )\n events.Rule(\n self,\n id=f\"{cf.PROJECT}-GetOktaInfoScheduledEvent\",\n description=\"The once per day CloudWatch event trigger for the Lambda\",\n enabled=True,\n schedule=lambda_schedule,\n targets=[get_okta_info_target],\n )\n\n # -------------------------------\n # S3 Object Deployment - QS Asset Manifest\n # -------------------------------\n\n asset_manifest_deploy = s3_deploy.BucketDeployment(\n self,\n id=f\"{cf.PROJECT}-AssetManifestDeploy\",\n sources=[s3_deploy.Source.asset(\n os.path.join(cf.PATH_ROOT, 'qs_config')\n )],\n destination_bucket=qs_gov_bucket\n )", "def get_actions(self, request):\n actions = super().get_actions(request)\n if not settings.PUBLISHER_CODE:\n del actions['create_cwr']\n if 'delete_selected' in actions:\n del actions['delete_selected']\n return actions", "def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n 
assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"", "def test_services_with_multiple_pages_lex(self):\n # Lex V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonlex.html\n self.assertTrue(\"lex:DeleteUtterances\" in self.all_actions)\n # Lex V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonlexv2.html\n self.assertTrue(\"lex:ListBotLocales\" in self.all_actions)\n results = get_actions_for_service(\"lex\")\n actions = [\n \"lex:CreateIntentVersion\",\n \"lex:CreateSlotTypeVersion\",\n \"lex:DeleteBotChannelAssociation\",\n \"lex:DeleteIntentVersion\",\n \"lex:DeleteSlotTypeVersion\",\n \"lex:GetBot\",\n \"lex:GetBotAlias\",\n \"lex:GetBotAliases\",\n \"lex:GetBotChannelAssociation\",\n \"lex:GetBotChannelAssociations\",\n \"lex:GetBotVersions\",\n \"lex:GetBots\",\n \"lex:GetBuiltinIntent\",\n \"lex:GetBuiltinIntents\",\n \"lex:GetBuiltinSlotTypes\",\n \"lex:GetExport\",\n \"lex:GetImport\",\n \"lex:GetIntent\",\n \"lex:GetIntentVersions\",\n \"lex:GetIntents\",\n \"lex:GetMigration\",\n \"lex:GetMigrations\",\n \"lex:GetSlotType\",\n \"lex:GetSlotTypeVersions\",\n \"lex:GetSlotTypes\",\n \"lex:GetUtterancesView\",\n \"lex:PostContent\",\n \"lex:PostText\",\n \"lex:PutBot\",\n \"lex:PutBotAlias\",\n \"lex:PutIntent\",\n \"lex:PutSlotType\",\n \"lex:StartMigration\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def get_available_actions(self, state):\n pass", "def test_correct_data_under_places(self):\n load_to_datastore(self.places_sofia, self.metadata_sofia)\n CommonAssertions.check_correct_data_under_places(tester=self, places=self.places_sofia,\n metadata=self.metadata_sofia)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_preview_action_spaces_0(self):\n pass", "def test_required_unknown():\n parser=argparse.ArgumentParser()\n parser.add_argument('--region',\n help='Enter a region like us-east-2.',\n 
dest=\"region\",\n action=ValidateRegion,\n required=True)\n parser.add_argument('--output',\n help='pretty, json, yaml',\n dest=\"output\",\n action=Validateoutput,\n nargs=\"?\",\n default=\"yaml\"\n )\n parser.add_argument('--filter-types',\n help='eg: AWS::IAM::Role or AWS::EC2::Instance. Using \"ALL\" with no quotes and we will run it for all current supported resource types',\n nargs='+',\n dest=\"types\",\n action=Validatefilter,\n required=True)\n parser.add_argument('--tag_keys',\n help='Allows you to exclude particular AWS Resources based on the presence of a particular tag key on the resource. This will only be applied to AWS Resources that support tagging. Valid values: any string that is a valid tag - multiple values can be supplied.',\n dest=\"tags\")\n \n #This should raise an error since this will cause a SystemExit since bad params were passed in \n args = [\"--region\", \"NADA\",'--output', \"NADA\",'--filter-types',\"NADA\"]\n with pytest.raises(SystemExit):\n parser.parse_args(args)\n \n \n \n \n #This should NOT raise an error since good params were passed into the parser\n args = [\"--region\", \"us-east-1\",'--output', \"yaml\",'--filter-types',\"AWS::EC2::Instance\"] \n with not_raises(SystemExit):\n parser.parse_args(args)", "def test_undefined_action_is_logged(self):\n create_file(self.authz_file, textwrap.dedent(\"\"\"\\\n [groups]\n administrators = éat\n [wiki:WikiStart]\n änon = UNKNOWN_VIEW, TEST_CREATE, !TEST_MODIFY\n [milestone:milestone1]\n * = UNKNOWN_MODIFY, !TEST_VIEW\n \"\"\"))\n authz_policy = AuthzPolicy(self.env)\n authz_policy.parse_authz()\n\n self.assertEqual(2, len(self.env.log_messages))\n self.assertIn(('WARNING',\n 'The action UNKNOWN_VIEW in the [wiki:WikiStart] '\n 'section of trac-authz-policy is not a valid action.'),\n self.env.log_messages)\n self.assertIn(('WARNING',\n 'The action UNKNOWN_MODIFY in the [milestone:milestone1] '\n 'section of trac-authz-policy is not a valid action.'),\n self.env.log_messages)", "def test_search_form_apartments_urls(self):\n r_keys = ['balcony_types', 'bathroom_type', 'building_floors_max',\n 'building_floors_min', 'building_type', 'decoration',\n 'elevators_type', 'floor_max', 'floor_min', 'infrastructure',\n 'living_area_max', 'living_area_min', 'metro_stations',\n 'price_per_m_max', 'price_per_m_min', 'regions', 'rooms_count',\n 'total_area_max', 'total_area_min']\n r = self.check_request_keys(\"get\", \"search-forms/apartments/\", r_keys)\n\n self.check_list_item_keys(r[\"balcony_types\"], ['id', 'name'])\n self.check_list_item_keys(r[\"bathroom_type\"], ['id', 'name'])\n self.assertIsInstance(r['building_floors_max'], int)\n self.assertIsInstance(r['building_floors_min'], int)\n self.check_list_item_keys(r[\"building_type\"], ['id', 'name'])\n self.assertIsInstance(r['decoration'], list)\n self.assertEqual(r['decoration'], [])\n self.check_list_item_keys(r[\"elevators_type\"], ['id', 'name'])\n self.assertIsInstance(r['floor_max'], int)\n self.assertIsInstance(r['floor_min'], int)\n self.assertIsInstance(r['infrastructure'], list)\n self.assertEqual(r['infrastructure'], [])\n self.assertIsInstance(r['living_area_max'], int)\n self.assertIsInstance(r['living_area_min'], int)\n self.check_list_item_keys(r[\"metro_stations\"], ['id', 'name'])\n self.assertIsInstance(r['price_per_m_max'], int)\n self.assertIsInstance(r['price_per_m_min'], int)\n self.check_list_item_keys(r[\"regions\"], ['format', 'id', 'locations', 'name', 'slug', 'typeBeforeLocation',\n 'typeName', 'typePrepositionalShortName', 
'typeShortName'])\n self.check_list_items_type(r['rooms_count'], int)\n self.assertIsInstance(r['total_area_max'], int)\n self.assertIsInstance(r['total_area_min'], int)", "def test_cohorts_management_a11y(self):\n self.cohort_management_page.a11y_audit.config.set_rules({\n \"ignore\": [\n 'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865\n 'region', # TODO: AC-932\n ]\n })\n self.cohort_management_page.a11y_audit.check_for_accessibility_errors()", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_library_variable_set_usage_list_action_spaces(self):\n pass", "def InitActionCheck(initActionList, init):\n for actions in initActionList:\n action_class = getNameFromIRI(actions.is_a[0].iri)\n # if the action is a SpeedAction class\n if action_class == \"SpeedAction\":\n action_entity_ref = getNameFromIRI(actions.has_entity_ref[0].iri)\n target_speed = actions.has_target_speed[0]\n ontology_transition_dynamics = actions.has_transition_dynamics[0]\n xosc_transition_dynamics = checkTransitionDynamics(ontology_transition_dynamics)\n init.add_init_action(action_entity_ref, xosc.AbsoluteSpeedAction(target_speed, xosc_transition_dynamics))\n continue\n #if the action is TeleportAction\n if action_class == \"TeleportAction\":\n action_entity_ref = getNameFromIRI(actions.has_entity_ref[0].iri)\n # if the action has position as parameter set\n s: int = 0\n offset = 0\n lane_id = 0\n road_id = 0\n if len(actions.has_position) != 0:\n position = actions.has_position[0]\n if len(position.has_s) != 0:\n s = position.has_s[0]\n\n if len(position.has_offset) != 0:\n offset = position.has_offset[0]\n\n if len(position.has_lane_id) != 0:\n lane_id = position.has_lane_id[0]\n\n if len(position.has_road_id) != 0:\n road_id = position.has_road_id[0]\n\n init.add_init_action(action_entity_ref, xosc.TeleportAction(xosc.LanePosition(s, offset, lane_id, road_id)))\n continue\n if action_class == \"EnvironmentAction\": # if the action is an EnvironmentAction\n xosc_environment_action = checkEnvironmentAction(actions)\n init.add_global_action(xosc_environment_action)\n return init", "def aws_elasticsearch_public_access_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for response in describe_es_os_domains(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(response,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n esDomainName = response[\"DomainStatus\"][\"DomainName\"]\n esVersion = response[\"DomainStatus\"][\"ElasticsearchVersion\"]\n domainId = response[\"DomainStatus\"][\"DomainId\"]\n domainArn = response[\"DomainStatus\"][\"ARN\"]\n # Determine if ES has Cognito Enabled\n try:\n cognitoEnabledCheck = str(response[\"DomainStatus\"][\"CognitoOptions\"][\"Enabled\"])\n except KeyError:\n cognitoEnabledCheck = False\n # Determine if ES is in a VPC\n try:\n vpcId = str(response[\"VPCOptions\"][\"VPCId\"])\n except KeyError:\n vpcId = \"NO_VPC\"\n # Determine if there is a policy and then parse through it. 
If the \"AWS\": \"*\" principal is allowed (anonymous access) without\n # any conditions we can assume there is not anything else to stop them\n try:\n policyDoc = response[\"AccessPolicies\"]\n policyJson = json.loads(policyDoc.encode().decode(\"unicode_escape\"))\n hasPolicy = True\n for sid in policyJson[\"Statement\"]:\n try:\n conditionCheck = str(sid[\"Condition\"])\n hasCondition = True\n except:\n conditionCheck = \"\"\n hasCondition = False\n if str(sid[\"Principal\"]) == '{\"AWS\": \"*\"}' and hasCondition is False:\n policyAllowAnon = True\n else:\n policyAllowAnon = False\n except KeyError or ValueError:\n policyDoc = \"\"\n policyJson = \"NO_POLICY\"\n policyAllowAnon = \"NO_POLICY\"\n hasPolicy = False\n # Full Public Check\n if policyAllowAnon is True and vpcId == \"NO_VPC\" and cognitoEnabledCheck is False:\n fullPublic = True\n else:\n fullPublic = False\n # This is a failing check\n if fullPublic is True:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{domainArn}/elasticsearch-public-access-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": domainArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"CRITICAL\"},\n \"Confidence\": 99,\n \"Title\": \"[OpenSearch.9] OpenSearch/AWS ElasticSearch Service domains should not be exposed to the public\",\n \"Description\": \"OpenSearch/AWS ElasticSearch Service domain \"\n + esDomainName\n + \" is open to public due to not using a VPC, Cognito, or any additional conditions within the resource policy. Public access will allow malicious actors to attack the confidentiality, integrity or availability of documents indexed in your Domain. Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For information on protecting Domains with a Resource-based Policy refer to the Identity and Access Management in Amazon Elasticsearch Service section of the Amazon Elasticsearch Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Analytics\",\n \"AssetService\": \"Amazon OpenSearch Service\",\n \"AssetComponent\": \"Search Domain\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsOpenSearchServiceDomain\",\n \"Id\": domainArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsOpenSearchServiceDomain\": {\n \"Id\": domainId,\n \"DomainName\": esDomainName,\n \"EngineVersion\": esVersion\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST CSF V1.1 PR.AC-4\",\n \"NIST CSF V1.1 PR.DS-5\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AC-5\",\n \"NIST SP 800-53 Rev. 4 AC-6\",\n \"NIST SP 800-53 Rev. 4 AC-14\",\n \"NIST SP 800-53 Rev. 4 AC-16\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 
4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 4 AC-24\",\n \"NIST SP 800-53 Rev. 4 PE-19\",\n \"NIST SP 800-53 Rev. 4 PS-3\",\n \"NIST SP 800-53 Rev. 4 PS-6\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 4 SC-8\",\n \"NIST SP 800-53 Rev. 4 SC-13\",\n \"NIST SP 800-53 Rev. 4 SC-15\",\n \"NIST SP 800-53 Rev. 4 SC-31\",\n \"NIST SP 800-53 Rev. 4 SI-4\",\n \"AICPA TSC CC6.3\",\n \"AICPA TSC CC6.6\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.6.1.2\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.7.1.1\",\n \"ISO 27001:2013 A.7.1.2\",\n \"ISO 27001:2013 A.7.3.1\",\n \"ISO 27001:2013 A.8.2.2\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.9.1.1\",\n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.2.3\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.10.1.1\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.11.1.5\",\n \"ISO 27001:2013 A.11.2.1\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.1.3\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.3\",\n \"ISO 27001:2013 A.13.2.4\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{domainArn}/elasticsearch-public-access-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": domainArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"CRITICAL\"},\n \"Confidence\": 99,\n \"Title\": \"[OpenSearch.9] OpenSearch/AWS ElasticSearch Service domains should not be exposed to the public\",\n \"Description\": \"OpenSearch/AWS ElasticSearch Service domain \"\n + esDomainName\n + \" is not to the public due to using a VPC, Cognito, or any additional conditions within the resource policy.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For information on protecting Domains with a Resource-based Policy refer to the Identity and Access Management in Amazon Elasticsearch Service section of the Amazon Elasticsearch Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Analytics\",\n \"AssetService\": \"Amazon OpenSearch Service\",\n \"AssetComponent\": \"Search Domain\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsOpenSearchServiceDomain\",\n \"Id\": domainArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsOpenSearchServiceDomain\": {\n \"Id\": domainId,\n \"DomainName\": esDomainName,\n \"EngineVersion\": esVersion\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST CSF V1.1 PR.AC-4\",\n \"NIST CSF V1.1 PR.DS-5\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 
4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AC-5\",\n \"NIST SP 800-53 Rev. 4 AC-6\",\n \"NIST SP 800-53 Rev. 4 AC-14\",\n \"NIST SP 800-53 Rev. 4 AC-16\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 4 AC-24\",\n \"NIST SP 800-53 Rev. 4 PE-19\",\n \"NIST SP 800-53 Rev. 4 PS-3\",\n \"NIST SP 800-53 Rev. 4 PS-6\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 4 SC-8\",\n \"NIST SP 800-53 Rev. 4 SC-13\",\n \"NIST SP 800-53 Rev. 4 SC-15\",\n \"NIST SP 800-53 Rev. 4 SC-31\",\n \"NIST SP 800-53 Rev. 4 SI-4\",\n \"AICPA TSC CC6.3\",\n \"AICPA TSC CC6.6\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.6.1.2\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.7.1.1\",\n \"ISO 27001:2013 A.7.1.2\",\n \"ISO 27001:2013 A.7.3.1\",\n \"ISO 27001:2013 A.8.2.2\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.9.1.1\",\n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.2.3\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.10.1.1\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.11.1.5\",\n \"ISO 27001:2013 A.11.2.1\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.1.3\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.3\",\n \"ISO 27001:2013 A.13.2.4\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def provision_create(ec2_conn, iam_conn, interana_account_id, s3_bucket_path, interana_user):\n try:\n user, all_policies = check_account_setup(iam_conn, interana_user)\n except Exception, e:\n print \"Warning could not verify user interana_user {} because {}\".format(interana_user, e)\n\n infile = 's3_bucket_list.policy.template'\n outfile = 's3_bucket_list.policy'\n\n bucket_name, bucket_prefix = get_bucket_name_prefix(s3_bucket_path)\n\n all_lines = ''\n with open(infile, 'r') as tmp_fh, open(outfile, 'w') as out_fh:\n for line in tmp_fh:\n re_proxy = re.compile('<INTERANA_ACCOUNT_ID>')\n translate = re_proxy.sub(interana_account_id, line)\n\n re_proxy = re.compile('<BUCKET_NAME>')\n translate = re_proxy.sub(bucket_name, translate)\n\n re_proxy = re.compile('<BUCKET_PREFIX>')\n translate = re_proxy.sub(bucket_prefix, translate)\n\n out_fh.write(translate)\n all_lines += translate.strip()\n\n if len(bucket_prefix) < 1:\n with open(outfile, 'r') as in_fh:\n policy = json.load(in_fh)\n del policy['Statement'][1]['Condition']\n all_lines = json.dumps(policy)\n print \"Download file to check GetObject Access {}\".format(outfile)\n with open(outfile, 'w') as out_fh:\n json.dump(policy, out_fh, indent=4)\n\n print \"****policy file {}***\".format(outfile)\n\n print json.dumps(json.loads(all_lines), indent=True)", "def test_all_actions_setup(self, mocked_find):\n\n setup_identity_cache()\n\n mocked_find.side_effect = KeyError(\"Error forced for testing\")\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)\n\n new_task = Task.objects.all()[0]\n\n class_conf = new_task.config\n expected_action_names = CreateProjectAndUser.default_actions[:]\n expected_action_names += class_conf.additional_actions\n\n actions = new_task.actions\n observed_action_names = 
[a.action_name for a in actions]\n self.assertEqual(observed_action_names, expected_action_names)", "def testAmenities(self):\n place = Place()\n self.assertTrue(hasattr(place, \"amenity_ids\"))\n self.assertEqual(type(place.amenity_ids), list)\n self.assertEqual(len(place.amenity_ids), 0)", "def getActions(self, state): \n util.raiseNotDefined()", "def get_dropbox_policy ( bucket_name, requires_aspera = False ) :\n if requires_aspera :\n return \"\"\"{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"GrantUploadDownloadPermissionsToBucket\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:AbortMultipartUpload\",\n \"s3:DeleteObject\",\n \"s3:DeleteObjectVersion\",\n \"s3:GetBucketAcl\",\n \"s3:GetBucketLocation\",\n \"s3:GetBucketLogging\",\n \"s3:GetBucketNotification\",\n \"s3:GetBucketPolicy\",\n \"s3:GetBucketRequestPayment\",\n \"s3:GetBucketTagging\",\n \"s3:GetBucketVersioning\",\n \"s3:GetBucketWebsite\",\n \"s3:GetLifecycleConfiguration\",\n \"s3:GetObject\",\n \"s3:GetObjectAcl\",\n \"s3:GetObjectTorrent\",\n \"s3:GetObjectVersion\",\n \"s3:GetObjectVersionAcl\",\n \"s3:GetObjectVersionTorrent\",\n \"s3:ListAllMyBuckets\",\n \"s3:ListBucket\",\n \"s3:ListBucketMultipartUploads\",\n \"s3:ListBucketVersions\",\n \"s3:ListMultipartUploadParts\",\n \"s3:PutBucketVersioning\",\n \"s3:PutObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\"\"\" + bucket_name + \"\"\"\",\n \"arn:aws:s3:::\"\"\" + bucket_name + \"\"\"/*\"\n ]\n },\n {\n \"Sid\": \"AllowAsperaRootLevelListingOfTheBucket\",\n \"Action\": [\"s3:ListBucket\"],\n \"Effect\": \"Allow\",\n \"Resource\": [\n \"arn:aws:s3:::\"\"\" + bucket_name + \"\"\"\"\n ],\n \"Condition\":{\n \"StringEquals\":{\n \"s3:prefix\":[\"\"], \"s3:delimiter\":[\"/\"]\n }\n }\n },\n {\n \"Sid\" : \"AllowGroupToSeeBucketListInAsperaConsole\",\n \"Action\" : [\n \"s3:ListAllMyBuckets\",\n \"s3:GetBucketLocation\"\n ],\n \"Effect\" : \"Allow\",\n \"Resource\" : [ \"arn:aws:s3:::\" ]\n }\n ]\n}\"\"\"\n\n else :\n return \"\"\"{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"GrantUploadDownloadPermissionsToBucket\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:AbortMultipartUpload\",\n \"s3:DeleteObject\",\n \"s3:DeleteObjectVersion\",\n \"s3:GetBucketAcl\",\n \"s3:GetBucketLocation\",\n \"s3:GetBucketLogging\",\n \"s3:GetBucketNotification\",\n \"s3:GetBucketPolicy\",\n \"s3:GetBucketRequestPayment\",\n \"s3:GetBucketTagging\",\n \"s3:GetBucketVersioning\",\n \"s3:GetBucketWebsite\",\n \"s3:GetLifecycleConfiguration\",\n \"s3:GetObject\",\n \"s3:GetObjectAcl\",\n \"s3:GetObjectTorrent\",\n \"s3:GetObjectVersion\",\n \"s3:GetObjectVersionAcl\",\n \"s3:GetObjectVersionTorrent\",\n \"s3:ListAllMyBuckets\",\n \"s3:ListBucket\",\n \"s3:ListBucketMultipartUploads\",\n \"s3:ListBucketVersions\",\n \"s3:ListMultipartUploadParts\",\n \"s3:PutBucketVersioning\",\n \"s3:PutObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\"\"\" + bucket_name + \"\"\"\",\n \"arn:aws:s3:::\"\"\" + bucket_name + \"\"\"/*\"\n ]\n }\n ]\n}\"\"\"", "def exists_intent_action(self, intent_keyword):\n pass", "def actions() -> None:\n pass", "def actions_required(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"actions_required\")", "def actions_required(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"actions_required\")", "def actions_required(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"actions_required\")", "def actions_required(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, 
\"actions_required\")", "def take_actions(self, actions: MultiAgentDict):\n\n # 1. - 4.\n wage_increases, demand = self.parse_actions(actions)\n wages = {agent.agent_id: agent.wage * (1 + wage_increases[agent.agent_id]) for agent in self.agents.values()}\n self.clear_labor_market(wages)\n self.clear_goods_market(demand)\n\n # 5. - 7.\n self.clear_dividends(self.firm.profit)\n self.clear_capital_market()\n\n return wage_increases, demand", "def test_ami_exists(self) -> None:\n owner = self.sts.get_caller_identity().get('Account')\n amis = self.ec2_client.describe_images(\n Owners=[owner],\n Filters=[{\n 'Name': 'name',\n 'Values': ['saints-xctf-web-server*']\n }]\n )\n self.assertTrue(len(amis.get('Images')) > 0)", "def validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id, aws_secret_access_key):\n if not aws_default_region:\n raise DemistoException('You must specify AWS default region.')\n\n if bool(aws_access_key_id) != bool(aws_secret_access_key):\n raise DemistoException('You must provide Access Key id and Secret key id to configure the instance with '\n 'credentials.')\n if bool(aws_role_arn) != bool(aws_role_session_name):\n raise DemistoException('Role session name is required when using role ARN.')", "def _build_action_space(self):\n feasible_actions = []\n # Adding the inform actions and request actions.\n for slot in sorted(self.slot_set.keys()):\n feasible_actions.append({'action': 'request', 'inform_slots': {}, 'request_slots': {slot: dialogue_configuration.VALUE_UNKNOWN},\"explicit_inform_slots\":{}, \"implicit_inform_slots\":{}})\n # Diseases as actions.\n for disease in sorted(self.disease_symptom.keys()):\n feasible_actions.append({'action': 'inform', 'inform_slots': {\"disease\":disease}, 'request_slots': {},\"explicit_inform_slots\":{}, \"implicit_inform_slots\":{}})\n\n return feasible_actions", "def actions(self):\n raise NotImplementedError", "def parse_exception(arg_action, arg_exception):\n # print(arg_exception)\n if arg_exception.response['Error']['Code'] == 'NoSuchBucket':\n print(f'BotoBucket() Error: \"NoSuchBucket\". '\n f'Class Method: {arg_action}, aws operation: {arg_exception.operation_name}')\n return [{arg_exception.operation_name}, arg_exception.response['Error']['Code'],\n arg_exception.response['Error']['Message'], arg_exception.response]\n\n elif arg_exception.response['Error']['Code'] == 'BucketAlreadyExists':\n print(f'BotoBucket() Error: \"Bucket Already Exists\". '\n f'Class Method: {arg_action}, aws operation: {arg_exception.operation_name}')\n return [{arg_exception.operation_name}, arg_exception.response['Error']['Code'],\n arg_exception.response['Error']['Message'], arg_exception.response]\n\n elif arg_exception.response['Error']['Code'] == 'IllegalLocationConstraintException':\n print(f'BotoBucket() Error: \"Wrong Region\". '\n f'Class Method: {arg_action}, aws operation: {arg_exception.operation_name}')\n return [{arg_exception.operation_name}, arg_exception.response['Error']['Code'],\n arg_exception.response['Error']['Message'], arg_exception.response]\n\n elif arg_exception.response['Error']['Code'] == 'BucketAlreadyOwnedByYou':\n print(f'BotoBucket() Error: \"Bucket Already Owned By You\". 
'\n f'Class Method: {arg_action}, aws operation: {arg_exception.operation_name}')\n return [{arg_exception.operation_name}, arg_exception.response['Error']['Code'],\n arg_exception.response['Error']['Message'], arg_exception.response]\n\n elif arg_exception.response['Error']['Code'] == 'InvalidBucketName':\n print(f'BotoBucket() Error: \"The specified bucket name is not valid.\". '\n f'Class Method: {arg_action}, aws operation: {arg_exception.operation_name}')\n return [{arg_exception.operation_name}, arg_exception.response['Error']['Code'],\n arg_exception.response['Error']['Message'], arg_exception.response]\n\n elif arg_exception.response['Error']['Code'] == 'BucketNotEmpty':\n print(f'BotoBucket() Error: \"The bucket you tried to delete is not empty.\". '\n f'Class Method: {arg_action}, aws operation: {arg_exception.operation_name}')\n return [{arg_exception.operation_name}, arg_exception.response['Error']['Code'],\n arg_exception.response['Error']['Message'], arg_exception.response]\n\n else:\n print(f'BotoBucket() Error: \"Other Exception. Please parse the response.\". '\n f'Class Method: {arg_action}, aws operation: {arg_exception.operation_name}')\n print(arg_exception.response)\n return [{arg_exception.operation_name}, arg_exception.response['Error']['Code'],\n arg_exception.response['Error']['Message'], arg_exception.response]", "def clean_ami_public_access(self):\n main_account = Account(region=config.aws.region)\n ddb_table = main_account.resource(\"dynamodb\").Table(self.config.publicAMIs.ddb_table_name)\n\n retention_period = self.config.publicAMIs.remediation_retention_period\n\n jira = JiraReporting(self.config)\n slack = SlackNotification(self.config)\n\n for account_id, account_name in self.config.aws.accounts.items():\n logging.debug(f\"Checking '{account_name} / {account_id}'\")\n issues = IssueOperations.get_account_open_issues(ddb_table, account_id, PublicAMIIssue)\n for issue in issues:\n ami_id = issue.issue_id\n\n in_whitelist = self.config.publicAMIs.in_whitelist(account_id, ami_id)\n\n if in_whitelist:\n logging.debug(f\"Skipping {ami_id} (in whitelist)\")\n\n # Adding label with \"whitelisted\" to jira ticket.\n jira.add_label(\n ticket_id=issue.jira_details.ticket,\n label=IssueStatus.Whitelisted.value\n )\n continue\n\n if issue.timestamps.reported is None:\n logging.debug(f\"Skipping '{ami_id}' (was not reported)\")\n continue\n\n if issue.timestamps.remediated is not None:\n logging.debug(f\"Skipping {ami_id} (has been already remediated)\")\n continue\n\n updated_date = issue.timestamp_as_datetime\n no_of_days_issue_created = (self.config.now - updated_date).days\n\n if no_of_days_issue_created >= retention_period:\n owner = issue.jira_details.owner\n bu = issue.jira_details.business_unit\n product = issue.jira_details.product\n\n try:\n account = Account(id=account_id,\n name=account_name,\n region=issue.issue_details.region,\n role_name=self.config.aws.role_name_reporting)\n if account.session is None:\n continue\n\n checker = PublicAMIChecker(account=account)\n checker.check(amis_to_check=[ami_id])\n ami = checker.get_ami(ami_id)\n if ami is None:\n logging.debug(f\"AMI {ami_id} was removed by user\")\n elif not ami.public_access:\n logging.debug(f\"AMI {ami.name} public access issue was remediated by user\")\n else:\n logging.debug(f\"Remediating '{ami.name}' \")\n\n remediation_succeed = True\n if ami.modify_image_attribute():\n comment = (f\"AMI '{ami.name}' public access issue \"\n f\"in '{account_name} / {account_id}' account \"\n f\"was remediated 
by hammer\")\n else:\n remediation_succeed = False\n comment = (f\"Failed to remediate AMI '{ami.name}' public access issue \"\n f\"in '{account_name} / {account_id}' account \"\n f\"due to some limitations. Please, check manually\")\n\n jira.remediate_issue(\n ticket_id=issue.jira_details.ticket,\n comment=comment,\n reassign=remediation_succeed,\n )\n slack.report_issue(\n msg=f\"{comment}\"\n f\"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}\",\n owner=owner,\n account_id=account_id,\n bu=bu, product=product,\n )\n IssueOperations.set_status_remediated(ddb_table, issue)\n except Exception:\n logging.exception(f\"Error occurred while updating AMI '{ami_id}' access \"\n f\"in '{account_name} / {account_id}'\")\n else:\n logging.debug(f\"Skipping '{ami_id}' \"\n f\"({retention_period - no_of_days_issue_created} days before remediation)\")", "def actions_required(self) -> Optional[str]:\n return pulumi.get(self, \"actions_required\")", "def get_actions(\n self, observations: Observations, action_space: gym.Space\n ) -> Actions:\n return super().get_actions(observations, action_space)", "def test_services_with_multiple_pages_ses(self):\n # SES V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html\n self.assertTrue(\"ses:PutIdentityPolicy\" in self.all_actions)\n # SES V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonsimpleemailservicev2.html\n self.assertTrue(\"ses:ListImportJobs\" in self.all_actions)\n\n results = get_actions_for_service(\"ses\")\n actions = [\n \"ses:CloneReceiptRuleSet\",\n \"ses:CreateConfigurationSetTrackingOptions\",\n \"ses:CreateReceiptFilter\",\n \"ses:CreateReceiptRule\",\n \"ses:CreateReceiptRuleSet\",\n \"ses:CreateTemplate\",\n \"ses:DeleteConfigurationSetTrackingOptions\",\n \"ses:DeleteIdentity\",\n \"ses:DeleteIdentityPolicy\",\n \"ses:DeleteReceiptFilter\",\n \"ses:DeleteReceiptRule\",\n \"ses:DeleteReceiptRuleSet\",\n \"ses:DeleteTemplate\",\n \"ses:DeleteVerifiedEmailAddress\",\n \"ses:DescribeActiveReceiptRuleSet\",\n \"ses:DescribeConfigurationSet\",\n \"ses:DescribeReceiptRule\",\n \"ses:DescribeReceiptRuleSet\",\n \"ses:GetAccountSendingEnabled\",\n \"ses:GetIdentityDkimAttributes\",\n \"ses:GetIdentityMailFromDomainAttributes\",\n \"ses:GetIdentityNotificationAttributes\",\n \"ses:GetIdentityPolicies\",\n \"ses:GetIdentityVerificationAttributes\",\n \"ses:GetSendQuota\",\n \"ses:GetSendStatistics\",\n \"ses:GetTemplate\",\n \"ses:ListIdentities\",\n \"ses:ListIdentityPolicies\",\n \"ses:ListReceiptFilters\",\n \"ses:ListReceiptRuleSets\",\n \"ses:ListTemplates\",\n \"ses:ListVerifiedEmailAddresses\",\n \"ses:PutIdentityPolicy\",\n \"ses:ReorderReceiptRuleSet\",\n \"ses:SendBounce\",\n \"ses:SendBulkTemplatedEmail\",\n \"ses:SendRawEmail\",\n \"ses:SendTemplatedEmail\",\n \"ses:SetActiveReceiptRuleSet\",\n \"ses:SetIdentityDkimEnabled\",\n \"ses:SetIdentityFeedbackForwardingEnabled\",\n \"ses:SetIdentityHeadersInNotificationsEnabled\",\n \"ses:SetIdentityMailFromDomain\",\n \"ses:SetIdentityNotificationTopic\",\n \"ses:SetReceiptRulePosition\",\n \"ses:TestRenderTemplate\",\n \"ses:UpdateAccountSendingEnabled\",\n \"ses:UpdateConfigurationSetReputationMetricsEnabled\",\n \"ses:UpdateConfigurationSetSendingEnabled\",\n \"ses:UpdateConfigurationSetTrackingOptions\",\n \"ses:UpdateReceiptRule\",\n \"ses:UpdateTemplate\",\n \"ses:VerifyDomainDkim\",\n \"ses:VerifyDomainIdentity\",\n \"ses:VerifyEmailAddress\",\n 
\"ses:VerifyEmailIdentity\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_make_compatible_taxa_summaries_sample_id_map_incomplete_map(self):\r\n self.assertRaises(ValueError, _make_compatible_taxa_summaries,\r\n self.taxa_summary3, self.taxa_summary4, self.sample_id_map3)", "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)", "def test_get_actions_for_invalid_service(self):\n output = get_actions_for_service(\"invalid_service\")\n self.assertListEqual([], output)", "def test_was_produced_by_action(self):\n\n test_content = {\n AbstractAction.ACTION: WordExtraction.__name__,\n AbstractAction.RESULT: ['One', 'Two']\n }\n\n assert WordExtraction.produced(test_content)\n\n test_content[AbstractAction.ACTION] = ''\n\n assert not WordExtraction.produced(test_content)", "def test_location(self, all_metars):\n expected = [\"KIAH\", 'KGNV', 'KNID', 'KTPA', 'KP60']\n for metar, expected_val in zip(all_metars, expected):\n parser = Parser(metar)\n actual = parser.parse()\n assert expected_val == actual['location']", "def _check_market_place_in_range(self):\n\t\tfor building in self.get_buildings_in_range():\n\t\t\tif building.id == BUILDINGS.MARKET_PLACE_CLASS:\n\t\t\t\tif StaticPather.get_path_on_roads(self.island, self, building) is not None:\n\t\t\t\t\t# a market place is in range\n\t\t\t\t\treturn\n\t\t# no market place found\n\t\tself.session.ingame_gui.message_widget.add(self.position.origin.x, self.position.origin.y, \\\n\t\t 'NO_MARKET_PLACE_IN_RANGE')", "def test_create_resource_access_review_for_all_namespaces(self):\n pass", "def test_police_abbreviations(self):\n for word in self.report.get_words():\n for uword in self.rules.police_abbreviations:\n if uword[\"word\"] == word.text.lower():\n self.add_error(\n f\"{word.text} är en intern förkortning. 
\"\n f\"Använd {uword['means']} istället.\",\n word=word,\n )", "def test_non_additive_requires_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n with pytest.raises(ValueError):\n launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n always_expand=False)", "def get_available_actions(self):\n return self.actions", "def test_assessor_access_normal(self):\n assessor = get_or_create_default_assessor()\n self.client.login(assessor.email)\n # This assessor doesn't belong to a group\n self.assertTrue(is_assessor(assessor))\n # add the assessor to the assessment group\n self.assertTrue(Assessment.objects.filter(application=self.application).count() > 0)\n for assessment in Assessment.objects.filter(application=self.application):\n add_assessor_to_assessor_group(assessor, assessment.assessor_group)\n\n # forbidden\n urls_get_forbidden = [\n reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n ]\n urls_post_forbidden = [\n {\n 'url': reverse('wl_applications:create_condition', args=[self.application.pk]),\n 'data': {\n 'code': '123488374',\n 'text': 'condition text'\n }\n },\n {\n 'url': reverse('wl_applications:set_assessment_condition_state'),\n 'data': {\n 'assessmentConditionID': self.assessment_condition.pk,\n 'acceptanceStatus': 'accepted',\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n ]\n # Allowed\n urls_get_allowed = [\n reverse('wl_applications:search_conditions'),\n reverse('wl_applications:enter_conditions_assessor', args=[self.application.pk, self.assessment.pk]),\n ]\n urls_post_allowed = [\n {\n 'url': reverse('wl_applications:enter_conditions_assessor',\n args=[self.application.pk, self.assessment.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n ]\n for url in urls_get_forbidden:\n response = self.client.get(url, follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_post_forbidden:\n response = self.client.post(url['url'], url['data'], follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_get_allowed:\n response = self.client.get(url, follow=True)\n self.assertEqual(200, response.status_code)\n\n for url in urls_post_allowed:\n response = self.client.post(url['url'], url['data'], follow=True)\n self.assertEqual(200, response.status_code)", "def _generate_actions(self) -> list:\n pass", "def deploy_handler_with_advance_op(self):\n bucket = [\"src_bucket\", \"metadata\",\"advance_op\"]\n self.check_for_buckets(bucket)\n self.__deploy_function(EXPORTED_FUNCTION.ADVANCE_OP)", "def test_emtpy_conflict_places(conflict_places):\n assert conflict_places.named_place(\"Woodshop\") == None", "def clean_up(exc):\n\n FORBIDDEN_FIELDS_TECH = [\n \"categories\",\n ]\n\n FORBIDDEN_FIELDS_BIO = [\"location\", \"product\"]\n\n for field in 
list(exc.keys()):\n if exc[field] is None or exc[field] == \"None\":\n del exc[field]\n continue\n\n if exc[\"type\"] == \"biosphere\" and field in FORBIDDEN_FIELDS_BIO:\n del exc[field]\n if exc[\"type\"] == \"technosphere\" and field in FORBIDDEN_FIELDS_TECH:\n del exc[field]\n\n return exc", "def check(self):\r\n for action in self._actions:\r\n action.check()", "def corporate_action_restricted_assets(self) -> Tuple[str, ...]:\n return self.__corporate_action_restricted_assets", "def step_impl(context, objects_type):\n\n log.info(\"=====> From the STB verify that the HTTP Cache is built and the objects from the nsa file are available to query\")\n\n if objects_type == \"ObjectsTest1\":\n object_list = resourceset_parameters.ObjectsTest1\n elif objects_type == \"ObjectsTest2\":\n object_list = resourceset_parameters.ObjectsTest2\n elif objects_type == \"ObjectsTest4\":\n object_list = resourceset_parameters.ObjectsTest4\n else:\n assert False, \" ****> Failed: No objects_type parameter while posting. Got: {objects_type}\".format(objects_type=objects_type)\n\n for i in object_list:\n verify_object_available_in_stb(context, resourceset_parameters.Object_names[i])", "def soft_assert_bulk_verify_filter_functionality(page, modal, exp_asmt,\n soft_assert):\n filter_section_element = modal.filter_section.expand()\n if not isinstance(page, dashboard.MyAssessments):\n soft_assert.expect(\n filter_section_element.get_mapped_to_audit_filter() == exp_asmt.audit,\n \"'Filter by Mapping' section should contain title of opened audit.\")\n else:\n filter_section_element.add_mapping_filter(\n objects.get_singular(objects.AUDITS, title=True),\n element.Common.TITLE, exp_asmt.audit)\n filter_section_element.apply()\n base.Test.general_equal_soft_assert(\n soft_assert, [exp_asmt],\n webui_service.AssessmentsService().get_objs_from_bulk_update_modal(\n modal, with_second_tier_info=True),\n *exp_asmt.bulk_update_modal_tree_view_attrs_to_exclude)", "def get_mapping_actions(image=None, imageId=None, in_digests=[], bundle={}):\n\n if not image or not bundle:\n raise Exception(\"input error\")\n\n if not verify_policy_bundle(bundle=bundle):\n raise Exception(\"input bundle does not conform to bundle schema\")\n\n ret = []\n \n image_infos = []\n\n image_info = anchore_utils.get_all_image_info(image)\n if image_info and image_info not in image_infos:\n image_infos.append(image_info)\n\n for m in bundle['mappings']:\n polname = m['policy_id']\n wlnames = m['whitelist_ids']\n\n for image_info in image_infos:\n #_logger.info(\"IMAGE INFO: \" + str(image_info))\n ii = {}\n ii.update(image_info)\n registry = ii.pop('registry', \"N/A\")\n repo = ii.pop('repo', \"N/A\")\n\n tags = []\n fulltag = ii.pop('fulltag', \"N/A\")\n if fulltag != 'N/A':\n tinfo = anchore_utils.parse_dockerimage_string(fulltag)\n if 'tag' in tinfo and tinfo['tag']:\n tag = tinfo['tag']\n\n for t in [image, fulltag]:\n tinfo = anchore_utils.parse_dockerimage_string(t)\n if 'tag' in tinfo and tinfo['tag'] and tinfo['tag'] not in tags:\n tags.append(tinfo['tag'])\n\n digest = ii.pop('digest', \"N/A\")\n digests = [digest]\n for d in image_info['digests']:\n dinfo = anchore_utils.parse_dockerimage_string(d)\n if 'digest' in dinfo and dinfo['digest']:\n digests.append(dinfo['digest'])\n \n p_ids = []\n p_names = []\n for p in bundle['policies']:\n p_ids.append(p['id'])\n p_names.append(p['name'])\n\n wl_ids = []\n wl_names = []\n for wl in bundle['whitelists']:\n wl_ids.append(wl['id'])\n wl_names.append(wl['name'])\n \n if polname not in p_ids:\n 
_logger.info(\"policy not in bundle: \" + str(polname))\n continue\n\n skip=False\n for wlname in wlnames:\n if wlname not in wl_ids:\n _logger.info(\"whitelist not in bundle\" + str(wlname))\n skip=True\n if skip:\n continue\n\n mname = m['name']\n mregistry = m['registry']\n mrepo = m['repository']\n if m['image']['type'] == 'tag':\n mtag = m['image']['value']\n mdigest = None\n mimageId = None\n elif m['image']['type'] == 'digest':\n mdigest = m['image']['value']\n mtag = None\n mimageId = None\n elif m['image']['type'] == 'id':\n mimageId = m['image']['value']\n mtag = None\n mdigest = None\n else:\n mtag = mdigest = mimageId = None\n\n if registry == mregistry or mregistry == '*':\n _logger.debug(\"checking mapping for image (\"+str(image_info)+\") match.\")\n\n if repo == mrepo or mrepo == '*':\n doit = False\n matchstring = mname + \": N/A\"\n if tag and (mtag == '*' or mtag == tag or mtag in tags):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mtag])\n doit = True\n elif digest and (mdigest == digest or mdigest in in_digests or mdigest in digests):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mdigest])\n doit = True\n elif imageId and (mimageId == imageId):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mimageId])\n doit = True\n\n matchstring = matchstring.encode('utf8')\n if doit:\n _logger.debug(\"match found for image (\"+str(matchstring)+\")\")\n\n wldata = []\n wldataset = set()\n for wlname in wlnames:\n wldataset = set(list(wldataset) + extract_whitelist_data(bundle, wlname))\n wldata = list(wldataset)\n\n poldata = extract_policy_data(bundle, polname)\n \n wlnames.sort()\n evalstr = ','.join([polname] + wlnames)\n evalhash = hashlib.md5(evalstr).hexdigest()\n ret.append( ( poldata, wldata, polname,wlnames, matchstring, m, evalhash) )\n return(ret)\n else:\n _logger.debug(\"no match found for image (\"+str(image_info)+\") match.\")\n else:\n _logger.debug(\"no match found for image (\"+str(image_info)+\") match.\")\n\n return(ret)", "def aws_permissions(self, perms):\n for perm in perms:\n group = perm.get(\"Group\")\n if group:\n self.allowed_groups.append(group)\n\n user = perm.get(\"UserId\")\n if user:\n self.allowed_users.append(user)", "def get_legal_actions(self, index):\n actions = []\n agent = self.agent_states[index]\n for action in ACTIONS:\n pos = agent.pos[0] + action[0], agent.pos[1] + action[1]\n if MAP[pos[0]][pos[1]] not in WALL:\n actions.append(action)\n return actions", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action(self):\n pass", "def test_view_acls(self):\n v1, v2, v3 = set_resources_and_sync([\n make_video(\n media_id='123', acl=['USER_spqr1', 'USER_abcd1', 'INST_botolph', 'GROUP_1234']),\n make_video(media_id='456', acl=['WORLD']),\n make_video(media_id='789', acl=['CAM']),\n ])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n i2 = mpmodels.MediaItem.objects.get(jwp__key=v2.key)\n i3 = mpmodels.MediaItem.objects.get(jwp__key=v3.key)\n\n self.assertEqual(i1.view_permission.crsids, ['spqr1', 'abcd1'])\n self.assertEqual(i1.view_permission.lookup_groups, ['1234'])\n self.assertEqual(i1.view_permission.lookup_insts, ['botolph'])\n self.assertFalse(i1.view_permission.is_public)\n self.assertFalse(i1.view_permission.is_signed_in)\n\n self.assertEqual(i2.view_permission.crsids, [])\n self.assertEqual(i2.view_permission.lookup_groups, [])\n self.assertEqual(i2.view_permission.lookup_insts, [])\n self.assertTrue(i2.view_permission.is_public)\n 
self.assertFalse(i2.view_permission.is_signed_in)\n\n self.assertEqual(i3.view_permission.crsids, [])\n self.assertEqual(i3.view_permission.lookup_groups, [])\n self.assertEqual(i3.view_permission.lookup_insts, [])\n self.assertFalse(i3.view_permission.is_public)\n self.assertTrue(i3.view_permission.is_signed_in)", "def test_resource_actions(self):\n test_resource = ResourceTypeName.get()\n expected_actions = sorted(['rt:get', 'rt:put', 'rt:update', 'rt:delete'])\n self.app.post(\n f'/v1/resource/{test_resource}',\n data=json.dumps({'actions': expected_actions}),\n headers=admin_headers)\n\n # Get the actions for a resource type\n resp = self.app.get(f'/v1/resource/{test_resource}/actions', headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n actions = json.loads(resp.body)['actions']\n self.assertEqual(actions, expected_actions)\n\n # Delete actions from a resource type\n modify_actions = expected_actions[-2:]\n resp = self.app.delete(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n resp = self.app.get(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n actions = sorted(json.loads(resp.body)['actions'])\n self.assertEqual(actions, expected_actions[:2])\n\n # OK returned when deleting actions not part of a resource type\n resp = self.app.delete(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n\n # Put actions into a resource type\n resp = self.app.put(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n resp = self.app.get(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n actions = sorted(json.loads(resp.body)['actions'])\n self.assertEqual(actions, expected_actions)\n\n # OK returned when putting actions already a part of a resource type.\n resp = self.app.put(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)", "def pre_check():\n try:\n x = os.environ['AWS_DEFAULT_REGION']\n except KeyError:\n print(\"FATAL ERROR:\")\n traceback.print_exc(file=sys.stdout)\n sys.exit(\"Please set your shell variables for AWS access\")\n del x", "def get_actions(self, request):\n actions = super(RateLimitedIPAdmin, self).get_actions(request)\n del actions['delete_selected']\n return actions", "def test_services_with_multiple_pages_elb(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n actions = [\n \"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer\",\n \"elasticloadbalancing:AttachLoadBalancerToSubnets\",\n \"elasticloadbalancing:ConfigureHealthCheck\",\n \"elasticloadbalancing:CreateAppCookieStickinessPolicy\",\n \"elasticloadbalancing:CreateLBCookieStickinessPolicy\",\n \"elasticloadbalancing:CreateLoadBalancerListeners\",\n \"elasticloadbalancing:CreateLoadBalancerPolicy\",\n \"elasticloadbalancing:DeleteLoadBalancerListeners\",\n \"elasticloadbalancing:DeleteLoadBalancerPolicy\",\n \"elasticloadbalancing:DeregisterInstancesFromLoadBalancer\",\n \"elasticloadbalancing:DescribeInstanceHealth\",\n \"elasticloadbalancing:DescribeLoadBalancerPolicies\",\n \"elasticloadbalancing:DescribeLoadBalancerPolicyTypes\",\n 
\"elasticloadbalancing:DetachLoadBalancerFromSubnets\",\n \"elasticloadbalancing:DisableAvailabilityZonesForLoadBalancer\",\n \"elasticloadbalancing:EnableAvailabilityZonesForLoadBalancer\",\n \"elasticloadbalancing:RegisterInstancesWithLoadBalancer\",\n \"elasticloadbalancing:SetLoadBalancerListenerSSLCertificate\",\n \"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer\",\n \"elasticloadbalancing:SetLoadBalancerPoliciesOfListener\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_snapshot_variables_action_spaces(self):\n pass", "def all_allowed_actions(self):\n actions = []\n for managed_policy in self.attached_managed_policies:\n actions.extend(managed_policy.policy_document.all_allowed_actions)\n for inline_policy in self.inline_policies:\n actions.extend(inline_policy.policy_document.all_allowed_actions)\n for group in self.groups:\n actions.extend(group.all_allowed_actions)\n actions = list(dict.fromkeys(actions))\n actions.sort()\n return actions", "def available_actions(cls, piles):\n actions = set()\n for i, pile in enumerate(piles):\n for j in range(1, piles[i] + 1):\n actions.add((i, j))\n return actions", "def actions():\n pass", "def describe_actions(self) -> Iterator[str]:\r\n yield \"Replace all DA attributes with anonymized values that precede the originals\"\r\n yield \"Replace all DT attributes with anonymized values that precede the originals\"\r\n yield \"Replace all TM attributes with anonymized values that precede the originals\"", "def getLegalMovingActions(state,agent):\n actions = state.getLegalActions(agent)\n # Removing 'Stop'\n if Directions.STOP in actions:\n actions.remove(Directions.STOP)\n return actions", "def test_add_azr_location(self):\n pass", "def actions(self, agent_state):\n raise NotImplementedError(\"Don't know what actions are available\")", "def test_box_actions_out_of_bound(env: gym.Env):\n env.reset(seed=42)\n\n oob_env = gym.make(env.spec.id, disable_env_checker=True)\n oob_env.reset(seed=42)\n\n assert isinstance(env.action_space, spaces.Box)\n dtype = env.action_space.dtype\n upper_bounds = env.action_space.high\n lower_bounds = env.action_space.low\n\n for i, (is_upper_bound, is_lower_bound) in enumerate(\n zip(env.action_space.bounded_above, env.action_space.bounded_below)\n ):\n if is_upper_bound:\n obs, _, _, _, _ = env.step(upper_bounds)\n oob_action = upper_bounds.copy()\n oob_action[i] += np.cast[dtype](OOB_VALUE)\n\n assert oob_action[i] > upper_bounds[i]\n oob_obs, _, _, _, _ = oob_env.step(oob_action)\n\n assert np.alltrue(obs == oob_obs)\n\n if is_lower_bound:\n obs, _, _, _, _ = env.step(\n lower_bounds\n ) # `env` is unwrapped, and in new step API\n oob_action = lower_bounds.copy()\n oob_action[i] -= np.cast[dtype](OOB_VALUE)\n\n assert oob_action[i] < lower_bounds[i]\n oob_obs, _, _, _, _ = oob_env.step(oob_action)\n\n assert np.alltrue(obs == oob_obs)\n\n env.close()", "def setUp(self):\n user = User.objects.create(email=\"[email protected]\", first_name=\"Test1\", last_name=\"User\")\n group = AnaGroup.objects.create(name=\"test group\")\n IAM.objects.create(user=user,\n aws_user=\"AWS user\",\n aws_access_key=\"AWS access key\",\n aws_secret_access_key=\"AWS secret key\",\n group=group)" ]
[ "0.58062184", "0.57554114", "0.5695702", "0.5673701", "0.5447731", "0.5414008", "0.53181934", "0.5090683", "0.5057209", "0.49688128", "0.49623215", "0.49491638", "0.49443293", "0.49354288", "0.4900723", "0.48934472", "0.48312107", "0.48288488", "0.47950754", "0.4768656", "0.4768656", "0.47588596", "0.47535774", "0.46872535", "0.46716824", "0.46657667", "0.46551266", "0.4650297", "0.46221343", "0.46129328", "0.46048793", "0.46030843", "0.4589791", "0.4587225", "0.45796263", "0.45755762", "0.45709822", "0.45583615", "0.45398676", "0.45375967", "0.45359737", "0.45192328", "0.45069426", "0.4497698", "0.4493605", "0.4488601", "0.44852486", "0.44758165", "0.4468897", "0.4468897", "0.4468897", "0.4468897", "0.44616726", "0.4461071", "0.44579223", "0.44554195", "0.44473514", "0.44452655", "0.444227", "0.4440376", "0.44328898", "0.44327727", "0.44325766", "0.44269857", "0.44258615", "0.44230312", "0.4422442", "0.4415729", "0.4394283", "0.43914425", "0.43861824", "0.43773124", "0.43732613", "0.43678373", "0.43650365", "0.4363115", "0.43604678", "0.43601972", "0.43586403", "0.4354752", "0.43527237", "0.43474957", "0.43405613", "0.4334881", "0.4330424", "0.43295276", "0.4323804", "0.4323179", "0.43227136", "0.43159863", "0.43155038", "0.43144587", "0.43103507", "0.43075073", "0.43047482", "0.430064", "0.42993727", "0.42972803", "0.42953792", "0.42911258" ]
0.66148233
0
Called when a mouse button is pressed in the widget. Adjust method signature as appropriate for callback.
def button_press_event(self, widget, event): x, y = event.x, event.y # x, y = coordinates where the button was pressed self.last_win_x, self.last_win_y = x, y button = 0 # Prepare a button mask with bits set as follows: # left button: 0x1 # middle button: 0x2 # right button: 0x4 # Others can be added as appropriate self.logger.debug("button down event at %dx%d, button=%x" % (x, y, button)) data_x, data_y = self.check_cursor_location() return self.make_ui_callback('button-press', button, data_x, data_y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_mouse_press(self, x, y, button):\n\n pass", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def handle_mouse_press(self, event):", "def mouse_press_event(self, x: int, y: int, button: int):\n pass", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def _press(self, event):", "def ev_mousebuttonup(self, event: MouseButtonUp) -> None:", "def on_mouse_release(self, x, y, button):\n pass", "def eventHandler(self, event: pygame.event):\n # change selected color if this button's rectangle was clicked\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n if self.rect.collidepoint(event.pos): # is mouse over button\n self.image = self._images[ButtonImages.CLICKING_IMAGE.value]\n self.beingClicked = True\n for func, *args in self.functionsToInvokeWhenClicked:\n func(*args)\n elif event.type == pygame.MOUSEBUTTONUP and self.beingClicked:\n if event.button == 1:\n self.beingClicked = False\n self.image = self._images[ButtonImages.DEFAULT_IMAGE.value]", "def press(self):\n self.clicked = True\n if self.command:\n self.command(self.name)", "def ev_mousebuttonup(self, event: tcod.event.MouseButtonUp) -> T | None:", "def bind(self):\n self.canvas.bind(\"<ButtonPress-1>\", self.click)", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def emitPressEvent(self, clickLocation, button, currentKbKey, items):\n # emit the mousePressEvent signal\n self.mousePress.emit(self, clickLocation, button, currentKbKey, items)", "def on_mouse_up(self, pos, mouse_button):\n for item in button.Button.all_buttons:\n if item.collidepoint(pos):\n self.buttons_clicked.append((item, mouse_button))\n item.on_click(mouse_button)", "def HandlePress(self, event: tkEvent):\n pass", "def HandButton(self, event):\n pass", "def input(self, event: pygame.event) -> None:\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n self.user_clicked = True", "def button_press_cb(self, source, event):\n\n if event.button == MOUSE_BUTTON_RIGHT:\n pass\n return True\n elif event.button == MOUSE_BUTTON_MIDDLE:\n self.emit('begin-move')\n return True", "def handle_mouse_click(self, button: Button) -> None:\n if button.name == 'BACK':\n self._clear_all_input()\n self.current_page -= 1\n self._focused_button = None\n if self.current_page == len(self.pages) - 2:\n self.current_page -= 1\n elif button.name == 'Show Graph':\n self._plot_graph()\n elif button.name == 'Multiple Regression':\n self._selection.handle_selection(self.current_page, button.name)\n self.current_page += 2\n self._update_ghg_coefs()\n elif button.tag == 'normal' and self.current_page < len(self.pages) - 2:\n self._selection.handle_selection(self.current_page, button.name)\n self.current_page += 1\n elif isinstance(button, InputButton):\n self._focused_button = button", "def on_press(self):\n self.pressed = True", "def on_press(self):\n self.pressed = True", "def button_release_event(self, widget, event):\n x, y = event.x, event.y\n\n # x, y = coordinates where the button was released\n self.last_win_x, self.last_win_y = x, y\n\n button = 0\n # prepare button mask as in button_press_event()\n\n data_x, data_y = self.check_cursor_location()\n\n return self.make_ui_callback('button-release', button, data_x, data_y)", "def mousePressEvent(self, mouse_event):\r\n return", "def __on_click(self, evt):\n if evt.button() == Qt.LeftButton:\n return self._on_left_click(evt)\n if evt.button() == Qt.RightButton:\n return 
self._on_right_click(evt)", "def _press(self, event):\n # make the drawn box/line visible get the click-coordinates,\n # button, ...\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if ((self._active_handle is None or not self._interactive) and\n self._allow_creation):\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n if (self._active_handle is None and not self.ignore_event_outside and\n self._allow_creation):\n x = event.xdata\n y = event.ydata\n self._visible = False\n self.extents = x, x, y, y\n self._visible = True\n else:\n self.set_visible(True)\n\n self._extents_on_press = self.extents\n self._rotation_on_press = self._rotation\n self._set_aspect_ratio_correction()\n\n return False", "def button_press_cb(self, darea, event):\n x, y = event.x, event.y\n self.draw_pointer(self.cr, x, y)\n self.queue_draw()\n self.oldx, self.oldy = x, y\n rel_x, rel_y = self.absolute_to_relative(x, y)\n self.emit('dnd-value', rel_x, rel_y)\n self.emit('start-dnd')\n return True", "def m_press(self, button: MButton):\n pass", "def ev_MOUSEDOWN(self, event):", "def on_key_press(self, event):\n\n #print(\"you pressed {}\".format(event.key))\n key_press_handler(event, self.canvas, self.toolbar)", "def ev_MOUSEUP(self, event):", "def ev_mousebuttondown(self, event):\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)", "def mouseReleaseEvent(self, event):\n button = event.button()\n\n # select an item on which we clicked\n item = self.itemAt(event.x(), event.y())\n if item:\n self.setCurrentItem(item)\n if button == 1:\n print \"SIMPLE LEFT CLICK\"", "def _on_button_press_event(self, widget, event):\n if event.button == 3:\n self.menu.popup(None, None, None, None, event.button, event.time)\n self.menu.show_all()", "def _press(self, event):\n self._set_cursor(True)\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if self._active_handle is None or not self._interactive:\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n v = event.xdata if self.direction == 'horizontal' else event.ydata\n # self._pressv and self._prev are deprecated but we still need to\n # maintain them\n self._pressv = v\n self._prev = self._get_data(event)\n\n if self._active_handle is None and not self.ignore_event_outside:\n # when the press event outside the span, we initially set the\n # visibility to False and extents to (v, v)\n # update will be called when setting the extents\n self._visible = False\n self.extents = v, v\n # We need to set the visibility back, so the span selector will be\n # drawn when necessary (span width > 0)\n self._visible = True\n else:\n self.set_visible(True)\n\n return False", "def mouse_click(self,x,y,button,double_click):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")", "def mouse_release_event(self, x: int, y: int, button: int):\n pass", "def set_mouseclick_handler(self, mouse_handler):\n STmouse.Mouse(self.canvas, '<Button-1>', mouse_handler)", "def OnButton(self, event):\r\n \r\n button = event.GetInt()\r\n\r\n if button == AUI_BUTTON_LEFT or button == AUI_BUTTON_RIGHT:\r\n if button == AUI_BUTTON_LEFT:\r\n if self.GetTabOffset() > 0:\r\n \r\n self.SetTabOffset(self.GetTabOffset()-1)\r\n self.Refresh()\r\n self.Update()\r\n else:\r\n 
self.SetTabOffset(self.GetTabOffset()+1)\r\n self.Refresh()\r\n self.Update()\r\n \r\n elif button == AUI_BUTTON_WINDOWLIST:\r\n idx = self.GetArtProvider().ShowDropDown(self, self._pages, self.GetActivePage())\r\n \r\n if idx != -1:\r\n \r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_PAGE_CHANGING, self.GetId())\r\n e.SetSelection(idx)\r\n e.SetOldSelection(self.GetActivePage())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)\r\n \r\n else:\r\n event.Skip()", "def cb_something_4(self, button): \n print(\"Do Something 4\")", "def handle_press( self, x, y ):\n self.pressed_flag = True\n self.first_point = (x, y)", "def _on_key_press(self, event):", "def on_pushButton_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def on_pushButton_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))", "def mousePressed(self, _evt, _id):\n _widget = None\n \n if _id == ois.MB_Left:\n _widget = self._mouseLeft\n elif _id == ois.MB_Right:\n _widget = self._mouseRight\n elif _id == ois.MB_Middle:\n _widget = self._mouseMiddle\n \n if _widget is not None:\n self._addLinearAnimation(_widget, 1.0)\n \n return False", "def _on_pyglet_mouse_click(self, x, y, button, modifiers):\n button_time = clock()\n this_button = self._button_names[button]\n self._mouse_buffer.append((this_button, x, y, button_time))", "def on_mouse_press(self, x, y, button, modifiers):\n\n self.gamestatemanager.peek().on_mouse_press(x, y, button, modifiers)\n\n if self.exclusive:\n self.gamestatemanager.peek().on_mouse_press(x, y, button, modifiers)\n else:\n self.set_exclusive_mouse(True)", "def handle_mousedown(self, button, name):\r\n x = widget.Widget.handle_mousedown(self, button, name)\r\n if not self.mouse_on_me():\r\n return False\r\n if not self.get_visible():\r\n return False\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mousedown(button, name):\r\n return True\r\n return x", "def _left_button_press_event(self, obj, event):\n #print('area_picker - left_button_press_event')\n self.OnLeftButtonDown()\n pixel_x, pixel_y = self.parent.vtk_interactor.GetEventPosition()\n self.picker_points.append((pixel_x, pixel_y))", "def OnButton(self, event):\n button = event.GetEventObject().GetName()\n if button == \"Button1\":\n self.OnButton1()\n elif button == \"Button2\":\n self.OnButton2()\n elif button == \"Button3\":\n self.OnExit(event)", "def OnLeftUp_ClickButton(self, event):\r\n \r\n self._hover_button = None\r\n\r\n if self._action_part:\r\n self.RefreshButton(self._action_part)\r\n\r\n # make sure we're still over the item that was originally clicked\r\n if self._action_part == self.HitTest(*event.GetPosition()):\r\n \r\n # fire button-click event\r\n e = AuiManagerEvent(wxEVT_AUI_PANE_BUTTON)\r\n e.SetManager(self)\r\n e.SetPane(self._action_part.pane)\r\n e.SetButton(self._action_part.button.button_id)\r\n self.ProcessMgrEvent(e)", "def mousePressEvent(self, event):\n #sw = self.spw.windows['Sort']\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #sw.on_actionSelectRandomSpikes_triggered()\n #sw.spykewindow.plotButton.click() # same as hitting ENTER in nslist\n self.selecting = True\n self.setMouseTracking(True) # while selecting\n self.selectPointsUnderCursor()\n self.lastPressPos = QtCore.QPoint(event.pos())\n self.lastPos = QtCore.QPoint(event.pos())", "def mousePressEvent(self, event): \n if event.type() == 
qtc.QEvent.MouseButtonPress:\n if event.button() == qtc.Qt.RightButton:\n self.right_click_event()\n\n elif event.button() == qtc.Qt.LeftButton:\n self.left_click_event(event)\n self.mouseStartPosY = event.pos().y()\n self.startValue = self.value()", "def mousePressEvent(self, event):\n #sw = self.spw.windows['Sort']\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #sw.on_actionSelectRandomSpikes_triggered()\n #sw.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist\n self.selecting = True\n self.setMouseTracking(True) # while selecting\n self.selectPointsUnderCursor()\n self.lastPressPos = QtCore.QPoint(event.pos())\n self.lastPos = QtCore.QPoint(event.pos())", "def _left_button_release_event(self, obj, event):\n #self.OnLeftButtonUp()\n pixel_x, pixel_y = self.parent.vtk_interactor.GetEventPosition()\n #selector = vtk.vtkVisibleCellSelector()\n\n self.picker_points.append((pixel_x, pixel_y))\n\n #print(self.picker_points)\n if len(self.picker_points) == 2:\n p1x, p1y = self.picker_points[0]\n p2x, p2y = self.picker_points[1]\n self.picker_points = []\n xmin = min(p1x, p2x)\n ymin = min(p1y, p2y)\n xmax = max(p1x, p2x)\n ymax = max(p1y, p2y)\n #print(self.picker_points)\n #print('_area_pick_left_button_release', cell_id)\n\n dx = abs(p1x - p2x)\n dy = abs(p1y - p2y)\n self.picker_points = []\n if dx > 0 and dy > 0:\n if self._pick_visible:\n self._pick_visible_ids(xmin, ymin, xmax, ymax)\n else:\n self._pick_depth_ids(xmin, ymin, xmax, ymax)\n self.parent.vtk_interactor.Render()\n self.picker_points = []", "def handle_mousedown(self, button, name):\r\n if self.get_visible():\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mousedown(button, name):\r\n return True\r\n return False", "def cb_something_1(self, button):\n print(\"Do Something 1\")", "def signal_from_widget(self, event):\n self.keyPressEvent(event)", "def OnLeftUp(self, event):\r\n\r\n self._on_button = False\r\n \r\n if self._is_dragging:\r\n\r\n if self.HasCapture():\r\n self.ReleaseMouse()\r\n \r\n self._is_dragging = False\r\n if self._drag_image:\r\n self._drag_image.EndDrag()\r\n del self._drag_image\r\n self._drag_image = None\r\n self.GetParent().Refresh()\r\n\r\n evt = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_END_DRAG, self.GetId())\r\n evt.SetSelection(self.GetIdxFromWindow(self._click_tab))\r\n evt.SetOldSelection(evt.GetSelection())\r\n evt.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(evt)\r\n\r\n return\r\n\r\n if self.HasCapture():\r\n self.ReleaseMouse()\r\n \r\n if self._pressed_button:\r\n \r\n # make sure we're still clicking the button\r\n button = self.ButtonHitTest(event.GetX(), event.GetY())\r\n \r\n if button is None:\r\n return\r\n\r\n if button != self._pressed_button:\r\n self._pressed_button = None\r\n return\r\n \r\n self.Refresh()\r\n self.Update()\r\n\r\n if self._pressed_button.cur_state & AUI_BUTTON_STATE_DISABLED == 0:\r\n \r\n evt = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_BUTTON, self.GetId())\r\n evt.SetSelection(self.GetIdxFromWindow(self._click_tab))\r\n evt.SetInt(self._pressed_button.id)\r\n evt.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(evt)\r\n \r\n self._pressed_button = None\r\n \r\n self._click_pt = wx.Point(-1, -1)\r\n self._is_dragging = False\r\n self._click_tab = None", "def click_button(self):\n self.widgets.get('button').click()", "def mousePressEvent(self, event):\n if event.buttons() == QtCore.Qt.LeftButton:\n self.view_state.mouse = np.array([event.x(), event.y()])", "def 
on_toolButton_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def button_release_cb(self, darea, event):\n self.oldx, self.oldy = event.x, event.y\n self.draw_pointer(self.cr, None, None)\n self.queue_draw()\n self.oldx, self.oldy = None, None\n self.emit('end-dnd')\n return True", "def __check_if_got_pressed(self):\n mouse_x_pos,mouse_y_pos = pg.mouse.get_pos()\n\n if utilitiez.on_object(self.rect.x, self.rect.y, self.rect.width, self.rect.height, mouse_x_pos, mouse_y_pos,\n MOUSE_WIDTH, MOUSE_HEIGHT):\n self.__on_click()", "def _onscreenclick(self, fun, num=1, add=None):\n if fun is None:\n self.cv.unbind(\"<Button-%s>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n self.cv.bind(\"<Button-%s>\" % num, eventfun, add)", "def onMouseLeftDown(self, event):\n # [NOTE] No need to call self.choice(). It is enough to call\n # event.Skip() and the machine will be called self.OnButtonClick()\n event.Skip()", "def _pressed(self, evt):\n x, y, widget = evt.x, evt.y, evt.widget\n item = widget.identify_row(y)\n column = widget.identify_column(x)\n\n if not column or not item in self._items:\n # clicked in the weekdays row or just outside the columns\n return\n\n item_values = widget.item(item)['values']\n if not len(item_values): # row is empty for this month\n return\n\n text = item_values[int(column[1]) - 1]\n if not text: # date is empty\n return\n\n bbox = widget.bbox(item, column)\n if not bbox: # calendar not visible yet\n return\n\n # update and then show selection\n text = '%02d' % text\n self._selection = (text, item, column)\n self._show_selection(text, bbox)", "def _on_key_release(self, event):", "def cb_something_3(self, button):\n print(\"Do Something 3\")", "def _onrelease(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button%s-ButtonRelease>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n self.cv.tag_bind(item, \"<Button%s-ButtonRelease>\" % num,\n eventfun, add)", "def get_event(self, event):\n if event.type == pg.MOUSEBUTTONDOWN and event.button == 1:\n if self.rect.collidepoint(event.pos):\n self.toggle()", "def _click(self):\n if hasattr(self.canvas[\"items\"][self.index], 'commandFunc'):\n self.canvas[\"items\"][self.index].commandFunc(None)", "def click(self):\r\n pass", "def on_mouse_press(self, x, y, button, modifiers):\n self.add_wall()", "def was_pressed(self) -> bool:", "def click(self, x, y, button, press):\n\n if self.is_in_screen(x, y) and not self.pause:\n self.get_color(x, y)\n self.record(x, y, button, press)", "def on_mouse_click(self, event):\n if not self.is_game_over:\n try:\n # i, j coordinates of the click event\n i = int(round(event.ydata))\n j = int(round(event.xdata))\n\n # Left button\n if event.button == 1 or event.button == 2:\n self.reveal(i, j)\n\n # Right button\n elif event.button == 3:\n self.flag(i, j)\n\n except (TypeError, IndexError):\n pass", "def pushButtonClicked(self, but_id, button):\n self.ui.tv_bindings.clearSelection()\n lstMatch = self.ui.tv_bindings.findItems(but_id, QtCore.Qt.MatchExactly, 0)[0]\n lstMatch.setSelected(True)\n lstMatch.setText(1, '[Press a key]')\n button.installEventFilter(self)\n self.efButton = button # Not elegant, but.... 
works", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def mousePressEvent(self, event):\n self._use_zinc_mouse_event_handling = False # Track when zinc should be handling mouse events\n if self._ignore_mouse_events:\n event.ignore()\n return\n\n event.accept()\n if event.button() not in button_map:\n return\n \n self._selection_position_start = (event.x(), event.y())\n\n if button_map[event.button()] == Sceneviewerinput.BUTTON_TYPE_LEFT\\\n and self._selectionKeyPressed and (self._nodeSelectMode or self._elemSelectMode):\n self._selection_mode = SelectionMode.EXCLUSIVE\n if event.modifiers() & QtCore.Qt.SHIFT:\n self._selection_mode = SelectionMode.ADDITIVE\n else:\n scene_input = self._sceneviewer.createSceneviewerinput()\n scene_input.setPosition(event.x(), event.y())\n scene_input.setEventType(Sceneviewerinput.EVENT_TYPE_BUTTON_PRESS)\n scene_input.setButtonType(button_map[event.button()])\n scene_input.setModifierFlags(modifier_map(event.modifiers()))\n self._sceneviewer.processSceneviewerinput(scene_input)\n self._use_zinc_mouse_event_handling = True", "def on_click(self, event_callable, ret_widget_values=None, block_signal=False):\n #TODO Implementation of ret_widget_values\n #TODO Implementation of block_signal?? or removal\n self.on_click_callable = event_callable\n self._raw_toolbar.onClick(\n self.on_click_return,\n ret_widget_values=ret_widget_values,\n block_signal=block_signal\n )", "def on_mouse_press(self, x, y, button, modifiers):\n \n menu: Menu = self.get_menu_for_display()\n\n menu_click_x, menu_click_y = self.get_menu_click(menu, x, y)\n\n if button == arcade.MOUSE_BUTTON_LEFT:\n if menu:\n menu.button_list.check_mouse_press_for_buttons(\n menu_click_x,\n menu_click_y,\n )", "def LeftClick(self):\n self._PressLeftButton()\n self._ReleaseAllButtons()", "def leftButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.LEFT_BUTTON)", "def HandleKeyboardInput(self):\n key = yg.getKeyPress()\n if key == \"Return\":\n self.buttons[len(self.buttons) - 1].Click()", "def on_click(self, x, y):\n self.menu_pointer.on_click(x, y)", "def mousePressEvent(self, event): \n if event.type() == qtc.QEvent.MouseButtonPress:\n if event.button() == qtc.Qt.LeftButton:\n self.mouseStartPosY = event.pos().y()\n self.startValue = self.value()\n\n elif event.button() == qtc.Qt.MidButton:\n self.set_value_to_default()", "def button_press_event(self, widget, event, menu):\n\t\tif event.type == gtk.gdk.BUTTON_PRESS and event.button == 3:\n\t\t\tmenu.popup(None, None, None, event.button, event.time)\n\t\treturn False", "def set_events(self):\r\n\r\n self.canvas.bind(\"<Button-1>\", self.event_click_left)\r\n self.bind(\"<Return>\", self.event_return)", "def when_pressed(self, button, func, *args):\n\n self.hardware_interfaces[self._gpio].set_pin_event(self._b_names[button],\n func,\n *args)", "def handle_button(self, event, event_type):\n # 0 for left\n # 1 for right\n # 2 for middle/center\n # 3 for side\n mouse_button_number = self._get_mouse_button_number(event)\n\n # Identify buttons 3,4,5\n if event_type in (25, 26):\n event_type = event_type + (mouse_button_number * 0.1)\n\n # Add buttons to events\n event_type_string, event_code, value, scan = self.codes[event_type]\n if event_type_string == \"Key\":\n scan_event, key_event = self.emulate_press(\n event_code, scan, value, self.timeval)\n self.events.append(scan_event)\n self.events.append(key_event)\n\n # doubleclick/n-click of button\n click_state = self._get_click_state(event)\n\n repeat = self.emulate_repeat(click_state, 
self.timeval)\n self.events.append(repeat)", "def onButtonPress(self, event):\n\n if event.xdata and event.ydata:\n self.emit(QtCore.SIGNAL(\"positionSelected(float, float)\"),\n float(event.xdata), float(event.ydata))", "def mousePressEvent(self, ev):\n super(PlotObject, self).mousePressEvent(ev)\n self._downpos = self.mousePos", "def check_event(self, event):\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if self.selected:\r\n for item in self.buttons:\r\n item.handleMouseDown(event.pos[0], event.pos[1])\r\n else:\r\n self.tab.handleMouseDown(event.pos[0], event.pos[1])", "def handle_mouseup(self, button, name):\r\n if self.get_visible():\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mouseup(button, name):\r\n return True\r\n return False", "def on_click(self, x, y):\n mul_x, mul_y = self.multiplier\n off_x, off_y = self.offset\n x -= off_x\n x /= mul_x\n y -= off_y\n y /= mul_y\n for button in self.button_dict.values():\n button.check_click(x, y)", "def get_pressed(self): \n raise NotImplementedError", "def pressed(self) -> bool:\n return self.type == \"JOYBUTTONDOWN\"" ]
[ "0.8186688", "0.7803107", "0.7685904", "0.7667033", "0.7550329", "0.75264764", "0.74540734", "0.74537903", "0.7434162", "0.71306", "0.71141076", "0.7086629", "0.70717835", "0.70475805", "0.70216006", "0.70136315", "0.69730556", "0.69179136", "0.69159424", "0.69106615", "0.69092005", "0.69086593", "0.69086593", "0.6883387", "0.6881508", "0.687479", "0.68471086", "0.6836545", "0.6808178", "0.67737955", "0.67535317", "0.6742284", "0.6738339", "0.6737025", "0.6701703", "0.6678614", "0.66766125", "0.6574762", "0.65687996", "0.65682995", "0.6563857", "0.6554848", "0.65425086", "0.6528905", "0.6528905", "0.6512811", "0.65086156", "0.6495434", "0.64905447", "0.6487096", "0.64787346", "0.64631367", "0.6460059", "0.6455883", "0.6452478", "0.644119", "0.6429059", "0.6428801", "0.64114636", "0.6406296", "0.63963807", "0.6394005", "0.63830185", "0.6379297", "0.63791144", "0.63778967", "0.6376003", "0.6373461", "0.63720185", "0.6364364", "0.6363386", "0.6356046", "0.634816", "0.6344864", "0.63408995", "0.63391787", "0.63276494", "0.6326992", "0.6322749", "0.631902", "0.6310714", "0.6308334", "0.6304208", "0.62975895", "0.62935483", "0.62914467", "0.6287971", "0.6283372", "0.6277994", "0.6276328", "0.62707454", "0.6270045", "0.6268654", "0.6268415", "0.62662643", "0.62619746", "0.62611157", "0.6258642", "0.6252097", "0.62520045" ]
0.786055
1
Called when a mouse button is released after being pressed. Adjust method signature as appropriate for callback.
def button_release_event(self, widget, event): x, y = event.x, event.y # x, y = coordinates where the button was released self.last_win_x, self.last_win_y = x, y button = 0 # prepare button mask as in button_press_event() data_x, data_y = self.check_cursor_location() return self.make_ui_callback('button-release', button, data_x, data_y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_release_event(self, x: int, y: int, button: int):\n pass", "def on_mouse_release(self, x, y, button):\n pass", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def OnMouseUp(self, evt):\n self.ReleaseMouse()", "def emitReleaseEvent(self, clickLocation, button, currentKbKey, items):\n # emit the mouseReleaseEvent signal\n self.mouseRelease.emit(self, clickLocation, button, currentKbKey, items)", "def ev_mousebuttonup(self, event: MouseButtonUp) -> None:", "def on_release(self):\n self.pressed = False", "def on_release(self):\n self.pressed = False", "def button_release(self, event: Any) -> None:\n if event.button == 1:\n self.left_button_down = False\n if event.button == 2:\n self.middle_button_down = False\n if event.button == 3:\n self.right_button_down = False", "def release():\n gui.mouseUp()", "def ev_mousebuttonup(self, event: tcod.event.MouseButtonUp) -> T | None:", "def mouse_release(self):\n\n # play button press\n if self.play_button.is_active:\n # change to gameplay\n self.switch_context(game.GameContext)", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def release(button='left', coords=(0, 0)):\n _perform_click_input(button=button, coords=coords, button_down=False, button_up=True)", "def _release(self, event):", "def HandleRelease(self, event: tkEvent):\n pass", "def button_release_cb(self, darea, event):\n self.oldx, self.oldy = event.x, event.y\n self.draw_pointer(self.cr, None, None)\n self.queue_draw()\n self.oldx, self.oldy = None, None\n self.emit('end-dnd')\n return True", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def m_release(self, button: MButton):\n pass", "def releaseKeyButtons(self):\n self._myKey.removeKeyButtonEvent([\n CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B,\n CONFIG_KEY.BUTTON_JOY_UP,\n CONFIG_KEY.BUTTON_JOY_DOWN,\n CONFIG_KEY.BUTTON_JOY_LEFT,\n CONFIG_KEY.BUTTON_JOY_RIGHT,\n CONFIG_KEY.BUTTON_JOY_OK\n ])", "def key_release_event(self, event):\n pass", "def mouseReleaseEvent(self, event):\n super(QIntSpinner3DS, self).mousePressEvent(event)\n super(QIntSpinner3DS, self).mouseReleaseEvent(event)\n self.unsetCursor()", "def _on_key_release(self, event):", "def _onrelease(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button%s-ButtonRelease>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n self.cv.tag_bind(item, \"<Button%s-ButtonRelease>\" % num,\n eventfun, add)", "def on_mouse_release(self, x, y, button, modifiers):\n \n menu: Menu = self.get_menu_for_display()\n\n menu_click_x, menu_click_y = self.get_menu_click(menu, x, y)\n\n if button == arcade.MOUSE_BUTTON_LEFT:\n if menu:\n menu.button_list.check_mouse_release_for_buttons(\n menu_click_x,\n menu_click_y,\n )", "def unpress(self):\n if self.unclick:\n self.clicked = False", "def on_mouse_press(self, x, y, button):\n\n pass", "def mouseReleaseEventEnabled(self, ev):\n\n self._btns.remove(ev.button())", "def __mouse_release(self, event, right_click=False):\n global choose_rectangle\n if right_click:\n return\n if choose_rectangle:\n self.__finish_rectangle(event)", "def _OnMplMouseRelease( self, ev ):\n if ev.button == 3:\n ev.guiEvent.Skip()", "def mouseReleaseEvent(self, event):\n # super(PlotWidget, self).mouseReleaseEvent(event)\n event.accept()", "def _release(self, event):\n self._set_cursor(False)\n # self._pressv is deprecated but we still need to maintain it\n 
self._pressv = None\n\n if not self._interactive:\n self._selection_artist.set_visible(False)\n\n if (self._active_handle is None and self._selection_completed and\n self.ignore_event_outside):\n return\n\n vmin, vmax = self.extents\n span = vmax - vmin\n\n if span <= self.minspan:\n # Remove span and set self._selection_completed = False\n self.set_visible(False)\n if self._selection_completed:\n # Call onselect, only when the span is already existing\n self.onselect(vmin, vmax)\n self._selection_completed = False\n else:\n self.onselect(vmin, vmax)\n self._selection_completed = True\n\n self.update()\n\n self._active_handle = None\n\n return False", "def mouseReleased():\n if not game_controller.game_over:\n if game_controller.falling_disk and \\\n game_controller.falling_disk.y_vel == 0:\n game_controller.handle_mouseReleased()", "def keyReleaseEvent(self, event):\n self.game_engine.input_manager.keyReleaseEvent(event)", "def handle_mouse_press(self, event):", "def on_mouse_release(self, x: float, y: float, button: int, modifiers: int):\n if self.heldLetter is not None:\n self.active_blocks.remove(self.heldLetter)\n self.moving_blocks.append(self.heldLetter)\n if len(arcade.get_sprites_at_point((x, y), self.inactive_blocks)) == 0 and x < BOARD_WIDTH:\n letter_x, letter_y = self.nearest_cell(x, y)\n self.heldLetter.place(letter_x, letter_y)\n self.board_temp[int((letter_x-SLOT_WIDTH/2)/SLOT_WIDTH)][int((letter_y - SLOT_HEIGHT/2)/SLOT_HEIGHT)] = self.heldLetter\n else:\n self.heldLetter.return_home()\n self.heldLetter = None", "def OnTokenButtonRelease(self, event):\n self._drag_data = {\"x\": 0, \"item\": None}\n\n # Rebind the main GUI buttons because they are unbinded while dragging the beats\n self.myMainGUI.root.after(200, self.myMainGUI.bindButtons)", "def mouseReleaseEvent(self, event):\n button = event.button()\n\n # select an item on which we clicked\n item = self.itemAt(event.x(), event.y())\n if item:\n self.setCurrentItem(item)\n if button == 1:\n print \"SIMPLE LEFT CLICK\"", "def mouse_press_event(self, x: int, y: int, button: int):\n pass", "def on_mouse_up(self, pos, mouse_button):\n for item in button.Button.all_buttons:\n if item.collidepoint(pos):\n self.buttons_clicked.append((item, mouse_button))\n item.on_click(mouse_button)", "def keyReleaseEvent (self, event):\n super(DiagramScene, self).keyReleaseEvent(event)", "def isButtonReleased() -> bool:\n pass", "def ev_joybuttondown(self, event: tcod.event.JoystickButton) -> T | None:", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass", "def check_mouse_release_for_buttons(x: float, y: float, button_list: list):\n for button in button_list:\n if button.pressed:\n #sets button pressed to false\n button.on_release()", "def on_mouse_release(self, x: float, y: float, button, modifiers):\n #dialogue buttons\n check_mouse_release_for_buttons(x, y, self.levels[self.current_level].dialogue_list)\n\n #room info prompt buttons\n check_mouse_release_for_buttons(x, y, self.levels[self.current_level].room_info_list)", "def mouseReleaseEvent(self, ev):\n\n # handle the built mouse events first\n\n # panning...\n if self.panning and (ev.button() == Qt.LeftButton):\n # we're done panning\n self.leftBtnClicked = False\n self.setCursor(Qt.OpenHandCursor)\n self.lastPanPoint = QPoint()\n\n # \"auto\" rubber banding...\n elif self.rubberBandKey and self.rubberBanding:\n\n # end the rubber band selection\n rubberBandRect = self.endRubberBand().toRect()\n\n # check if the user selected anything\n if (rubberBandRect):\n items = 
self.items(rubberBandRect)\n\n # filter the selected items\n items = self.filterSelectedItems(items)\n\n # If we're handling selections deal with the selection states of our marks\n if self.doSelections:\n\n for item in self.selectedItems:\n item.setSelected(False)\n for item in items:\n item.setSelected(True)\n self.selectedItems = items\n\n # call the emit method - we don't directly emit here in case a child class\n # wants to transform the data before emitting it.\n self.emitRubberbandSelection(rubberBandRect, items)\n\n else:\n # This event isn't handled by automatically - emit a release event\n clickLocation = self.mapToScene(ev.pos())\n\n # do a \"sloppy selection\" and return all items that intersect our\n # selection rectangle. The selection rectangle is set by calling\n # the setSelectionRadius method.\n\n # move our selection rectangle into place - depending on the size of\n # the selection area, this may not be centered on the click location\n areaLoc = ev.pos() - self.selectionRadius\n self.selectionArea.moveTo(areaLoc)\n\n # check if the user clicked on anything - this will return a list of\n # items that intersect the selection rectangle.\n items = self.items(self.selectionArea)\n\n # filter the selection so we only return marks or text not associated\n # with a mark.\n items = self.filterSelectedItems(items)\n\n # call the emit method - we don't directly emit here in case a child class\n # wants to transform the data before emitting it.\n self.emitReleaseEvent(clickLocation, ev.button(), self.currentKbKey, items)", "def _ReleaseAllButtons(self):\n self._kit.MouseReleaseAllButtons()\n time.sleep(self.send_delay)", "def ev_MOUSEUP(self, event):", "def check_mouse_release_for_buttons(_x, _y, button_list):\n for button in button_list:\n if button.pressed:\n button.on_release()", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def mouseReleaseEvent(self, event):\n if event.button() is not QtCore.Qt.MouseButton.LeftButton:\n return False\n if self.mousenode is not None:\n self.remove_mousenode(event)\n return QtGui.QGraphicsScene.mouseReleaseEvent(self, event)", "def mouse_out(self):\n pass", "def button_press_event(self, widget, event):\n x, y = event.x, event.y\n\n # x, y = coordinates where the button was pressed\n self.last_win_x, self.last_win_y = x, y\n\n button = 0\n # Prepare a button mask with bits set as follows:\n # left button: 0x1\n # middle button: 0x2\n # right button: 0x4\n # Others can be added as appropriate\n self.logger.debug(\"button down event at %dx%d, button=%x\" % (x, y, button))\n\n data_x, data_y = self.check_cursor_location()\n\n return self.make_ui_callback('button-press', button, data_x, data_y)", "def on_release(self, keyname):\n self.keydown = False\n keyname = str(keyname).strip('\\'')\n log.info('KEY RELEASE ' + keyname)\n if keyname in self.controls_keyrelease:\n key_handler = self.controls_keyrelease[keyname]()", "def rightButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.RIGHT_BUTTON)", "def mouseReleased(self, _evt, _id):\n if not self.is_enabled: return False\n \n self.mouse_icon.mouseReleased(_evt, _id)\n return False", "def exit_on_click(self):\n self.get_mouse()\n self._close()", "def on_key_release(self, key_released: int, _: int) -> None:\n if key_released in (key.LEFT, key.RIGHT, key.A, key.D):\n self.change_x = 0\n self.direction = None", "def handle_mousehold(self, button, name):\r\n if widget.Widget.handle_mousehold(self, button, name):\r\n app.App.handle_mousehold(self, button, name)\r\n return True\r\n return 
False", "def keyReleaseEvent(self, ev):\n self.currentKbKey = None\n\n if (ev.key() == self.panKey):\n # disable Pan/Zoom mode\n self.panning = False\n if self.__pointerLeftWidget:\n # we've left the widget - reset the cursor to the standard arrow\n self.setCursor(Qt.ArrowCursor)\n else:\n self.setCursor(self.defaultCursor)\n elif (ev.key() == self.selectAddKey):\n # disable selection add mode\n if self.__pointerLeftWidget:\n # we've left the widget - reset the cursor to the standard arrow\n self.setCursor(Qt.ArrowCursor)\n else:\n self.setCursor(self.defaultCursor)\n elif (ev.key() == self.zoomKey):\n # disable zoom mode\n self.__zooming = False\n else:\n self.keyRelease.emit(self, ev)", "def handle_release(self, x, y):\n # append new line to list of lines\n self.lines.append( (self.first_point, (x, y)) )\n\n # clear mouse pressed flag and rubber band line coords\n self.pressed_flag = False\n self.first_point = None\n self.last_point = None\n\n # trigger canvas to redraw itself\n self.redraw()", "def onRelease(event):\r\n global initPos\r\n initPos = None # Reset the position ready for next click\r", "def _release(self, event):\n # Release active tool handle.\n if self._active_handle_idx >= 0:\n if event.button == 3:\n self._remove_vertex(self._active_handle_idx)\n self._draw_polygon()\n self._active_handle_idx = -1\n\n # Complete the polygon.\n elif len(self._xys) > 3 and self._xys[-1] == self._xys[0]:\n self._selection_completed = True\n if self._draw_box and self._box is None:\n self._add_box()\n\n # Place new vertex.\n elif (not self._selection_completed\n and 'move_all' not in self._state\n and 'move_vertex' not in self._state):\n self._xys.insert(-1, (event.xdata, event.ydata))\n\n if self._selection_completed:\n self.onselect(self.verts)", "def mouseReleaseEvent( self, event ):\n event.setAccepted(False)\n if self._hotspotPressed:\n event.accept()\n self._hotspotPressed = False\n return\n \n # ignore events when the scene is in view mode\n scene = self.scene()\n if ( self.isLocked() or self._ignoreMouseEvents or \\\n (scene and (scene.inViewMode() or scene.isConnecting()))):\n event.ignore()\n self._ignoreMouseEvents = False\n return\n \n super(XNode, self).mouseReleaseEvent(event)\n \n # emit the geometry changed signal\n self.emitGeometryChanged()\n \n # unblock the selection signals\n if ( scene ):\n scene.blockSelectionSignals(False)\n \n delta = datetime.datetime.now() - self._pressTime\n if not scene.signalsBlocked() and delta.seconds < 1:\n scene.nodeClicked.emit(self)", "def on_canvas_mouse_release(self, event) -> None:\r\n\r\n self.edit_toggle_mode = None", "def _press(self, event):", "def key_release_event(self, widget, event):\n # get keyname or keycode and translate to ginga standard\n # keyname =\n # keycode =\n keyname = '' # self.transkey(keyname, keycode)\n self.logger.debug(\"key release event, key=%s\" % (keyname))\n return self.make_ui_callback('key-release', keyname)", "def ev_joybuttonup(self, event: tcod.event.JoystickButton) -> T | None:", "def triangleBtnHandler(val):\n if val == 1 :\n print(\"Triangle button pressed\")\n else:\n print(\"Triangle button released\")", "def on_key_release(self, symbol, modifiers):\n self.gamestatemanager.peek().on_key_release(symbol, modifiers, self.config_data[\"controls\"])", "def down(self, event):\n self.dragx = event.x\n self.dragy = event.y\n self.canvas.bind(\"<B1-Motion>\", self.motion)\n self.canvas.bind(\"<ButtonRelease-1>\", self.up)\n return True", "def on_key_release(self, key, modifiers):\n pass # stop 
animation", "def on_key_release(self, key, modifiers):\n player_controller.input_release(key, self.player)", "def handle_mouseup(self, button, name):\r\n x = widget.Widget.handle_mouseup(self, button, name)\r\n if not self.mouse_on_me():\r\n return False\r\n if not self.get_visible():\r\n return False\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mouseup(button, name):\r\n return True\r\n return x", "def button_handler(self, channel):\n if channel != self.BUTTON_PIN:\n return\n\n state = GPIO.input(self.BUTTON_PIN)\n now = time.time()\n delta = now - self.prev_button_state[1]\n\n if self.prev_button_state[0] != state:\n self.prev_button_state = (state, now)\n\n if state == GPIO.HIGH:\n self.button_hold = None\n\n # debounce the button tap and trigger action\n if delta > self.TAP_TIME and self.button_tap is None:\n self.button_tap = True\n os.kill(os.getpid(), signal.SIGALRM)\n else:\n self.button_tap = None\n\n # schedule a hold check\n signal.alarm(int(self.HOLD_TIME))\n\n elif state == GPIO.LOW:\n if delta >= self.HOLD_TIME and self.button_hold is None:\n self.button_hold = True\n self.button_tap = False", "def on_press(self):\n self.pressed = True", "def on_press(self):\n self.pressed = True", "def delete_button_callback(self, button):\n\t\tRPIO.del_interrupt_callback(button)", "def mouseReleaseEvent(self, event):\n self.box_begin = self.begin\n self.box_end = event.pos()\n self.begin = event.pos()\n self.end = event.pos()\n if not self.permanent_show:\n self.update()", "def debounced_key_release(event):\n # print('Debounced release', repr(event.key))\n key_indicator.set_text('')\n fig.canvas.draw()", "def leftButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.LEFT_BUTTON)", "def on_key_release(self, key, modifiers):\n\n if key == arcade.key.UP:\n self.up_pressed = False\n elif key == arcade.key.DOWN:\n self.down_pressed = False\n elif key == arcade.key.LEFT:\n self.left_pressed = False\n elif key == arcade.key.RIGHT:\n self.right_pressed = False", "def on_key_release(self, key, modifiers):\n\n if key == arcade.key.UP:\n self.up_pressed = False\n elif key == arcade.key.DOWN:\n self.down_pressed = False\n elif key == arcade.key.LEFT:\n self.left_pressed = False\n elif key == arcade.key.RIGHT:\n self.right_pressed = False", "def mouse_right_up(self):\n pass", "def ev_MOUSEDOWN(self, event):", "def HandButton(self, event):\n pass", "def up(self, event):\n event.widget.unbind (\"<B1-Motion>\")\n event.widget.unbind (\"<ButtonRelease-1>\")\n self.diag.update_arrows()", "def ev_controllerbuttondown(self, event: tcod.event.ControllerButton) -> T | None:", "def on_release(self, released_key ):\n if released_key is not None:\n if isinstance(released_key, pynput.keyboard.KeyCode) and released_key.char is not None:\n released_key = released_key.char.lower()\n elif isinstance(released_key, pynput.keyboard.Key):\n released_key = released_key.name\n self.keys_set.discard(released_key)", "def on_release(self, event):\n self.current_point = None", "def eventHandler(self, event: pygame.event):\n # change selected color if this button's rectangle was clicked\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n if self.rect.collidepoint(event.pos): # is mouse over button\n self.image = self._images[ButtonImages.CLICKING_IMAGE.value]\n self.beingClicked = True\n for func, *args in self.functionsToInvokeWhenClicked:\n func(*args)\n elif event.type == pygame.MOUSEBUTTONUP and self.beingClicked:\n if event.button == 1:\n self.beingClicked = False\n self.image = 
self._images[ButtonImages.DEFAULT_IMAGE.value]", "def exit_btn_callback(evt):\n print(\"Inside exit_btn_callback. Event object is: \", evt)\n mainwin.destroy()", "def _callbackKeyButton(self, channel):\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return", "def on_key_up(self, keyboard, keycode):\n Logger.debug('KeyUp Event: Keycode[1] is \"{}\"'.format(keycode[1]))\n self.keysPressed.remove(keycode[1])", "def check_mouse_release_for_buttons(x, y, button_list):\n for button in button_list:\n if x > button.center_x + button.width / 2:\n continue\n if x < button.center_x - button.width / 2:\n continue\n if y > button.center_y + button.height / 2:\n continue\n if y < button.center_y - button.height / 2:\n continue\n button.on_release()", "def mouseReleaseEvent(self, event: QMouseEvent):\n self._moving = False\n self.rectChanged.emit(self._rect)\n super().mouseReleaseEvent(event)", "def key_handler(self, event):\n if event.type == pygame.KEYUP: \n self.done = True", "def handle_keyrelease(self, event):\r\n if event.keysym == \"BackSpace\":\r\n self.delete(self.index(tkinter.INSERT), tkinter.END)\r\n self.position = self.index(tkinter.END)\r\n if event.keysym == \"Left\":\r\n if self.position < self.index(tkinter.END): # delete the selection\r\n self.delete(self.position, tkinter.END)\r\n else:\r\n self.position = self.position-1 # delete one character\r\n self.delete(self.position, tkinter.END)\r\n if event.keysym == \"Right\":\r\n self.position = self.index(tkinter.END) # go to end (no selection)\r\n if len(event.keysym) == 1:\r\n self.autocomplete()\r\n # No need for up/down, we'll jump to the popup\r\n # list at the position of the autocompletion\r", "def button_press_cb(self, source, event):\n\n if event.button == MOUSE_BUTTON_RIGHT:\n pass\n return True\n elif event.button == MOUSE_BUTTON_MIDDLE:\n self.emit('begin-move')\n return True" ]
[ "0.80996853", "0.80075884", "0.7933358", "0.74254155", "0.7286587", "0.7269564", "0.72388613", "0.72388613", "0.72174096", "0.71852064", "0.7037113", "0.7027572", "0.7019888", "0.69756335", "0.6968941", "0.6932969", "0.69013923", "0.68389016", "0.6834489", "0.68217915", "0.68064696", "0.67922896", "0.67721957", "0.67626584", "0.6738707", "0.6725046", "0.67031455", "0.6703062", "0.667345", "0.6661664", "0.6637001", "0.6592699", "0.65886796", "0.6568915", "0.6561103", "0.6561102", "0.654002", "0.65370697", "0.6535258", "0.6524198", "0.6510034", "0.6508104", "0.648113", "0.64720184", "0.64643985", "0.6451043", "0.6433521", "0.64274275", "0.6401048", "0.6399519", "0.6398906", "0.63691485", "0.6367818", "0.6328204", "0.627359", "0.6267704", "0.62474436", "0.6225996", "0.6225327", "0.6215503", "0.6209651", "0.6205074", "0.61963767", "0.6182396", "0.6179401", "0.6173177", "0.6167398", "0.6159565", "0.6152561", "0.6145867", "0.6137639", "0.60926855", "0.60899174", "0.6082023", "0.6076744", "0.60733455", "0.60664153", "0.60664153", "0.6050228", "0.6038865", "0.60152435", "0.60091376", "0.6004603", "0.6004603", "0.5998809", "0.5990724", "0.59758055", "0.59740525", "0.59453046", "0.5943226", "0.5925004", "0.5923064", "0.5921629", "0.591867", "0.5915708", "0.590709", "0.590701", "0.5903546", "0.5902491", "0.59009856" ]
0.7786435
3
Called when the mouse cursor is moving in the widget. Adjust method signature as appropriate for callback.
def motion_notify_event(self, widget, event): x, y = event.x, event.y # x, y = coordinates of cursor self.last_win_x, self.last_win_y = x, y button = 0 # prepare button mask as in button_press_event() data_x, data_y = self.check_cursor_location() return self.make_ui_callback('motion', button, data_x, data_y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_move_callback(self, event):\n # TODO drag and drop figuriek\n print(\"moving at \", event.x + self.offset_x, event.y + self.offset_y)", "def on_mouse_motion(self, x, y, delta_x, delta_y):\r\n pass", "def __mouseMoved(self, x, y):\n # Are we on the bounding box?\n if pointOnBox(x, y, self.currentBox, thickness=self.__THICKNESS):\n position = getCursorPosition(x, y, self.currentBox, thickness=self.__THICKNESS)\n cursor = [\n wx.CURSOR_SIZENWSE,\n wx.CURSOR_SIZENS,\n wx.CURSOR_SIZENESW,\n wx.CURSOR_SIZEWE,\n wx.CURSOR_SIZENWSE,\n wx.CURSOR_SIZENS,\n wx.CURSOR_SIZENESW,\n wx.CURSOR_SIZEWE\n ] [position]\n self.__setCursor(cursor)\n elif pointInBox(x, y, self.currentBox):\n self.__setCursor(wx.CURSOR_HAND)\n else:\n self.__setCursor()", "def on_mouse_motion(x, y, dx, dy):\n if in_box(x, y):\n # Change the cursor if inside the box.\n self.window.set_mouse_cursor(self.hand_cursor)\n else:\n self.window.set_mouse_cursor(self.default_cursor)", "def moveCursor(self):\n\n\t\tself._before = self.rect.center\n\t\tself.rect.center = self._pos", "def on_mouse_motion(self, x, y, delta_x, delta_y):\n \n pass", "def handle_mouse(self, x, y):\n pass", "def on_mouse_move(self, event: PointEvent):\n self.x = event.x\n self.y = event.y\n self.handle_mouse(self.x, self.y)", "def handle_mouse(self, x, y):\n self.x = x\n self.y = y\n global _pending_handle_mouse\n if not _pending_handle_mouse:\n _pending_handle_mouse = True\n if self.fig.document is not None:\n self.fig.document.add_timeout_callback(self.handle_mouse_callback, 100)\n else:\n self.handle_mouse_callback()", "def __master_cursor_pos_callback(self, glfw_window, xpos, ypos):\n # flip glfw window space to match OGL space(like texture that has bottom left origin)\n ypos = self.window.glyph.size[1] - ypos\n\n # update values\n self.__pos_instant = Vec(xpos, ypos, 0)\n self.__accel = self.__pos_instant - self.__pos_prev\n self.__pos_prev = self.__pos_instant\n\n # call registered callbacks\n self.call_cursor_pos_callback(glfw_window, *self.__pos_instant.xy, mouse=self)", "def _motion(self, event):\n if self.current:\n # modify the current line by changing the end coordinates\n # to be the current mouse position\n coords = event.widget.coords(self.current)\n coords[2] = event.x\n coords[3] = event.y\n\n event.widget.coords(self.current, *coords)", "def ev_mousemotion(self, event: MouseMotion) -> None:", "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "def ev_MOUSEMOTION(self, event):", "def mousePosition(self):", "def mouseEvent(self, widget, event):\n # Zoom in and out with the middle and right mouse buttons\n if event.type == gtk.gdk.BUTTON_PRESS:\n if event.button == 2:\n self.ruler.zoom(2)\n elif event.button == 3:\n self.ruler.zoom(0.5)\n\n # Use the ruler widget's coordinate system to update the time cursor\n x, y, mask = self.ruler.canvas.window.get_pointer()\n scroll = self.ruler.canvas.get_scroll_offsets()[0]\n t = (x + scroll) / self.ruler.scale\n self.cursor.value = t\n\n # If the mouse button is down, try to find the canvas item under the cursor.\n # We use the row's collision detection tree for this, for the same reason\n # we use it for everything else: gnome-canvas' built-in collision detection\n # works poorly on very small items.\n if mask & gtk.gdk.BUTTON1_MASK:\n\n # Search every row in every canvas\n for obj in self.canvasList:\n y = obj.canvas.get_pointer()[1]\n for row in obj.rows:\n if y >= row.top and y <= row.bottom:\n\n # Give a few pixels of slack on either side\n 
slack = 2.0 / self.ruler.scale\n cursorInterval = (t - slack, t + slack)\n\n # The mouse is in this row. Use the row's collision detection\n # to find a nearby item.\n tag = row.intervalOccupied(*cursorInterval)\n if tag and tag[0] != self.hilightWidget:\n self.notifyHilightChanged(tag[0].dataTransaction)\n self.setHilightWidget(tag[0])\n return False\n return False", "def __handleMouseEvents(self, event):\n if not self.enabled:\n return\n\n x, y = event.GetPosition()\n\n # First make sure we have started a box.\n if self.currentBox == None and not event.LeftDown():\n # No box started yet. Set cursor to the initial kind.\n self.__setCursor(wx.CURSOR_CROSS)\n return\n\n if event.LeftDown():\n if self.currentBox == None:\n # No RB Box, so start a new one.\n self.currentBox = (x, y, 0, 0)\n self.hasLetUp = 0\n elif self.__isSizingCursor():\n # Starting a sizing operation. Change the origin.\n position = getCursorPosition(x, y, self.currentBox, thickness=self.__THICKNESS)\n self.currentBox = self.__denormalizeBox(position, self.currentBox)\n\n elif event.Dragging() and event.LeftIsDown():\n # Use the cursor type to determine operation\n if self.__isMovingCursor():\n if self.currentlyMoving or pointInBox(x, y, self.currentBox):\n if not self.currentlyMoving:\n self.currentlyMoving = (x - self.currentBox[0], y - self.currentBox[1])\n self.__moveTo(x - self.currentlyMoving[0], y - self.currentlyMoving[1])\n elif self.__isSizingCursor():\n self.__resizeBox(x, y)\n\n elif event.LeftUp():\n self.hasLetUp = 1\n self.currentlyMoving = None\n self.__normalizeBox()\n\n elif event.Moving() and not event.Dragging():\n # Simple mouse movement event\n self.__mouseMoved(x,y)", "def _update_cursor(self) -> None:\n # get the brush size (get a local reference in case another process\n # changes it between the different accesses in this method)\n brush_size = self.brush_size\n # if there is not update, return\n if not self.is_cursor_change:\n return\n # otherwise dequeue the update\n self.is_cursor_change = False\n # make a static border ring for the cursor\n ring = make_ring(brush_size - 1, brush_size)\n cursor = make_cursor(ring, self._brush_border_color)\n # make a circle with the current color\n brush_circle = make_circle(brush_size) - ring\n cursor = cursor + make_cursor(brush_circle, self._color)\n # create the pyglet cursor object and set it\n mouse = pyglet_cursor(cursor)\n self._view.set_cursor(mouse)", "def move_start(event):\n nonlocal x, y\n x = event.x \n y = event.y\n window['cursor'] = utils.CURSORS['move_item']", "def mouse_position_event(self, x: int, y: int):\n pass", "def mouseMoveEvent(self, event):\n self.end = event.pos()\n self.update()", "def handle_mouse(obj, event):\n if event:\n x = event.globalX()\n y = event.globalY()\n x_w = obj.offset.x()\n y_w = obj.offset.y()\n obj.move(x - x_w, y - y_w)", "def _onmove(self, event):", "def on_mouse_move(self, event):\n self.mouse = [event.xdata, event.ydata]\n\n # Update pan view on mouse move\n if self.panning is True:\n for a in self.pan_axes:\n a.drag_pan(1, event.key, event.x, event.y)\n\n # Async re-draw (redraws only on thread idle state, uses timer on backend)\n self.canvas.draw_idle()\n\n ##### Temporary place-holder for cached update #####\n self.update_screen_request.emit([0, 0, 0, 0, 0])", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if 
glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def _hover(self, event):\n if self.ignore(event):\n return\n\n if self._active_handle is not None or not self._selection_completed:\n # Do nothing if button is pressed and a handle is active, which may\n # occur with drag_from_anywhere=True.\n # Do nothing if selection is not completed, which occurs when\n # a selector has been cleared\n return\n\n _, e_dist = self._edge_handles.closest(event.x, event.y)\n self._set_cursor(e_dist <= self.grab_range)", "def OnMouseMotion(self, evt):\n if evt.Dragging() and evt.LeftIsDown():\n self.lastx, self.lasty = self.x, self.y\n self.x, self.y = evt.GetPosition()\n self.Refresh(False)", "def update(self):\n self.mousePos = pygame.mouse.get_pos()\n self.update_button_hover_status()", "def setMousePositionCallback(self, callback):\n\n self.mouse_position_callback = callback", "def mouseDragged(self, point, delta):\n pass", "def on_mouse_move(self, event):\n if event.is_dragging and event.buttons[0] == 1:\n x0, y0 = event.last_event.pos[0], event.last_event.pos[1]\n x1, y1 = event.pos[0], event.pos[1]\n X0, Y0, Z0 = self.pixel_to_coords(float(x0), float(y0))\n X1, Y1, Z1 = self.pixel_to_coords(float(x1), float(y1))\n self.translate_center(X1 - X0, Y1 - Y0, Z1 - Z0)", "def mouseMoveEvent(self, event):\n # super(PlotWidget, self).mouseMoveEvent(event)\n event.accept()", "def _on_canvas_mouse(self, event):\n if event.GetEventType() in [wx.wxEVT_MOTION, wx.wxEVT_LEFT_DOWN, \n wx.wxEVT_LEFT_UP, wx.wxEVT_MOTION|wx.wxEVT_LEFT_DOWN]:\n new_event = wx.MouseEvent(event.GetEventType())\n pos = self.tc.ScreenToClient(wx.GetMousePosition())\n new_event.SetPosition(pos)\n new_event.Skip()\n self.tc.GetEventHandler().ProcessEvent(new_event)", "def on_mouse_motion(self, x, y, delta_x, delta_y):\n\n print(x)\n print(y)\n print(delta_x)\n print(delta_y)\n\n\n #self.manage_crosshair()\n \n \n\n #self.crosshair_sprite.center_x += delta_x\n #self.crosshair_sprite.center_y += delta_y\n\n\n self.crosshair_relative_xoffset += delta_x\n self.crosshair_relative_yoffset += delta_y", "def mouse_move(self, pos):\n if (self.setup_type == \"position\"):\n x, y = pos\n self.canvas.move(x, y)", "def mouseReleaseEvent(self, event):\n width = self.frameGeometry().width()\n height = self.frameGeometry().height()\n cursor = QtGui.QCursor()\n new_pos = self.mapFromGlobal(cursor.pos())\n x = new_pos.x()\n y = new_pos.y()\n self.__selector_y = y/float(height) # normalized value of the y position\n \tself.__selector_x = x/float(width) #normalised value of the x position\n self.updatePixelColor()\n self.repaint()", "def handle_mouse_press(self, event):", "def update(self):\n # Get the current mouse position. 
This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n\n # Set the player x position to the mouse x position\n self.rect.x = pos[0]", "def motion_notify_cb(self, darea, event):\n if event.is_hint:\n x, y, state = event.window.get_pointer()\n else:\n x = event.x\n y = event.y\n state = event.state\n if state & gdk.BUTTON1_MASK or state & gdk.BUTTON3_MASK:\n self.draw_pointer(self.cr, x, y)\n self.queue_draw()\n self.oldx, self.oldy = x, y\n rel_x, rel_y = self.absolute_to_relative(x, y)\n self.emit('dnd-value', rel_x, rel_y)\n return True", "def mousePressEvent(self, event):\n self.begin = event.pos()\n self.end = event.pos()\n self.update()", "def moveCursor(self, QAbstractItemView_CursorAction, Qt_KeyboardModifiers): # real signature unknown; restored from __doc__\r\n pass", "def update(self):\n # Get the current mouse position. This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n \n # Set the player x position to the mouse x position\n self.rect.x = pos[0]", "def append_cursor_pos_callback(self, callbacked, *args, **kwargs):\n pass", "def append_cursor_enter_callback(self):", "def _(event):\n system_line.cursor_left()", "def emitMouseMoveEvent(self, location, currentKbKey, draggedItems, items):\n # emit the mouseMoveEvent signal\n self.mouseMove.emit(self, location, currentKbKey, draggedItems, items)", "def watchCursor(self, cursor):\n cursor.observers.append(self._cursorCallback)", "def handle_mousemotion(self, change):\r\n if widget.Widget.handle_mousemotion(self, change):\r\n app.App.handle_mousemotion(self, change)\r\n return True\r\n return False", "def handleMousePositionCallback(self, xy):\n\n if self.mouse_position_callback:\n (x, y) = xy\n posn = self.convertView2Geo(x, y)\n self.mouse_position_callback(posn)", "def cursor_cb(self, scene_pos):\n if self.is_within_image(scene_pos):\n pos = self.vb_image.mapSceneToView(scene_pos)\n\n self.cursor_v.setPos(pos)\n self.cursor_h.setPos(pos)\n self.cursor_text.setText(\n \"({:.1f}, {:.1f}) px\".format(pos.x(), pos.y()))\n if self._mark is not None:\n delta = pos - self._mark\n self.cursor_delta.setPos(pos)\n self.cursor_delta.setText(\n \"Δ = ({:.1f}, {:.1f}) μm\".format(\n self.px_to_um(delta.x()), self.px_to_um(delta.y())))\n\n self.cursor_v.show()\n self.cursor_h.show()\n self.cursor_text.show()\n self.cursor_delta.show()\n\n elif self.is_within_zoom(scene_pos):\n pos = self.vb_zoom.mapSceneToView(scene_pos)\n\n if self._up is not None:\n self.zoom_text.setPos(pos)\n self.zoom_text.setText(\"I = {:.0f}\".format(\n self.zoom.image[int(pos.x()), int(pos.y())]))\n self.zoom_text.show()\n\n elif self.is_within_residuals(scene_pos):\n pos = self.vb_residuals.mapSceneToView(scene_pos)\n\n if self._up is not None:\n self.residuals_text.setPos(pos)\n self.residuals_text.setText(\"r = {:.2f}\".format(\n self.residuals.image[int(pos.x()),int(pos.y())]))\n self.residuals_text.show()\n\n else:\n for w in [self.cursor_v, self.cursor_h,\n self.cursor_text, self.cursor_delta,\n self.zoom_text, self.residuals_text]:\n w.hide()", "def mouseMoveEvent(self, event):\n if self._ignore_mouse_events:\n event.ignore()\n return\n\n event.accept()\n\n if self._selection_mode != SelectionMode.NONE:\n x = event.x()\n y = event.y()\n xdiff = float(x - self._selection_position_start[0])\n ydiff = float(y - self._selection_position_start[1])\n if abs(xdiff) < 0.0001:\n xdiff = 1\n if abs(ydiff) < 0.0001:\n ydiff = 1\n xoff = float(self._selection_position_start[0]) / xdiff + 0.5\n yoff = 
float(self._selection_position_start[1]) / ydiff + 0.5\n self._addUpdateSelectionBox(xdiff, ydiff, xoff, yoff)\n\n elif self._use_zinc_mouse_event_handling:\n scene_input = self._sceneviewer.createSceneviewerinput()\n scene_input.setPosition(event.x(), event.y())\n scene_input.setEventType(Sceneviewerinput.EVENT_TYPE_MOTION_NOTIFY)\n if event.type() == QtCore.QEvent.Leave:\n scene_input.setPosition(-1, -1)\n self._sceneviewer.processSceneviewerinput(scene_input)", "def mouseMoveEvent (self, event):\n self.itemMoved = True\n super(DiagramItem, self).mouseMoveEvent(event)", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def __isMovingCursor(self):\n return self.__currentCursor == wx.CURSOR_HAND", "def hoverMoveEvent(self, moveEvent):\n if self.isSelected():\n handle = None\n if self.handle.contains(moveEvent.pos()):\n handle = \"k\" # something not None\n cursor = Qt.ArrowCursor if handle is None else Qt.SizeFDiagCursor\n self.setCursor(cursor)\n super().hoverMoveEvent(moveEvent)", "def OnMouse(self, event):\n\n self.Refresh()\n event.Skip()", "def mousePositionRaw(self):", "def mousePositionRaw(self):", "def _onmove(self, event):\n\n # self._prev are deprecated but we still need to maintain it\n self._prev = self._get_data(event)\n\n v = event.xdata if self.direction == 'horizontal' else event.ydata\n if self.direction == 'horizontal':\n vpress = self._eventpress.xdata\n else:\n vpress = self._eventpress.ydata\n\n # move existing span\n # When \"dragging from anywhere\", `self._active_handle` is set to 'C'\n # (match notation used in the RectangleSelector)\n if self._active_handle == 'C' and self._extents_on_press is not None:\n vmin, vmax = self._extents_on_press\n dv = v - vpress\n vmin += dv\n vmax += dv\n\n # resize an existing shape\n elif self._active_handle and self._active_handle != 'C':\n vmin, vmax = self._extents_on_press\n if self._active_handle == 'min':\n vmin = v\n else:\n vmax = v\n # new shape\n else:\n # Don't create a new span if there is already one when\n # ignore_event_outside=True\n if self.ignore_event_outside and self._selection_completed:\n return\n vmin, vmax = vpress, v\n if vmin > vmax:\n vmin, vmax = vmax, vmin\n\n self.extents = vmin, vmax\n\n if self.onmove_callback is not None:\n self.onmove_callback(vmin, vmax)\n\n return False", "def update(self):\n\n\t\tself.x = games.mouse.x\n\t\tself.y = games.mouse.y\n\t\tself.check_collide()", "def handle_mouse(self, x, y):\n self.last_x = x\n self.last_y = y\n if self.min_x is not None:\n self.last_x = max(self.last_x, self.min_x)\n if self.max_x is not None:\n self.last_x = min(self.last_x, self.max_x)\n # we are in region mode\n if self.region_id is not None:\n start = self.last_x\n end = self.region_edge\n self.region_model.adjust_region(self.region_id, start, end)\n return 
False", "def on_mousemove(event, x, y, flags, userparam):\n global mouse_pos\n global source_img, source_msk, display_img\n global DRAW_MODE\n\n if event == cv.EVENT_MOUSEMOVE:\n mouse_pos = (x, y)\n\n if flags & cv.EVENT_FLAG_SHIFTKEY:\n current_label = LABEL_BACKGROUND\n else:\n current_label = CURRENT_LABEL\n\n if DRAW_MODE == \"point\":\n if flags & cv.EVENT_FLAG_CTRLKEY:\n cv.circle(source_msk, (x, y), SHAPE_SIZE, current_label, -1)\n elif DRAW_MODE == \"line\":\n # line drawing is done in the line-mode keypress handler (keydown())\n pass", "def ev_mousemotion(self, event: tcod.event.MouseMotion) -> T | None:", "def on_mouse_motion(self, x, y, dx, dy):\n if self.exclusive:\n self.gamestatemanager.peek().on_mouse_motion(x, y, dx, dy)", "def on_mouse_release(self, x, y, button):\n pass", "def mouseMoveEvent(self, e):\r\n \r\n self.label.setText('mouseMoveEvent')", "def handle_mouse_data(data):\n pass", "def cursorPositionChanged(self):\r\n cursor = self.text_area.textCursor()\r\n line_no = cursor.blockNumber()\r\n col_no = cursor.columnNumber()\r\n self.statusBar.showMessage(\"Line \"+str(line_no)+\", Column \"+str(col_no))", "def OnSetCursor(self, event):\r\n \r\n # determine cursor\r\n part = self.HitTest(event.GetX(), event.GetY())\r\n cursor = wx.NullCursor\r\n\r\n if part:\r\n if part.type in [AuiDockUIPart.typeDockSizer, AuiDockUIPart.typePaneSizer]:\r\n\r\n if not self.CheckMovableSizer(part):\r\n return\r\n \r\n if part.orientation == wx.VERTICAL:\r\n cursor = wx.StockCursor(wx.CURSOR_SIZEWE)\r\n else:\r\n cursor = wx.StockCursor(wx.CURSOR_SIZENS)\r\n \r\n elif part.type == AuiDockUIPart.typeGripper:\r\n cursor = wx.StockCursor(wx.CURSOR_SIZING)\r\n\r\n event.SetCursor(cursor)", "def move( self, event ):\n self.lastMotion = time()\n if self.follow == False: # If the follow flag is not set, motion within the widget will make the ToolTip dissapear\n self.withdraw()\n self.visible = 1\n self.geometry( '+%i+%i' % ( event.x_root+10, event.y_root+10 ) ) # Offset the ToolTip 10x10 pixes southwest of the pointer\n try:\n self.msgVar.set( self.msgFunc() ) # Try to call the message function. Will not change the message if the message function is None or the message function fails\n except:\n pass\n self.after( int( self.delay * 1000 ), self.show )", "def move(self, event):\r\n self.lastMotion = time()\r\n # If the follow flag is not set, motion within the\r\n # widget will make the ToolTip disappear\r\n #\r\n if self.follow is False:\r\n self.withdraw()\r\n self.visible = 1\r\n\r\n # Offset the ToolTip 10x10 pixes southwest of the pointer\r\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\r\n try:\r\n # Try to call the message function. Will not change\r\n # the message if the message function is None or\r\n # the message function fails\r\n self.msgVar.set(self.msgFunc())\r\n except:\r\n pass\r\n self.after(int(self.delay * 1000), self.show)", "def move(self, event):\r\n self.lastMotion = time()\r\n # If the follow flag is not set, motion within the\r\n # widget will make the ToolTip disappear\r\n #\r\n if self.follow is False:\r\n self.withdraw()\r\n self.visible = 1\r\n\r\n # Offset the ToolTip 10x10 pixes southwest of the pointer\r\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\r\n try:\r\n # Try to call the message function. 
Will not change\r\n # the message if the message function is None or\r\n # the message function fails\r\n self.msgVar.set(self.msgFunc())\r\n except:\r\n pass\r\n self.after(int(self.delay * 1000), self.show)", "def OnMoveEvent(self, event):\r\n\r\n win_rect = self.GetRect()\r\n\r\n if win_rect == self._last_rect:\r\n return\r\n\r\n # skip the first move event\r\n if self._last_rect.IsEmpty(): \r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n \r\n # skip if moving too fast to avoid massive redraws and\r\n # jumping hint windows\r\n if abs(win_rect.x - self._last_rect.x) > 3 or abs(win_rect.y - self._last_rect.y) > 3:\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n\r\n # prevent frame redocking during resize\r\n if self._last_rect.GetSize() != win_rect.GetSize():\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = wx.Rect(*win_rect)\r\n\r\n if _VERSION_STRING < \"2.9\":\r\n leftDown = wx.GetMouseState().LeftDown()\r\n else:\r\n leftDown = wx.GetMouseState().LeftIsDown()\r\n\r\n if not leftDown:\r\n return\r\n\r\n if not self._moving: \r\n self.OnMoveStart(event)\r\n self._moving = True\r\n\r\n if self._last3_rect.IsEmpty():\r\n return\r\n\r\n self.OnMoving(event)", "def update(self):\r\n self.x = games.mouse.x\r\n self.y = games.mouse.y\r\n self.check_collide()", "def ev_MOUSEUP(self, event):", "def handle_event(self, event):\n if event.type != MOUSEMOTION:\n return\n self.model.slider.left = event.pos[0]", "def mouse_middle_down(self):\n pass", "def update(self):\n self.x = games.mouse.x\n self.y = games.mouse.y\n self.check_collide()", "def cursor_placement_thread(self):\r\n while self.editing:\r\n # pylint: disable=W0212\r\n with goxapi.Signal._lock:\r\n curses.curs_set(2)\r\n self.win.touchwin()\r\n self.win.refresh()\r\n time.sleep(0.1)\r\n curses.curs_set(0)", "def mouseMoveEvent(self, event):\n if self.view_state.tracking == TrackingMode.FREE and event.buttons() == QtCore.Qt.LeftButton:\n # Calculate the change in mouse position.\n new_mouse_pos = np.array([event.x(), event.y()])\n mouse_delta = new_mouse_pos - self.view_state.mouse\n\n # Add this to the view centre.\n self.view_state.centre = self.view_state.centre - mouse_delta * (1 / self.view_state.scale)\n self.view_state.mouse = new_mouse_pos", "def _updateOnMouseState(self, state):\n x = state.X.abs\n y = state.Y.abs\n \n mscale = self.mouse_icon.getScale() \n \n if (x + mscale[0] + self.mouse_offset) > render_engine.Window.width:\n x = x - mscale[0] - 10\n else:\n x += self.mouse_offset\n \n if (y + mscale[1] + self.mouse_offset) > render_engine.Window.height:\n y = y - mscale[1] - 10\n else:\n y += self.mouse_offset\n \n self.mouse_icon.setPosition((x, y))", "def move(self, event):\n self.lastMotion = time()\n # If the follow flag is not set, motion within the\n # widget will make the ToolTip disappear\n #\n if self.follow is False:\n self.withdraw()\n self.visible = 1\n\n # Offset the ToolTip 10x10 pixes southwest of the pointer\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\n try:\n # Try to call the message function. 
Will not change\n # the message if the message function is None or\n # the message function fails\n self.msgVar.set(self.msgFunc())\n except:\n pass\n self.after(int(self.delay * 1000), self.show)", "def mouseMoveEvent(self, event):\n self.setCursor(qtc.Qt.SizeVerCursor)\n\n multiplier = self.singleStep()\n valueOffset = ((self.mouseStartPosY - event.pos().y()) * multiplier)\n value = self.startValue + valueOffset\n\n if value != self.current_value:\n self.current_value = value\n self.setValue(self.current_value)", "def change_cursor(self, cursor):\n self.setCursor(cursor)", "def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):\n if self.player_enabled:\n super().on_mouse_motion(x, y, dx, dy)", "def setCursor(self, _name = None):\n\n\t\t_before = self._cursor\n\t\tself._cursor = _name\n\t\tif _before != _name:\n\t\t\tself._updated.append(tuple(self.rect))\n\t\t\tself.updateCursor()\n\t\t\tself._updated.append(tuple(self.rect))", "def ev_windowmoved(self, event: WindowMoved) -> None:", "def hoverMoveEvent(self, event):\n activeTool = self._activeTool()\n toolMethodName = str(activeTool) + \"HoverMove\"\n if hasattr(self, toolMethodName):\n getattr(self, toolMethodName)(event.pos())", "def do_motion_notify_event(self, event):\n\t\t# if this is a hint, then let's get all the necessary \n\t\t# information, if not it's all we need.\n\t\tif event.is_hint:\n\t\t\tx, y, state = event.window.get_pointer()\n\t\telse:\n\t\t\tx = event.x\n\t\t\ty = event.y\n\t\t\tstate = event.state\n\t\t\n\t\t# Update box underneath cursor, for tooltip\n\t\tix, iy = icoords = self.widget2imgcoords(x,y)\n\t\tif __debug__: \n\t\t\tsys.stdout.write(repr((x,y))+' '+repr(icoords)+'\\r')\n\t\t\tsys.stdout.flush()\n\t\t# Update the box cache\n\t\tif self._update_boxes(*icoords):\n\t\t\t# Cache changed, update tooltips\n\t\t\tself.set_tooltip_text(self.get_tooltip_text(self._boxes_under_cursor)) #XXX: Why is this needed to get the tooltip to query?\n\t\t\tself.trigger_tooltip_query()\n\t\t\n\t\tif self.mode == self.INSERT and self._insert_start_coords is not None and state & gtk.gdk.BUTTON1_MASK:\n\t\t\t# Adjust temporary box (for use in insertion)\n\t\t\tnr = pt2rect(icoords, self._insert_start_coords)\n\t\t\tredraw = nr.union(self._temporary_box.rect)\n\t\t\tself._temporary_box.rect = nr\n\t\t\t#self.queue_draw_area(*self.rect2widget(redraw))\n\t\t\tself.queue_draw() #REDRAW: If we implement partial redraw, fix this\n\t\t\t#XXX: Should we draw immediately instead of queueing one?\n\t\t\tself.emit('insert-box-changed', self._temporary_box)\n\t\telif self._box_is_resizing is not None and state & gtk.gdk.BUTTON1_MASK:\n\t\t\t# Update the size of the box we're resizing\n\t\t\td = self._box_is_resizing_dir\n\t\t\tb = self._box_is_resizing\n\t\t\tr = frect(*b.rect)\n\t\t\tobox = frect(*b.rect)\n\t\t\tif 'W' in d:\n\t\t\t\tr.x = ix\n\t\t\t\tr.width = self._box_resize_east - r.x # Use r.x because it's pre-rounded\n\t\t\telif 'E' in d:\n\t\t\t\tr.width = (ix - r.x)\n\t\t\tif 'N' in d:\n\t\t\t\tr.y = iy\n\t\t\t\tr.height = self._box_resize_south - r.y # Use r.y because it's pre-rounded\n\t\t\telif 'S' in d:\n\t\t\t\tr.height = (iy - r.y)\n\t\t\tb.rect = r\n#\t\t\tif __debug__: print \"Resizing: %r (%r,%r) (%r,%r) %r->%r\" % (d, x,y, ix,iy, list(obox), list(b.rect))\n\t\t\t#self.queue_draw_area(*self.rect2widget(obox.union(b.rect)))\n\t\t\tself.queue_draw() #REDRAW: If we implement partial redraw, fix this\n\t\t\t#XXX: Should we draw immediately instead of queueing one?\n\t\telif not state & (gtk.gdk.BUTTON1_MASK | 
gtk.gdk.BUTTON2_MASK | \n\t\t\t\tgtk.gdk.BUTTON3_MASK | gtk.gdk.BUTTON4_MASK | \n\t\t\t\tgtk.gdk.BUTTON5_MASK): # Hover\n\t\t\t# Update the current cursor icon\n\t\t\tboxes = tuple(self.find_boxes_coord_near(*icoords)) #FIXME: Use cache\n\t\t\tif len(boxes):\n\t\t\t\t#if __debug__: print \"Nearby Boxes: %r\" % (boxes,)\n\t\t\t\tbox, dir = boxes[0]\n\t\t\t\tself._box_may_resize = box\n\t\t\t\tself._box_may_resize_dir = dir\n\t\t\t\tself.window.set_cursor(gtk.gdk.Cursor(self.window.get_display(), self.RESIZE_CURSORS[dir]))\n\t\t\telse:\n\t\t\t\tself._box_may_resize = self._box_may_resize_dir = None\n\t\t\t\tself.window.set_cursor(None)", "def ev_MOUSEDOWN(self, event):", "def on_eventBox_motion_notify_event(self, widget, data=None):\n\n if self.enabled == True:\n found = False\n for m in self.map:\n x1, y1, x2, y2, xpage, xpart = m\n if x1 <= data.x <= x2 and y1 <= data.y <= y2:\n found = True\n break\n if found == True:\n widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))\n else:\n widget.window.set_cursor(None)", "def mouseMoveEvent(self, e):\n if e.pos().y() == self._offset:\n return\n adder = (self._offset - e.y())\n self.deltacount += adder\n adder *= (abs(adder) * 0.01)\n f = self._max[0] - self._min[0]\n self._state[0] = min(self._max[0], max(self._min[0], self._state[0] + (adder * f / 1000.0)))\n self._param.update()\n QtGui.QCursor.setPos(self._origo)", "def update(self):\n self.x = games.mouse.x\n #self.y = games.mouse.y\n self.check_collide()", "def mouseMoveEvent(self, e):\n if e.pos().y() == self.offset:\n return\n adder = (self.offset - e.y())\n self.deltacount += adder\n #adder *= self.accelerator\n adder *= (abs(adder) * 0.01)\n #self._state[0] = max(self._min[0], min(self._max[0], self._state[0] + adder))\n QtGui.qApp.emit( QtCore.SIGNAL(\"deltaChanged\"), self, adder)\n #self._param.update()\n QtGui.QCursor.setPos(self.origo)", "def on_mouse_motion(self, x, y, dx, dy):\n # hazlo aparecer donde este mi jugador en el mouse\n self.player_sprite.center_x = x\n self.player_sprite.center_y = y", "def on_mouse_leave (self, event):\n\n\t\tif not self.clicked:\n\n\t\t\tself.cursor_position = [-1,-1]\n\t\t\tself.redraw_canvas()\n\t\t\tself.hide_tip()#self.timer1 = gobject.timeout_add(2000, self.hide_tip)", "def onMove(self, event):\n\n # get current mouse position\n (x, y) = event.GetPositionTuple()\n\n self.handleMousePositionCallback((x, y))\n\n if event.Dragging() and event.LeftIsDown():\n # are we doing box select?\n if self.is_box_select:\n # set select box point 2 at mouse position\n (self.sbox_w, self.sbox_h) = (x - self.sbox_1_x,\n y - self.sbox_1_y)\n elif not self.last_drag_x is None:\n # no, just a map drag\n self.was_dragging = True\n dx = self.last_drag_x - x\n dy = self.last_drag_y - y\n\n # move the map in the view\n self.view_offset_x += dx\n self.view_offset_y += dy\n\n # limit drag at edges of map\n if self.map_width > self.view_width:\n # if map > view, don't allow edge to show background\n if self.view_offset_x < 0:\n self.view_offset_x = 0\n elif self.view_offset_x > self.max_x_offset:\n self.view_offset_x = self.max_x_offset\n else:\n # else map < view, centre X\n self.view_offset_x = (self.map_width - self.view_width)/2\n\n if self.map_height > self.view_height:\n # if map > view, don't allow edge to show background\n if self.view_offset_y < 0:\n self.view_offset_y = 0\n elif self.view_offset_y > self.max_y_offset:\n self.view_offset_y = self.max_y_offset\n else:\n # else map < view, centre Y\n self.view_offset_y = (self.map_height - 
self.view_height)/2\n\n # adjust remembered X,Y\n self.last_drag_x = x\n self.last_drag_y = y\n\n self.recalc_view_lonlat_limits()\n\n # redraw client area\n self.drawTilesLayers()" ]
[ "0.7342505", "0.7273285", "0.7090941", "0.70634186", "0.7034477", "0.69676363", "0.6943248", "0.68984425", "0.669294", "0.659831", "0.65874934", "0.6549803", "0.6516948", "0.6500888", "0.6497365", "0.6471458", "0.6461945", "0.64583486", "0.6448528", "0.6439387", "0.63827825", "0.6339865", "0.63394064", "0.63264203", "0.63040274", "0.63040274", "0.6294191", "0.6292217", "0.62897956", "0.6275114", "0.6256434", "0.62540966", "0.62476194", "0.62473583", "0.62395257", "0.62332404", "0.62240326", "0.6189821", "0.6180184", "0.6166513", "0.6152779", "0.6139905", "0.613767", "0.6130255", "0.6123355", "0.61155677", "0.6115425", "0.6094332", "0.6083725", "0.6083417", "0.6073236", "0.6066993", "0.6023811", "0.6021578", "0.60187143", "0.60187143", "0.6018018", "0.5983891", "0.5976568", "0.5976197", "0.5976197", "0.5974011", "0.5951451", "0.5949799", "0.5949049", "0.5948798", "0.5945873", "0.59444124", "0.59394956", "0.592916", "0.59259427", "0.59179217", "0.59129095", "0.59128374", "0.59128374", "0.5910285", "0.5909145", "0.59012675", "0.5896457", "0.5895733", "0.5894689", "0.58929735", "0.5870668", "0.58639693", "0.5860507", "0.5858588", "0.58515155", "0.5846347", "0.5830622", "0.58124393", "0.58104986", "0.5801034", "0.579941", "0.57943857", "0.57862514", "0.57709384", "0.5768258", "0.5768086", "0.5767833", "0.5767529" ]
0.64577484
18
Called when a drop (drag/drop) event happens in the widget. Adjust method signature as appropriate for callback.
def drop_event(self, widget, event): # make a call back with a list of URLs that were dropped #self.logger.debug("dropped filename(s): %s" % (str(paths))) #self.make_ui_callback('drag-drop', paths) raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dropEvent(self, de):\n # dragging a track\n if hasattr(Globals.dragObject, \"trackFrame\"):\n de.accept()\n trackFrame = Globals.dragObject.trackFrame\n oldParent = trackFrame.parentWidget()\n if oldParent:\n args = (trackFrame, self, oldParent.parentWidget())\n else:\n args = (trackFrame, self, None)\n self.emit(PYSIGNAL('dropped'), (args))\n # not yet used\n #Animation.animate(trackFrame, self, doneFunc=self.slotAnimationDone)", "def on_drop(self):\n print(\"You have dropped\", self.name)", "def dropEvent(self, event):\n\n # Get the id color to drop the items into\n drop_id_color = self.itemAt(event.pos())\n drop_id_color = self.invisibleRootItem() \\\n if drop_id_color is None else drop_id_color\n\n # If the drop position is not valid we pass\n if drop_id_color is None:\n event.ignore()\n return\n\n # If the drop position is not an id color item we pass\n if drop_id_color.data(0, QtCore.Qt.UserRole) != \"color\":\n event.ignore()\n return\n\n # Get the drop items - the selected tree items\n drop_items = [x for x in self.selectedItems()\n if x.data(0, QtCore.Qt.UserRole) == \"object\"] or None\n\n # If not items selected we pass\n if drop_items is None:\n event.ignore()\n return\n\n # Drop the items into the new tree parent\n self._drop_tree_items(drop_items, drop_id_color)\n\n event.accept()\n\n return None", "def dropEvent(self, event):\n if event.mimeData().hasImage:\n event.setDropAction(Qt.CopyAction)\n file_path = event.mimeData().urls()[0].toLocalFile()\n self.set_image(file_path)\n self.folderLocation.setText(file_path)\n \n event.accept()\n else:\n event.ignore()", "def dropEvent(self, event):\r\n source = event.mimeData()\r\n if source.hasUrls():\r\n files = mimedata2url(source)\r\n if files:\r\n files = [\"r'%s'\" % path for path in files]\r\n if len(files) == 1:\r\n text = files[0]\r\n else:\r\n text = \"[\" + \", \".join(files) + \"]\"\r\n self.shell.insert_text(text)\r\n elif source.hasText():\r\n lines = unicode(source.text())\r\n self.shell.set_cursor_position('eof')\r\n self.shell.execute_lines(lines)\r\n event.acceptProposedAction()", "def dropEvent(self, event):\r\n source = event.mimeData()\r\n if source.hasUrls():\r\n files = mimedata2url(source)\r\n if files:\r\n self.plugin.load(files)\r\n elif source.hasText():\r\n editor = self.currentWidget()\r\n if editor is not None:\r\n editor.insert_text( source.text() )\r\n event.acceptProposedAction()", "def _on_drop(self, event):\n data = event.mimeData().data(constants.QGRAPH_DD_MIME_TYPE)\n if not data.isNull():\n data_stream = QDataStream(data, QIODevice.ReadOnly)\n parsed = json.loads(data_stream.readString().decode('utf8'))\n\n # Refer to `mime.py` for docs about format\n version = parsed['version']\n if version not in (1, 2):\n raise ValueError(\"Unsupported version of QmxGraph MIME data: {}\".format(version))\n\n x = event.pos().x()\n y = event.pos().y()\n\n if version in (1, 2):\n vertices = parsed.get('vertices', [])\n scale = self.api.get_zoom_scale()\n for v in vertices:\n # place vertices with an offset so their center falls\n # in the event point.\n vertex_x = x + (v['dx'] - v['width'] * 0.5) * scale\n vertex_y = y + (v['dy'] - v['height'] * 0.5) * scale\n self.api.insert_vertex(\n x=vertex_x,\n y=vertex_y,\n width=v['width'],\n height=v['height'],\n label=v['label'],\n style=v.get('style', None),\n tags=v.get('tags', {}),\n )\n\n if version in (2,):\n decorations = parsed.get('decorations', [])\n for v in decorations:\n self.api.insert_decoration(\n x=x,\n y=y,\n width=v['width'],\n 
height=v['height'],\n label=v['label'],\n style=v.get('style', None),\n tags=v.get('tags', {}),\n )\n\n event.acceptProposedAction()\n else:\n event.ignore()", "def dropEvent(self, event: QtGui.QDropEvent) -> None:\n if event.mimeData().hasImage:\n event.setDropAction(Qt.CopyAction)\n self.image = event.mimeData().urls()[0].toLocalFile()\n x = self.width()\n y = self.height()\n im = QPixmap(self.image).scaled(x, y) # , aspectRatioMode=Qt.KeepAspectRatio)\n im.save(os.getcwd() + \"/tmp.jpg\")\n self.image = (os.getcwd() + \"/tmp.jpg\")\n self.setPixmap(im)\n # self.setPixmap(QPixmap(self.image))\n self.setStyleSheet(\"\")\n event.accept()\n else:\n event.ignore()", "def addDropListener(self, callback: 'callable'):\n self.getView().addDropListener(callback)", "def dropEvent(self, QDropEvent):\n srcItems = self.selectedItems()\n dstInd = (self.indexAt(QDropEvent.pos()).row() + 1)\n kbMod = QDropEvent.keyboardModifiers()\n #-- Create New Items --#\n for n, srcItem in enumerate(srcItems):\n itemDict = self.treeParent.getItemDict(srcItem)\n newItem = self.treeParent.on_addVar(index=(dstInd + n))\n self.treeParent.setItem(newItem, **itemDict)\n #-- Remove Items --#\n if not kbMod == QtCore.Qt.ControlModifier:\n for srcItem in srcItems:\n self.takeTopLevelItem(self.indexOfTopLevelItem(srcItem))\n self.treeParent.reindexVar()", "def dropMimeData(self, p_int, QMimeData, Qt_DropAction): # real signature unknown; restored from __doc__\r\n return False", "def on_item_dropped(self, url):\n print 'Weld.on_item_dropped:', url\n #make sure all struct are present\n if not(self.project and self.project.level):\n print >> sys.stderr, 'it\\'s too early to drop stuff: '\\\n 'create a project and a level first !'\n return\n\n #retrieve data if it comes from weld\n if url in self.resMan:\n props = self.resMan.file_props(url)\n if props is None:\n print >> sys.stderr, curr_f(), ': url(\\'%s\\') in self.resMan '\\\n 'but can\\'t retrieve props.' 
% (url)\n return\n props = self.project.level.resMan.add_resource(self.resMan.base_path,\n props)\n url = props['url']\n if props == {} or url not in self.project.level.resMan:\n print >> sys.stderr, curr_f(), 'could not retrieve file and/or '\\\n 'dependencies for props:', pp(props)\n return\n\n #instanciate it\n if url in self.project.level.resMan:\n props = self.project.level.resMan.file_props(url)\n dtp = self.project.level.qsteelwidget.dropTargetPosition(Config.instance().drop_target_vec)\n props['position'] = dtp\n props['rotation'] = self.project.level.qsteelwidget.dropTargetRotation()\n if props['resource_type'] == 'meshes':\n props['meshName'] = props['name']\n self.project.level.instanciate(props)\n s = 'dropped agent \\'%s\\' with id %i' % (props['name'], props['agentId'])\n print s\n Ui.instance().show_status(s)\n else:\n Ui.instance().show_status('can only drop meshes so far')", "def DoDrop(self, docks, panes, target, pt, offset=wx.Point(0, 0)):\r\n\r\n if target.IsToolbar():\r\n return self.DoDropToolbar(docks, panes, target, pt, offset)\r\n elif target.IsFloating():\r\n return self.DoDropFloatingPane(docks, panes, target, pt)\r\n else:\r\n return self.DoDropNonFloatingPane(docks, panes, target, pt)", "def drag_and_drop_attempt():\n\n class InitialState(BaseState):\n \"\"\"\n Initial state for the SimpleGUI.\n \"\"\"\n\n def _on_enter(self, gui):\n \"\"\"\n Construct the buttons upon entering the state.\n\n :return:\n \"\"\"\n print(\"In initial state.\")\n\n '''Create drag and drop window'''\n gui.entry_sv = tk.StringVar()\n gui.drop_box_list = []\n gui.drop_box_items = tk.Listbox(master=gui.root, listvariable=gui.drop_box_list)\n gui.drop_box_text = tk.StringVar()\n gui.drop_box_text.set(\"Drop images here\")\n gui.entry = tk.Entry(gui.root, textvar=gui.drop_box_text, justify='center')\n gui.entry.config(font=(\"Courier\", 44))\n gui.entry.place(x = 200, y=200, width=800, height=800)\n #gui.entry.pack()\n gui.entry.drop_target_register(DND_FILES)\n gui.entry.dnd_bind('<<Drop>>', self.drop(gui))\n gui.update()\n\n def _on_exit(self, gui):\n \"\"\"\n Return the next state.\n\n :param gui:\n :return:\n \"\"\"\n gui.update()\n return WaitForDrop()\n\n def drop(self, gui):\n def _drop(event):\n files = root.tk.splitlist(event.data)\n gui.entry_sv.set(files)\n return _drop\n\n class WaitForDrop(BaseState):\n \"\"\"\n State for having buttons on.\n \"\"\"\n\n def _on_enter(self, gui):\n \"\"\"\n\n :param gui:\n :return:\n \"\"\"\n print(\"In wait for drop state.\")\n\n def _state_main(self, gui):\n \"\"\"\n The main code for the ButtonsOn state.\n\n :param gui:\n :return:\n \"\"\"\n gui.entry.wait_variable(gui.entry_sv)\n\n '''Clean string'''\n files = literal_eval(gui.entry_sv.get())\n\n '''Remove previous images'''\n if hasattr(gui, \"panel\"):\n gui.panel.destroy()\n\n '''Load each image'''\n for file_name in files:\n file_name = file_name.replace(\"{\", \"\").replace(\"}\", \"\")\n # image = tk.PhotoImage(file=file_name)\n if \".CR2\" in file_name:\n '''Rawpy implementation'''\n file_image = rawpy.imread(file_name)\n file_image = file_image.postprocess()\n '''Rawkit implementation'''\n '''file_image = Raw(file_name)\n file_image = np.array(file_image.to_buffer())'''\n '''OpenCV implementation'''\n '''file_image = cv2.imread(file_name)'''\n else:\n file_image = Image.open(file_name)\n '''image = file_image.resize((500, 500), Image.ANTIALIAS)\n image = ImageTk.PhotoImage(image)\n gui.panel = tk.Label(gui.root, image=image)\n gui.panel.image = image\n gui.panel.pack()'''\n # 
panel.grid(row=2)\n\n image_data = np.array(file_image)\n image_data = cv2.cvtColor(image_data, cv2.COLOR_RGB2GRAY)\n '''print(image_data.shape)\n print(image_data)\n print(len(image_data))\n print(len(image_data[0]))'''\n returned_image = Image.fromarray(image_data)\n '''cv2.imshow(\"Gray\", image_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Gray\")'''\n\n '''enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(file_image))\n enhanced_image = enhanced_contrast.enhance(255)\n enhanced_data = np.array(enhanced_image)\n plot_functions.imshow(enhanced_image)\n plot_functions.show()'''\n\n # color_space = cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV)\n # print(color_space)\n \n '''Create mask for white-ish pixels'''\n '''lower_background = np.array([150, 150, 150])\n upper_background = np.array([255, 255, 255])\n print(image_data)\n white_mask = cv2.inRange(image_data, lower_background, upper_background)\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n white_mask = white_mask / 255'''\n\n '''Create mask for black-ish pixels'''\n '''lower_background = np.array([0, 0, 0])\n upper_background = np.array([25, 25, 25])\n black_mask = cv2.inRange(image_data, lower_background, upper_background)\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n black_mask = black_mask / 255'''\n\n '''Add masks together'''\n '''background_mask = white_mask\n # Ensure no value is above 1\n background_mask = np.clip(background_mask, 0, 1)'''\n \n copied_image_data = np.asarray(returned_image).copy()\n # background_mask = np.logical_not(background_mask)\n '''for row_index, [mask_row, image_row] in enumerate(zip(background_mask, copied_image_data)):\n # place black pixel on corresponding masked pixels\n # copied_image_data[row_index] = np.array([image_row[pixel] * int(mask_row[pixel]) for pixel in range(len(mask_row))])\n # make pixel fully white on corresponding masked pixels\n copied_image_data[row_index] = np.array([np.array([255, 255, 255]) if int(mask_row[pixel]) else image_row[pixel] for pixel in range(len(mask_row))])'''\n\n '''Turn removed pixels red'''\n '''mask_image = Image.fromarray(copied_image_data)\n plot_functions.imshow(mask_image)\n plot_functions.show()'''\n trapezoid_data = copied_image_data.copy()\n\n enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(trapezoid_data))\n enhanced_image = enhanced_contrast.enhance(255)\n trapezoid_data = np.array(enhanced_image)\n\n '''Detect lines'''\n edges = cv2.Canny(trapezoid_data, 75, 150)\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, maxLineGap=1000)\n # print(lines)\n for line in lines:\n x1, y1, x2, y2 = line[0]\n if y1 == y2:\n cv2.line(copied_image_data, (x1, y1), (x2, y2), (255, 255, 255), 1)\n\n '''Trapezoid attempt'''\n\n # filters image bilaterally and displays it\n bilatImg = cv2.bilateralFilter(trapezoid_data, 5, 175, 175)\n\n # finds edges of bilaterally filtered image and displays it\n edgeImg = cv2.Canny(bilatImg, 75, 200)\n\n # gets contours (outlines) for shapes and sorts from largest area to smallest area\n contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n\n # drawing red contours on the image\n for con in contours:\n cv2.drawContours(trapezoid_data, con, -1, (255, 255, 255), 
3)\n\n '''Detect corners'''\n dst = cv2.cornerHarris(edges, 30, 31, 0.001)\n dst = cv2.dilate(dst, None)\n ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)\n dst = np.uint8(dst)\n\n # find centroids\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n # define the criteria to stop and refine the corners\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,\n 0.001)\n corners = cv2.cornerSubPix(edges, np.float32(centroids), (5, 5),\n (-1, -1), criteria)\n\n good_corners = []\n for corner in corners:\n if (corner[1] < 1000) & (corner[1] > 650) & (corner[0] > 250) & (corner[0] < 2250):\n good_corners.append(corner)\n cv2.circle(edges, (corner[0], corner[1]), 10, (255, 255, 255))\n\n print(good_corners)\n if len(good_corners) >= 3:\n corner_combos = itertools.combinations(good_corners, 3)\n elif len(good_corners) > 1:\n corner_combos = itertools.combinations(good_corners, 2)\n\n best_corner_combo = None\n best_coef = np.inf\n for corner_combo in corner_combos:\n regression = LinearRegression().fit(np.array([corner[0] for corner in corner_combo]).reshape(-1, 1),\n np.array([corner[1] for corner in corner_combo]))\n if np.abs(regression.coef_) < best_coef:\n best_coef = np.abs(regression.coef_)\n best_corner_combo = np.array([corner[1] for corner in corner_combo])\n\n y_edge = int(round(np.mean(best_corner_combo)))\n edges = edges[y_edge:3000, 200:2200]\n copied_image_data = copied_image_data[y_edge:2500, 200:2200]\n trapezoid_data = trapezoid_data[y_edge:2500, 200:2200]\n\n # and double-checking the outcome\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.imshow(\"Contours check\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Contours check\")\n\n # find the perimeter of the first closed contour\n perim = cv2.arcLength(contours[0], True)\n # setting the precision\n epsilon = 0.02 * perim\n # approximating the contour with a polygon\n approxCorners = cv2.approxPolyDP(contours[0], epsilon, True)\n # check how many vertices has the approximate polygon\n approxCornersNumber = len(approxCorners)\n\n for corners in approxCorners:\n cv2.circle(trapezoid_data, (corners[0], corners[1]), radius=10, color=(255, 255, 255), thickness=-1)\n cv2.imshow(\"Vertex position\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Vertex position\")\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n def _on_exit(self, gui):\n if gui.program_running:\n gui.update()\n return WaitForDrop()\n else:\n return None\n\n class DragAndDropGUI:\n \"\"\"\n Object for a simple gui.\n \"\"\"\n\n def __init__(self, root):\n \"\"\"\n Initializing the SimpleGUI object.\n \"\"\"\n self.root = root\n w, h = root.winfo_screenwidth(), self.root.winfo_screenheight()\n self.root.geometry(\"%dx%d+0+0\" % (w, h))\n self.root.protocol(\"WM_DELETE_WINDOW\", self.end_program)\n self.program_running = True\n\n def update(self):\n \"\"\"\n Update the GUI.\n\n :return:\n \"\"\"\n self.root.update_idletasks()\n self.root.update()\n return self.root\n\n def end_program(self):\n \"\"\"\n Ends the program.\n\n :return:\n \"\"\"\n if self.entry_sv.get() != \" \":\n self.entry_sv.set(\" \")\n else:\n self.entry_sv.set(\"!\")\n self.root.destroy()\n self.program_running = False\n\n '''Initialize and run GUI object'''\n root = tkinterdnd2.Tk()\n # Maximize window while maintaining title bar\n gui = DragAndDropGUI(root)\n state_machine = 
StateMachine(initial_state=InitialState())\n state_machine.run(gui)", "def dropEvent(self, e: QtGui.QDropEvent):\n src = e.source()\n if src is not self:\n for item in src.selectedItems():\n clone = item.clone()\n clone.setFlags(clone.flags() | Qt.ItemIsEditable)\n self.addTopLevelItem(clone)\n super().dropEvent(e) # Call the original function", "def drop(self, event):\n self.config(cursor='arrow')", "def player_drop(self, item):\n dropped = self.drop(item)\n if dropped:\n self.handler.message_box.add_msg(\"You drop the {}!\".format(dropped),\n data.COLOURS['player_item_text'])", "def mouseReleaseEvent (self, event):\n if self.itemMoved:\n self.parentWidget.DataChanged.emit()\n self.itemMoved = False; \n super(DiagramItem, self).mouseReleaseEvent(event)", "def mouseMoveEvent(self, e):\n if e.buttons() != Qt.LeftButton:\n return\n\n mimeData = QtCore.QMimeData()\n mimeData.setData(\n app.NODE_MIMETYPE,\n QtCore.QByteArray(bytes('data string', 'utf-8')),\n )\n\n drag = QtGui.QDrag(self)\n drag.setMimeData(mimeData)\n drag.setHotSpot(e.pos() - self.rect().topLeft())\n \n dropAction = drag.exec_(Qt.MoveAction)", "def drop(self):\n init_x = self.x\n init_y = self.y\n init_z = self.z\n drop_z = self.drop_height\n \n #drop to given height\n self.move_to(init_x, init_y, drop_z)\n \n #open gripper\n self.gripper.command_position(100)\n \n #return to initial position\n self.move_to(init_x, init_y, init_z)", "def dragEnterEvent(self, event):", "def drag_and_drop(self, droppable):\n self.scroll_to()\n ActionChains(self.driver).drag_and_drop(\n self._element,\n droppable._element,\n ).perform()", "def slider_dragged(self):\n pass", "def SetCallbackFunc( self, dropCallbacFunc=None ) :\n \n # Create a dropFiles event association for this control.\n # [ SetDropTarget ] is a built-in method for (all ?) controls.\n self.folderDropTarget.SetDropTarget( ddt.FilesDropTarget( self.folderDropTarget ) )\n \n # Install the callback-function for this class's parent-widget dropFiles-event.\n self.folderDropTarget.dropFunc = dropCallbacFunc", "def drag_data_received(self, widget, context, x, y, sel_data, info, time):\n if not sel_data:\n return\n #modern file managers provide URI_LIST. 
For Windows split sel_data.data\n files = sel_data.get_uris()\n for file in files:\n if win():\n clean_string = conv_to_unicode(\n file.replace('\\0',' ').replace(\"\\r\", \" \").strip(),\n None)\n else:\n clean_string = file\n protocol, site, mfile, j, k, l = urlparse(clean_string)\n if protocol == \"file\":\n name = url2pathname(mfile)\n mime = get_type(name)\n if not is_valid_type(mime):\n return\n photo = MediaObject()\n self.uistate.set_busy_cursor(True)\n photo.set_checksum(create_checksum(name))\n self.uistate.set_busy_cursor(False)\n base_dir = cuni(media_path(self.dbstate.db))\n if os.path.exists(base_dir):\n name = relative_path(name, base_dir)\n photo.set_path(name)\n photo.set_mime_type(mime)\n basename = os.path.basename(name)\n (root, ext) = os.path.splitext(basename)\n photo.set_description(root)\n with DbTxn(_(\"Drag Media Object\"), self.dbstate.db) as trans:\n self.dbstate.db.add_object(photo, trans)\n widget.emit_stop_by_name('drag_data_received')", "def dragMoveEvent(self, event):\n if event.mimeData().hasImage:\n event.accept()\n else:\n event.ignore()", "def drag_and_drop(self,param,ignore_error_handle = False):\n message = {}\n origin_element = param.get('origin',None);\n destination_element = param.get('destination',None);\n step = 'drag a element to another element'\n try:\n self.driver.drag_and_drop(origin_element,destination_element);\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def button_release_cb(self, darea, event):\n self.oldx, self.oldy = event.x, event.y\n self.draw_pointer(self.cr, None, None)\n self.queue_draw()\n self.oldx, self.oldy = None, None\n self.emit('end-dnd')\n return True", "def _fire_dropping(self):\n\t\tif len(self.droppings) < self.settings.droppings_allowed:\n\t\t\tnew_dropping = Dropping(self)\n\t\t\tself.droppings.add(new_dropping)", "def button_press_cb(self, darea, event):\n x, y = event.x, event.y\n self.draw_pointer(self.cr, x, y)\n self.queue_draw()\n self.oldx, self.oldy = x, y\n rel_x, rel_y = self.absolute_to_relative(x, y)\n self.emit('dnd-value', rel_x, rel_y)\n self.emit('start-dnd')\n return True", "def mouseMoveEvent(self, e):\n if self.mousePressed:\n Globals.dragObject = QTextDrag('PKSampler: dragging a track', self)\n Globals.dragObject.trackFrame = self\n Globals.dragObject.dragCopy()", "def mouseDragged(self, point, delta):\n pass", "def _dropMove(self, point: QPoint, selectedFiles: List[QModelIndex]) -> None:\n selectedFiles = [self._currPath.joinpath(x.data()) for i, x in enumerate(selectedFiles)\n if i % len(self._modelHeaders) == 0]\n try:\n filename = self._mainFileView.indexAt(point).siblingAtColumn(0).data()\n dest = self._currPath.joinpath(filename)\n if dest.is_file():\n return\n duplicates = []\n for src in selectedFiles:\n dest = self._currPath.joinpath(filename).joinpath(src.name)\n if str(src) in str(dest):\n return\n if dest.exists():\n duplicates.append(dest)\n if duplicates:\n if self._overwriteFileMsgBox(duplicates) == QMessageBox.Cancel:\n return\n for src in selectedFiles:\n dest = self._currPath.joinpath(filename).joinpath(src.name)\n if not src.exists():\n raise FileNotFoundError\n if src.is_file():\n shutil.move(str(src), str(dest))\n elif src.is_dir():\n dir_util.copy_tree(str(src), str(dest))\n shutil.rmtree(src)\n except FileNotFoundError:\n self._statusBar.showMessage('File not found!', 3000)\n except TypeError: # when the files are dropped on empty 
area\n pass\n finally:\n self._listDirectories()", "def do_drop(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToDrop = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n\r\n # find out if the player doesn't have that item\r\n if itemToDrop not in invDescWords:\r\n print('You do not have \"%s\" in your inventory.' % (itemToDrop))\r\n return\r\n\r\n # get the item name that the player's command describes\r\n item = getFirstItemMatchingDesc(itemToDrop, inventory)\r\n if item != None:\r\n print('You drop %s.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item) # remove from inventory\r\n worldRooms[location][GROUND].append(item) # add to the ground\r", "def OnTabEndDrag(self, event):\r\n\r\n tabs = event.GetEventObject()\r\n if not tabs.GetEnabled(event.GetSelection()):\r\n return\r\n\r\n self._mgr.HideHint()\r\n\r\n src_tabs = event.GetEventObject()\r\n if not src_tabs:\r\n raise Exception(\"no source object?\")\r\n\r\n src_tabs.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))\r\n\r\n # get the mouse position, which will be used to determine the drop point\r\n mouse_screen_pt = wx.GetMousePosition()\r\n mouse_client_pt = self.ScreenToClient(mouse_screen_pt)\r\n\r\n # check for an external move\r\n if self._agwFlags & AUI_NB_TAB_EXTERNAL_MOVE:\r\n tab_ctrl = wx.FindWindowAtPoint(mouse_screen_pt)\r\n\r\n while tab_ctrl:\r\n \r\n if isinstance(tab_ctrl, AuiTabCtrl):\r\n break\r\n \r\n tab_ctrl = tab_ctrl.GetParent()\r\n \r\n if tab_ctrl:\r\n \r\n nb = tab_ctrl.GetParent()\r\n\r\n if nb != self:\r\n \r\n # find out from the destination control\r\n # if it's ok to drop this tab here\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_ALLOW_DND, self.GetId())\r\n e.SetSelection(event.GetSelection())\r\n e.SetOldSelection(event.GetSelection())\r\n e.SetEventObject(self)\r\n e.SetDragSource(self)\r\n e.Veto() # dropping must be explicitly approved by control owner\r\n\r\n nb.GetEventHandler().ProcessEvent(e)\r\n\r\n if not e.IsAllowed():\r\n \r\n # no answer or negative answer\r\n self._mgr.HideHint()\r\n return\r\n \r\n # drop was allowed\r\n src_idx = event.GetSelection()\r\n src_page = src_tabs.GetWindowFromIdx(src_idx)\r\n\r\n # Check that it's not an impossible parent relationship\r\n p = nb\r\n while p and not p.IsTopLevel():\r\n if p == src_page:\r\n return\r\n \r\n p = p.GetParent()\r\n\r\n # get main index of the page\r\n main_idx = self._tabs.GetIdxFromWindow(src_page)\r\n if main_idx == wx.NOT_FOUND:\r\n raise Exception(\"no source page?\")\r\n\r\n # make a copy of the page info\r\n page_info = self._tabs.GetPage(main_idx)\r\n\r\n # remove the page from the source notebook\r\n self.RemovePage(main_idx)\r\n\r\n # reparent the page\r\n src_page.Reparent(nb)\r\n\r\n # Reparent the control in a tab (if any)\r\n if page_info.control:\r\n self.ReparentControl(page_info.control, tab_ctrl)\r\n\r\n # find out the insert idx\r\n dest_tabs = tab_ctrl\r\n pt = dest_tabs.ScreenToClient(mouse_screen_pt)\r\n\r\n target = dest_tabs.TabHitTest(pt.x, pt.y)\r\n insert_idx = -1\r\n if target:\r\n insert_idx = dest_tabs.GetIdxFromWindow(target)\r\n\r\n # add the page to the new notebook\r\n if insert_idx == -1:\r\n insert_idx = dest_tabs.GetPageCount()\r\n \r\n dest_tabs.InsertPage(page_info.window, page_info, insert_idx)\r\n nb._tabs.AddPage(page_info.window, page_info)\r\n\r\n nb.DoSizing()\r\n dest_tabs.DoShowHide()\r\n dest_tabs.Refresh()\r\n\r\n # set the selection in the 
destination tab control\r\n nb.SetSelectionToPage(page_info)\r\n\r\n # notify owner that the tab has been dragged\r\n e2 = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_DRAG_DONE, self.GetId())\r\n e2.SetSelection(event.GetSelection())\r\n e2.SetOldSelection(event.GetSelection())\r\n e2.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e2)\r\n\r\n return\r\n\r\n if self._agwFlags & AUI_NB_TAB_FLOAT:\r\n self._mgr.HideHint() \r\n if self.IsMouseWellOutsideWindow(): \r\n # Use CallAfter so we our superclass can deal with the event first\r\n wx.CallAfter(self.FloatPage, self.GetSelection())\r\n event.Skip()\r\n return\r\n \r\n # only perform a tab split if it's allowed\r\n dest_tabs = None\r\n\r\n if self._agwFlags & AUI_NB_TAB_SPLIT and self._tabs.GetPageCount() >= 2:\r\n \r\n # If the pointer is in an existing tab frame, do a tab insert\r\n hit_wnd = wx.FindWindowAtPoint(mouse_screen_pt)\r\n tab_frame = self.GetTabFrameFromTabCtrl(hit_wnd)\r\n insert_idx = -1\r\n \r\n if tab_frame:\r\n \r\n dest_tabs = tab_frame._tabs\r\n\r\n if dest_tabs == src_tabs:\r\n return\r\n\r\n pt = dest_tabs.ScreenToClient(mouse_screen_pt)\r\n target = dest_tabs.TabHitTest(pt.x, pt.y)\r\n \r\n if target: \r\n insert_idx = dest_tabs.GetIdxFromWindow(target)\r\n \r\n else:\r\n \r\n zero = wx.Point(0, 0)\r\n rect = self._mgr.CalculateHintRect(self._dummy_wnd, mouse_client_pt, zero)\r\n \r\n if rect.IsEmpty():\r\n # there is no suitable drop location here, exit out\r\n return\r\n \r\n # If there is no tabframe at all, create one\r\n new_tabs = TabFrame(self)\r\n new_tabs._rect = wx.RectPS(wx.Point(0, 0), self.CalculateNewSplitSize())\r\n new_tabs.SetTabCtrlHeight(self._tab_ctrl_height)\r\n self._tab_id_counter += 1\r\n new_tabs._tabs = AuiTabCtrl(self, self._tab_id_counter)\r\n new_tabs._tabs.SetArtProvider(self._tabs.GetArtProvider().Clone())\r\n new_tabs._tabs.SetAGWFlags(self._agwFlags)\r\n\r\n self._mgr.AddPane(new_tabs, framemanager.AuiPaneInfo().Bottom().CaptionVisible(False), mouse_client_pt)\r\n self._mgr.Update()\r\n dest_tabs = new_tabs._tabs\r\n \r\n # remove the page from the source tabs\r\n page_info = src_tabs.GetPage(event.GetSelection())\r\n\r\n if page_info.control:\r\n self.ReparentControl(page_info.control, dest_tabs)\r\n\r\n page_info.active = False\r\n src_tabs.RemovePage(page_info.window)\r\n\r\n if src_tabs.GetPageCount() > 0: \r\n src_tabs.SetActivePage(0)\r\n src_tabs.DoShowHide()\r\n src_tabs.Refresh()\r\n\r\n # add the page to the destination tabs\r\n if insert_idx == -1:\r\n insert_idx = dest_tabs.GetPageCount()\r\n\r\n dest_tabs.InsertPage(page_info.window, page_info, insert_idx)\r\n \r\n if src_tabs.GetPageCount() == 0:\r\n self.RemoveEmptyTabFrames()\r\n\r\n self.DoSizing()\r\n dest_tabs.DoShowHide()\r\n dest_tabs.Refresh()\r\n\r\n # force the set selection function reset the selection\r\n self._curpage = -1\r\n\r\n # set the active page to the one we just split off\r\n self.SetSelectionToPage(page_info)\r\n\r\n self.UpdateHintWindowSize()\r\n \r\n # notify owner that the tab has been dragged\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_DRAG_DONE, self.GetId())\r\n e.SetSelection(event.GetSelection())\r\n e.SetOldSelection(event.GetSelection())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)", "def _on_enter(self, gui):\n print(\"In initial state.\")\n\n '''Create drag and drop window'''\n gui.entry_sv = tk.StringVar()\n gui.drop_box_list = []\n gui.drop_box_items = tk.Listbox(master=gui.root, listvariable=gui.drop_box_list)\n gui.drop_box_text = 
tk.StringVar()\n gui.drop_box_text.set(\"Drop images here\")\n gui.entry = tk.Entry(gui.root, textvar=gui.drop_box_text, justify='center')\n gui.entry.config(font=(\"Courier\", 44))\n gui.entry.place(x = 200, y=200, width=800, height=800)\n #gui.entry.pack()\n gui.entry.drop_target_register(DND_FILES)\n gui.entry.dnd_bind('<<Drop>>', self.drop(gui))\n gui.update()", "def DoDropPane(self, panes, target, dock_direction, dock_layer, dock_row, dock_pos):\r\n \r\n drop = self.CopyTarget(target)\r\n panes = DoInsertPane(panes, dock_direction, dock_layer, dock_row, dock_pos)\r\n\r\n drop.Dock().Direction(dock_direction).Layer(dock_layer).Row(dock_row).Position(dock_pos)\r\n return self.ProcessDockResult(target, drop)", "def drag_and_drop(self, source_selector, target_selector):\n self._wait_element_localed(self.driver, source_selector)\n source = self.get_element(source_selector)\n self._wait_element_localed(self.driver, target_selector)\n target = self.get_element(target_selector)\n ActionChains(self.driver).drag_and_drop(source, target)", "def _ondrag(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button%s-Motion>\" % num)\n else:\n def eventfun(event):\n try:\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n except Exception:\n pass\n self.cv.tag_bind(item, \"<Button%s-Motion>\" % num, eventfun, add)", "def process_dropped_data(self, item, origin: str, path: Path):\n\n layer_type_origin_map = {\n PRESENTATION: LayerType.PRESENTATION,\n PRIMARY: LayerType.PRIMARY,\n SECONDARY: LayerType.SECONDARY\n }\n\n bounding_rect = item.boundingRect()\n\n pos = Position(item.x() * SVG_SCALE_FACTOR, item.y() * SVG_SCALE_FACTOR, item.zValue())\n\n size = Size(bounding_rect.width() * SVG_SCALE_FACTOR, bounding_rect.height() * SVG_SCALE_FACTOR)\n\n if origin == BACKGROUND:\n self.__template.set_background(str(path), size=size)\n else:\n try:\n layer = self.__template.add_layer(pos=pos, size=size, _type=layer_type_origin_map[origin])\n except NoBaseSvgError as err:\n self.removeItem(item)\n error_dialog = QErrorMessage(self.parent())\n error_dialog.showMessage(str(err))\n else:\n self.__template.map_layer_with_item(layer, graphic_item=item)", "def dragMoveEvent(self, event: QtGui.QDragMoveEvent) -> None:\n if event.mimeData().hasImage:\n event.accept()\n else:\n event.ignore()", "def OnTokenButtonRelease(self, event):\n self._drag_data = {\"x\": 0, \"item\": None}\n\n # Rebind the main GUI buttons because they are unbinded while dragging the beats\n self.myMainGUI.root.after(200, self.myMainGUI.bindButtons)", "def drop_inventory(self):\n header = \"Choose item to drop:\\n\"\n def drop(get_gameworld_cell, x, y, item):\n item_entity = ItemPickup([item], x, y, get_gameworld_cell)\n events.trigger_event(\"world_add_entity\", item_entity)\n self.inventory.remove(item)\n action_list = [(item, functools.partial(drop, get_gameworld_cell=self.get_gameworld_cell, x=self.x, y=self.y, item=item)) for item in self.inventory]\n if len(action_list) == 0:\n header += \"You hold nothing!\"\n events.trigger_event(\"print_list\", action_list, header=header)", "def OnLeftUp_DragFloatingPane(self, eventOrPt):\r\n\r\n if isinstance(eventOrPt, wx.Point):\r\n clientPt = self._frame.ScreenToClient(eventOrPt)\r\n screenPt = wx.Point(*eventOrPt)\r\n else:\r\n clientPt = eventOrPt.GetPosition()\r\n screenPt = self._frame.ClientToScreen(clientPt)\r\n\r\n # try to find the pane\r\n paneInfo = self.GetPane(self._action_window)\r\n if not 
paneInfo.IsOk():\r\n raise Exception(\"Pane window not found\")\r\n\r\n ret = False\r\n \r\n if paneInfo.frame:\r\n \r\n # calculate the offset from the upper left-hand corner\r\n # of the frame to the mouse pointer\r\n framePos = paneInfo.frame.GetPosition()\r\n action_offset = screenPt - framePos\r\n\r\n # is the pane dockable?\r\n if self.CanDockPanel(paneInfo):\r\n # do the drop calculation\r\n indx = self._panes.index(paneInfo)\r\n ret, paneInfo = self.DoDrop(self._docks, self._panes, paneInfo, clientPt, action_offset)\r\n\r\n if ret:\r\n e = self.FireEvent(wxEVT_AUI_PANE_DOCKING, paneInfo, canVeto=True)\r\n if e.GetVeto():\r\n self.HideHint()\r\n ShowDockingGuides(self._guides, False)\r\n return\r\n\r\n e = self.FireEvent(wxEVT_AUI_PANE_DOCKED, paneInfo, canVeto=False)\r\n\r\n if self._agwFlags & AUI_MGR_SMOOTH_DOCKING:\r\n self.SmoothDock(paneInfo)\r\n\r\n self._panes[indx] = paneInfo\r\n \r\n # if the pane is still floating, update it's floating\r\n # position (that we store)\r\n if paneInfo.IsFloating():\r\n paneInfo.floating_pos = paneInfo.frame.GetPosition()\r\n if paneInfo.frame._transparent != paneInfo.transparent or self._agwFlags & AUI_MGR_TRANSPARENT_DRAG:\r\n paneInfo.frame.SetTransparent(paneInfo.transparent)\r\n paneInfo.frame._transparent = paneInfo.transparent\r\n \r\n elif self._has_maximized:\r\n self.RestoreMaximizedPane()\r\n \r\n # reorder for dropping to a new notebook\r\n # (caution: this code breaks the reference!)\r\n tempPaneInfo = self.CopyTarget(paneInfo)\r\n self._panes.remove(paneInfo)\r\n self._panes.append(tempPaneInfo)\r\n\r\n if ret:\r\n self.Update()\r\n\r\n self.HideHint()\r\n ShowDockingGuides(self._guides, False)", "def _on_exit(self, gui):\n gui.update()\n return WaitForDrop()", "def _drop_tree_items(self, drop_items, drop_id_color):\n\n # Block the selection signals while we process the drop\n self.selectSignalBlocked = True\n\n # Get the drop id color parent - the aov id tree widget item\n drop_id_set = drop_id_color.parent()\n\n # Drop the items into the new parent\n for item in drop_items:\n if item.parent().parent().text(0) != drop_id_color.parent().text(0):\n drop_items.remove(item)\n else:\n item.parent().removeChild(item)\n\n drop_id_color.insertChildren(0, drop_items)\n\n # Set the items as selected\n for item in drop_items:\n item.setSelected(True)\n\n # Set new idColor - need to optimize!\n utils.set_attribute_id(item.data(1, QtCore.Qt.UserRole),\n drop_id_set.data(1, QtCore.Qt.UserRole),\n drop_id_color.data(1, QtCore.Qt.UserRole))\n\n # Set the new parent as expanded so we can see the dropped items\n drop_id_color.setExpanded(True)\n\n # Unblock the selection change signals\n self.selectSignalBlocked = False\n\n return None", "def handle_actor_drop(data: bytes) -> Tuple[bytes, str]:\n # TODO: reverse first 9 bytes\n item_id = struct.unpack('I', data[:4])[0]\n unknown = struct.unpack('I', data[4:8])[0] # noqa: F841\n unknown2 = data[9] # noqa: F841\n item_name_length = struct.unpack('H', data[9:11])[0]\n item_name = data[11:11+item_name_length].decode(helpers.ENCODING)\n x, y, z = struct.unpack('fff',\n data[11+item_name_length:11+item_name_length+3*4])\n\n message = f'[{item_id}] {item_name} dropped at: {x} {y} {z}'\n\n # Pick up drops automatically\n if \"Drop\" in item_name:\n message += f'\\n\\t;) Auto-looting {item_id}'\n packet = struct.pack('=HI', 0x6565, item_id)\n helpers.PACKET_QUEUE.put(packet)\n # TODO: not sure about last few bytes\n return data[11+item_name_length+3*4:], message", "def drop(self):\n pass", "def 
drop(self):\n pass", "def drop(self, item: Item):\n self.items.remove(item)\n item.place(self.parent.x, self.parent.y, self.gamemap)\n\n self.engine.message_log.add_message(f'You yeeted the {item.name}.')", "def toolDropped(*args, **kwargs)->None:\n pass", "def DoDropLayer(self, docks, target, dock_direction):\r\n\r\n drop = self.CopyTarget(target)\r\n \r\n if dock_direction == AUI_DOCK_LEFT:\r\n drop.Dock().Left()\r\n drop_new_layer = max(max(GetMaxLayer(docks, AUI_DOCK_LEFT),\r\n GetMaxLayer(docks, AUI_DOCK_BOTTOM)),\r\n GetMaxLayer(docks, AUI_DOCK_TOP)) + 1\r\n\r\n elif dock_direction == AUI_DOCK_TOP:\r\n drop.Dock().Top()\r\n drop_new_layer = max(max(GetMaxLayer(docks, AUI_DOCK_TOP),\r\n GetMaxLayer(docks, AUI_DOCK_LEFT)),\r\n GetMaxLayer(docks, AUI_DOCK_RIGHT)) + 1\r\n\r\n elif dock_direction == AUI_DOCK_RIGHT:\r\n drop.Dock().Right()\r\n drop_new_layer = max(max(GetMaxLayer(docks, AUI_DOCK_RIGHT),\r\n GetMaxLayer(docks, AUI_DOCK_TOP)),\r\n GetMaxLayer(docks, AUI_DOCK_BOTTOM)) + 1\r\n\r\n elif dock_direction == AUI_DOCK_BOTTOM:\r\n drop.Dock().Bottom()\r\n drop_new_layer = max(max(GetMaxLayer(docks, AUI_DOCK_BOTTOM),\r\n GetMaxLayer(docks, AUI_DOCK_LEFT)),\r\n GetMaxLayer(docks, AUI_DOCK_RIGHT)) + 1\r\n\r\n else:\r\n return False, target\r\n \r\n\r\n drop.Dock().Layer(drop_new_layer)\r\n return self.ProcessDockResult(target, drop)", "def OnCaptureLost(self, event):\r\n\r\n if self._is_dragging:\r\n self._is_dragging = False\r\n self._on_button = False\r\n\r\n if self._drag_image:\r\n self._drag_image.EndDrag()\r\n del self._drag_image\r\n self._drag_image = None\r\n \r\n event = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_CANCEL_DRAG, self.GetId())\r\n event.SetSelection(self.GetIdxFromWindow(self._click_tab))\r\n event.SetOldSelection(event.GetSelection())\r\n event.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(event)", "def process_IN_UNMOUNT(self, event):", "def _on_articles_dropped_to_trash(self, evt):\n \n # get articles\n articles = [core.Article(dbid=i) for i in evt.articles_dbids]\n \n # update library\n self._library.trash(articles, True)\n \n # refresh collections view\n self._collections_view.UpdateCounts()\n \n # refresh articles view\n self._articles_view.ShowArticles()", "def drop(self, oldpos, fieldgroup, currentplayer):\n\t\ttry:\n\t\t\tdropped_on = pygame.sprite.spritecollide(self,\n\t\t\t\t\t\t\t\t\t fieldgroup,\n\t\t\t\t\t\t\t\t\t False)[0]\n\t\t\tif dropped_on.type == \"home\":\n\t\t\t\tcurrentplayer[\"meeples_home\"] += 1\n\t\t\telif dropped_on.id == currentplayer.name*10:\n\t\t\t\tcurrentplayer.meeples_out -= 1\n\t\t\tself.grabbed = False\n\n\t\t\treturn True\n\t\texcept:\n\t\t\tself.rect = oldpos\n\t\t\tself.grabbed = False\n\t\t\treturn False", "def dnd_end(self, target, event):\n\n self._dnd_started = False\n\n if self._dnd_source and self.rgb:\n # Re-bind events that are dropped by dnd.py\n self._canvas.bind('<B1-Motion>', self._start_dnd)\n self._canvas.bind('<B1-ButtonRelease-1>', self._update_color)", "def OnMoveFinished(self):\r\n\r\n # notify the owner manager that the pane has finished moving\r\n if self._owner_mgr:\r\n self._owner_mgr._action_window = self._pane_window\r\n point = wx.GetMousePosition()\r\n if self._is_toolbar:\r\n self._owner_mgr.OnLeftUp_DragToolbarPane(point)\r\n else:\r\n self._owner_mgr.OnLeftUp_DragFloatingPane(point)\r\n\r\n self._owner_mgr.OnFloatingPaneMoved(self._pane_window, point)", "def __init__(self, obj):\n \n wx.TextDropTarget.__init__(self) # Initialize the wx.TextDropTarget Object\n self.obj = obj# Store 
the Object Reference for dropped text", "def mouseMoveEvent (self, event):\n self.itemMoved = True\n super(DiagramItem, self).mouseMoveEvent(event)", "def drop(self):\n Game.instance.area_map.entities.append(self.owner)\n Game.instance.inventory.remove(self.owner)\n self.owner.x = Game.instance.player.x\n self.owner.y = Game.instance.player.y\n message('You dropped a ' + self.owner.name + '.', palette.yellow)", "def drop_field(self, field):\r\n self.dropped_fields.add(field)", "def mounted(self):\n self.vue.dragAndDropCapable = self.determineDragAndDropCapable();\n \"\"\"\n If drag and drop capable, then we continue to bind events to our elements.\n \"\"\"\n if( self.vue.dragAndDropCapable ):\n \"\"\"\n Listen to all of the drag events and bind an event listener to each\n for the fileform.\n \"\"\"\n \n def for_events(evt):\n def add_event(e):\n e.preventDefault();\n e.stopPropagation();\n add_event.bind(self.vue)\n \"\"\"\n For each event add an event listener that prevents the default action\n (opening the file in the browser) and stop the propagation of the event (so\n no other elements open the file in the browser)\n \"\"\"\n self.vue[\"$refs\"].fileform.addEventListener(evt,add_event,False)\n for_events.bind(self.vue)\n ['drag', 'dragstart', 'dragend', 'dragover', 'dragenter', 'dragleave', 'drop'].forEach(for_events);\n \"\"\"\n Add an event listener for drop to the form\n \"\"\"\n def capture_files(e):\n \"\"\"\n Capture the files from the drop event and add them to our local files\n array.\n \"\"\"\n i = 0\n \n while i < e.dataTransfer.files.length:\n self.vue.files.push( e.dataTransfer.files[i] );\n i+=1\n self.getImagePreviews()\n self.submitFiles()\n capture_files.bind(this)\n self.vue[\"$refs\"].fileform.addEventListener('drop', capture_files);\n __pragma__ ('jsiter') \n fetch('/json/media/',\n {\n \"method\":\"GET\",\n })\\\n .then(lambda res:res.json())\\\n .then(self.drawImages)\\\n .catch(lambda e:console.log('FAILURE!!',e));\n __pragma__ ('nojsiter')", "def _on_motion(self, event):\n if not self._visual_drag.winfo_ismapped():\n return\n\n if self._drag_cols and self._dragged_col is not None:\n self._drag_col(event)\n elif self._drag_rows and self._dragged_row is not None:\n self._drag_row(event)", "def _handle_player_collide_item(self, player: Player, dropped_item: DroppedItem,\n data, arbiter: pymunk.Arbiter) -> bool:\n\n dropped_item.collect(self._player)\n self._world.remove_item(dropped_item)\n return False", "def dragEnterEvent(self, e):\n # TODO: Do it properly.\n # TODO: Redraw widget while dragging.\n e.accept()", "def OnMotion(self, event):\r\n \r\n # start a drag event\r\n if not self._dragging and self._action_item != None and self._action_pos != wx.Point(-1, -1) and \\\r\n abs(event.m_x - self._action_pos.x) + abs(event.m_y - self._action_pos.y) > 5:\r\n \r\n self.SetToolTipString(\"\")\r\n self._dragging = True\r\n\r\n e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_BEGIN_DRAG, self.GetId())\r\n e.SetEventObject(self)\r\n e.SetToolId(self._action_item.id)\r\n self.ProcessEvent(e)\r\n self.DoIdleUpdate()\r\n return\r\n \r\n hit_item = self.FindToolForPosition(*event.GetPosition())\r\n \r\n if hit_item: \r\n if not hit_item.state & AUI_BUTTON_STATE_DISABLED:\r\n self.SetHoverItem(hit_item)\r\n else:\r\n self.SetHoverItem(None)\r\n \r\n else: \r\n # no hit item, remove any hit item\r\n self.SetHoverItem(hit_item)\r\n \r\n # figure out tooltips\r\n packing_hit_item = self.FindToolForPositionWithPacking(*event.GetPosition())\r\n \r\n if packing_hit_item:\r\n \r\n if 
packing_hit_item != self._tip_item:\r\n self._tip_item = packing_hit_item\r\n\r\n if packing_hit_item.short_help != \"\":\r\n self.StartPreviewTimer()\r\n self.SetToolTipString(packing_hit_item.short_help)\r\n else:\r\n self.SetToolTipString(\"\")\r\n self.StopPreviewTimer()\r\n \r\n else:\r\n \r\n self.SetToolTipString(\"\")\r\n self._tip_item = None\r\n self.StopPreviewTimer()\r\n \r\n # if we've pressed down an item and we're hovering\r\n # over it, make sure it's state is set to pressed\r\n if self._action_item:\r\n \r\n if self._action_item == hit_item:\r\n self.SetPressedItem(self._action_item)\r\n else:\r\n self.SetPressedItem(None)\r\n \r\n # figure out the dropdown button state (are we hovering or pressing it?)\r\n self.RefreshOverflowState()", "def panelDropLoad( dragControl, dropControl, messages, x, y, dragType ):\n # Get name of the asset that was dragged\n loadedObject = cmds.iconTextCheckBox(dragControl, query=True, label=True)\n # Get mode from standin or assembly\n selectedRadio = cmds.radioCollection(loadMethodRadio, query=True, select=True)\n # Instantiate AssetIcon\n asset = AssetIcon(loadedObject)\n \n loadedAssetNode = None\n\n # Load asset using correct function\n if \"standin\" in selectedRadio:\n loadedAssetNode = asset.loadArnoldAsset()\n else: \n loadedAssetNode = asset.loadAsset()\n \n # Get a position in the world using the mouse pointer as reference\n loadedLocation = cmds.autoPlace(useMouse=True)\n # Move the asset to that position\n cmds.move(loadedLocation[0], loadedLocation[1], loadedLocation[2], loadedAssetNode, absolute=True)", "def is_droppable ( self, object, row, value ):\n return self.droppable", "def execute_drop(item_id):\r\n if (item_id in inventory):\r\n current_room[\"items\"][item_id] = inventory[item_id]\r\n del inventory[item_id]\r\n wrap_print(\"You dropped \" + items[item_id][\"name\"] + \".\")\r\n global valid_move\r\n valid_move = True\r\n else:\r\n wrap_print(\"You cannot drop that.\")", "def dropObject(player):\n for treasure in Treasure.List:\n if player.treasureCaptured:\n player.treasureCaptured = False\n treasure.x = player.x\n treasure.y = player.y\n treasure.img = pygame.image.load(Treasure.treasure_img[0])", "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def OnLeftUp(self, event): # ANDY PAN\n if event.ShiftDown():\n event.Skip()\n return\n self.last_drag_x = self.last_drag_y = None\n self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))\n # turn off drag\n self.was_dragging = False\n # force PAINT event to remove selection box (if required)\n # self.Update()\n event.Skip()", "def quantity_dropped(self, quantity_dropped):\n\n self._quantity_dropped = quantity_dropped", "def drag_and_drop_by_offset(self, elem, x, y):\n ActionChains(self.driver).drag_and_drop_by_offset(elem, xoffset=x, yoffset=y).perform()", "def OnTokenButtonPress(self, event):\n\n # Unbind main GUI buttons\n self.myMainGUI.unbindButtons()\n\n # Store drag data\n self._drag_data = {\"x\": event.x, \"item\": self.canvas_SG[0].find_closest(self.canvas_SG[0].canvasx(event.x),\n self.canvas_SG[0].canvasy(event.y))[0]}", "def drag(self, source_index, target_index):\r\n draggables = self.q(css='.drag-handle')\r\n source = draggables[source_index]\r\n target = draggables[target_index]\r\n action = ActionChains(self.browser)\r\n # When dragging before the target element, must take into account that the placeholder\r\n # will appear in the place where the target used to be.\r\n placeholder_height = 40\r\n 
action.click_and_hold(source).move_to_element_with_offset(\r\n target, 0, placeholder_height\r\n ).release().perform()\r\n wait_for_notification(self)", "def _onmove(self, event):", "def DoDropRow(self, panes, target, dock_direction, dock_layer, dock_row):\r\n \r\n drop = self.CopyTarget(target)\r\n panes = DoInsertDockRow(panes, dock_direction, dock_layer, dock_row)\r\n\r\n drop.Dock().Direction(dock_direction).Layer(dock_layer).Row(dock_row).Position(0)\r\n return self.ProcessDockResult(target, drop)", "def _drag_col(self, event):\n x = self._dx + event.x # get dragged column new left x coordinate\n self._visual_drag.place_configure(x=x) # update column preview position\n # if one border of the dragged column is beyon the middle of the\n # neighboring column, swap them\n if (self._dragged_col_neighbor_widths[0] is not None and\n x < self._dragged_col_x - self._dragged_col_neighbor_widths[0] / 2):\n self._swap_columns('left')\n elif (self._dragged_col_neighbor_widths[1] is not None and\n x > self._dragged_col_x + self._dragged_col_neighbor_widths[1] / 2):\n self._swap_columns('right')\n # horizontal scrolling if the cursor reaches the side of the table\n if x < 0 and self.xview()[0] > 0:\n # scroll left and update dragged column x coordinate\n self.xview_scroll(-10, 'units')\n self._dragged_col_x += 10\n elif x + self._dragged_col_width / 2 > self.winfo_width() and self.xview()[1] < 1:\n # scroll right and update dragged column x coordinate\n self.xview_scroll(10, 'units')\n self._dragged_col_x -= 10", "def acceptDrops(self) -> bool:\n ...", "def touch_moved(self, touch):\n\t\tpass", "def onLeftUp(self, event):\n\n self.last_drag_x = self.last_drag_y = None\n\n if self.ignore_next_up:\n self.ignore_next_up = False\n return\n\n self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))\n\n # we need a repaint to remove any selection box, but NOT YET!\n delayed_paint = self.sbox_1_x\n\n # if any layers interested, inform of possible select\n if not self.was_dragging:\n if self.is_box_select:\n self.is_box_select = False\n\n # box selection\n (lon_1, lat_1) = self.convertView2Geo(self.sbox_1_x,\n self.sbox_1_y)\n (lon_2, lat_2) = self.convertView2Geo(self.sbox_1_x+self.sbox_w,\n self.sbox_1_y+self.sbox_h)\n\n # check each layer for a box select callback\n copy_layers = copy.copy(self.layer_z_order)\n handled_layers = []\n for id in copy_layers:\n # if layer still exists and not already handled\n if id in self.layer_mapping and id not in handled_layers:\n l = self.layer_mapping[id]\n if l.visible and l.callback_box_select:\n # get all points selected (if any)\n points = self.getBoxSelectPoints(l.data,\n (lon_1,lat_1),\n (lon_2,lat_2))\n if points:\n # pass points to callback\n handled_layers.append(id)\n if l.callback_box_select(id, points):\n delayed_paint = True\n else:\n # possible point selection\n (cx, cy) = event.GetPositionTuple()\n clickpt = self.convertView2Geo(cx, cy)\n # check each layer for a point select callback\n copy_layers = copy.copy(self.layer_z_order)\n handled_layers = []\n for id in copy_layers:\n # if layer still exists and not already handled\n if id in self.layer_mapping and id not in handled_layers:\n l = self.layer_mapping[id]\n if l.visible and l.callback_point_select:\n pt = self.getNearestPointInLayer(l.data,\n l.delta, clickpt)\n if pt:\n handled_layers.append(id)\n if l.callback_point_select(id, pt):\n delayed_paint = True\n\n # turn off drag\n self.was_dragging = False\n\n # turn off box selection mechanism\n self.is_box_select = False\n self.sbox_1_x = self.sbox_1_y = 
None\n\n # force PAINT event to remove selection box (if required)\n if delayed_paint:\n self.Refresh()", "def __init__(self):\n \n __gsignals__ = { 'expose-event' : 'override'}\n\n self.filename = \"\"\n self.source_id = 0\n self.dy = 0\n\n # Create a top level window\n self.window = gtk.Window()\n \n #Get y position of mouse at start of drag \n self.mouse_click_point = 0\n \n #Create a TextArea class instance\n self.drawing = TextArea()\n\n self.drawing.show()\n \n self.window.connect('drag-begin', self.start_refresh)\n self.window.connect('drag-motion', self.drag_motion)\n self.window.connect('drag-end', self.stop_drag_motion)\n \n self.window.drag_source_set(gtk.gdk.BUTTON1_MASK,\n [(\"\", gtk.TARGET_SAME_APP, 1)],\n gtk.gdk.ACTION_PRIVATE)\n\n self.window.drag_dest_set(gtk.DEST_DEFAULT_MOTION,\n [(\"\", gtk.TARGET_SAME_APP, 1)],\n gtk.gdk.ACTION_PRIVATE)\n \n self.window.connect('destroy', lambda w: gtk.main_quit())\n \n\n self.window.set_default_size(600,900)\n self.window.move(300,100)\n\n #Create a TextArea class instance\n self.drawing = TextArea()\n self.drawing.show()\n \n \n vbox = gtk.VBox()\n self.window.add(vbox)\n\n #Create a UIManager instance\n uimanager = gtk.UIManager()\n self.current_scale = 16\n\n #Add the accelerator group to the toplevel window\n accelgroup = uimanager.get_accel_group()\n self.window.add_accel_group(accelgroup)\n\n #Create an ActionGroup\n actiongroup = gtk.ActionGroup('PyViewer')\n self.actiongroup = actiongroup\n \n #Create actions\n actiongroup.add_actions([('Open', gtk.STOCK_OPEN, '_Open', None, None, self.open_file),\n ('Quit', gtk.STOCK_QUIT, '_Quit', None, None, self.quit_viewer),\n ('File', None, '_File')])\n \n #Add the actiongroup to the uimanager\n uimanager.insert_action_group(actiongroup, 0)\n\n #Add a UI description\n uimanager.add_ui_from_string(self.ui)\n\n #Create a MenuBar\n menubar = uimanager.get_widget('/MenuBar')\n \n #Pack the menubar and the drawing area into a vbox\n vbox.pack_start(menubar, False)\n vbox.pack_start(self.drawing) \n\n self.window.show_all()\n\n return", "def _on_press(self, event):\n if tk.DISABLED in self.state():\n return\n\n region = self.identify_region(event.x, event.y)\n\n if self._drag_cols and region == 'heading':\n self._start_drag_col(event)\n elif self._drag_rows and region == 'cell':\n self._start_drag_row(event)", "def _on_mouse(self, event):\n x, y = event.GetPosition()\n if self._drag_mode == DepthCanvas.SASH_DRAG_NONE: \n self._canvas_hit_test(x, y) \n if event.LeftDown():\n self.start_dragging(y)\n elif self._drag_mode == DepthCanvas.SASH_DRAG_DRAGGING:\n if event.LeftIsDown():\n self.drag_it(y) \n elif event.LeftUp():\n self.end_dragging()\n event.Skip()", "def detach(self, *items):\n self._visual_drag.detach(*items)\n ttk.Treeview.detach(self, *items)", "def btn_upload_callback(self):\n # Create File Select Dialog\n dialog = QFileDialog(parent=self, caption='Images')\n dialog.setMimeTypeFilters(\n [\"image/jpeg\", \"image/png\", \"image/tiff\", 'application/zip'])\n dialog.setFileMode(QFileDialog.ExistingFile)\n\n if dialog.exec_() == QDialog.Accepted:\n\n filename = dialog.selectedFiles()[0]\n\n with open(filename, 'rb') as f:\n file_b64s = fio_to_b64s(f)\n\n if ext_from_path(filename) == '.zip':\n ret = api.upload_zip(\n file_b64s,\n nameext_from_path(filename),\n self.user_hash\n )\n else:\n ret = api.upload_image(\n file_b64s,\n nameext_from_path(filename),\n self.user_hash\n )\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n self.update_table()", "def 
_update_droppings(self):\n\t\t# Update dropping positions.\n\t\tself.droppings.update()\n\n\t\t# Get rid of the droppings that have disappeared.\n\t\tfor dropping in self.droppings.copy():\n\t\t\tif dropping.rect.top >= 1050:\n\t\t\t\tself.droppings.remove(dropping)\n\n\t\tself._check_dropping_auto_collisions()", "def OnLeftUp(self, event):\r\n\r\n if self._action == actionResize:\r\n## self._frame.Freeze()\r\n self.OnLeftUp_Resize(event)\r\n## self._frame.Thaw()\r\n \r\n elif self._action == actionClickButton:\r\n self.OnLeftUp_ClickButton(event)\r\n \r\n elif self._action == actionDragFloatingPane:\r\n self.OnLeftUp_DragFloatingPane(event)\r\n \r\n elif self._action == actionDragToolbarPane:\r\n self.OnLeftUp_DragToolbarPane(event)\r\n \r\n else:\r\n event.Skip() \r\n\r\n if self._frame.HasCapture():\r\n self._frame.ReleaseMouse()\r\n \r\n self._action = actionNone", "def callback(self, event):\n # IN_CLOSE_WRITE -> 0x00000008\n if event.mask == 0x00000008:\n if event.name.endswith('.json'):\n print_success(\"Ldapdomaindump file found\")\n if event.name in ['domain_groups.json', 'domain_users.json']:\n if event.name == 'domain_groups.json':\n self.domain_groups_file = event.pathname\n if event.name == 'domain_users.json':\n self.domain_users_file = event.pathname\n if self.domain_groups_file and self.domain_users_file:\n print_success(\"Importing users\")\n subprocess.Popen(['jk-import-domaindump', self.domain_groups_file, self.domain_users_file])\n elif event.name == 'domain_computers.json':\n print_success(\"Importing computers\")\n subprocess.Popen(['jk-import-domaindump', event.pathname])\n\n # Ldap has been dumped, so remove the ldap targets.\n self.ldap_strings = []\n self.write_targets()\n\n if event.name.endswith('_samhashes.sam'):\n host = event.name.replace('_samhashes.sam', '')\n # TODO import file.\n print_success(\"Secretsdump file, host ip: {}\".format(host))\n subprocess.Popen(['jk-import-secretsdump', event.pathname])\n\n # Remove this system from this ip list.\n self.ips.remove(host)\n self.write_targets()", "def help_drop(self):\n print(DROP)", "def dropped(self, dropped):\n if self.local_vars_configuration.client_side_validation and dropped is None: # noqa: E501\n raise ValueError(\"Invalid value for `dropped`, must not be `None`\") # noqa: E501\n\n self._dropped = dropped", "def getTransferData(self, f: java.awt.datatransfer.DataFlavor) -> object:\n ...", "def drag(self, event):\n self.yview('scroll', self.ypos - event.y, 'units')\n self.xview('scroll', self.xpos - event.x, 'units')\n self.ypos = event.y\n self.xpos = event.x", "def OnMouse(self, event):\n if not event.Dragging():\n self._dragPos = None\n if self.HasCapture():\n self.ReleaseMouse()\n return\n else:\n if not self.HasCapture():\n self.CaptureMouse()\n\n if not self._dragPos:\n self._dragPos = event.GetPosition()\n else:\n pos = event.GetPosition()\n displacement = self._dragPos - pos\n self.SetPosition(self.GetPosition() - displacement)", "def DoDropNonFloatingPane(self, docks, panes, target, pt):\r\n \r\n screenPt = self._frame.ClientToScreen(pt)\r\n clientSize = self._frame.GetClientSize()\r\n frameRect = GetInternalFrameRect(self._frame, self._docks)\r\n\r\n drop = self.CopyTarget(target)\r\n\r\n # The result should always be shown\r\n drop.Show()\r\n\r\n part = self.HitTest(pt.x, pt.y)\r\n\r\n if not part:\r\n return False, target\r\n\r\n if part.type == AuiDockUIPart.typeDockSizer:\r\n \r\n if len(part.dock.panes) != 1:\r\n return False, target\r\n \r\n part = 
self.GetPanePart(part.dock.panes[0].window)\r\n if not part:\r\n return False, target\r\n \r\n if not part.pane:\r\n return False, target\r\n\r\n part = self.GetPanePart(part.pane.window)\r\n if not part:\r\n return False, target\r\n\r\n insert_dock_row = False\r\n insert_row = part.pane.dock_row\r\n insert_dir = part.pane.dock_direction\r\n insert_layer = part.pane.dock_layer\r\n\r\n direction = part.pane.dock_direction\r\n \r\n if direction == AUI_DOCK_TOP:\r\n if pt.y >= part.rect.y and pt.y < part.rect.y+auiInsertRowPixels:\r\n insert_dock_row = True\r\n\r\n elif direction == AUI_DOCK_BOTTOM:\r\n if pt.y > part.rect.y+part.rect.height-auiInsertRowPixels and \\\r\n pt.y <= part.rect.y + part.rect.height:\r\n insert_dock_row = True\r\n\r\n elif direction == AUI_DOCK_LEFT:\r\n if pt.x >= part.rect.x and pt.x < part.rect.x+auiInsertRowPixels:\r\n insert_dock_row = True\r\n\r\n elif direction == AUI_DOCK_RIGHT:\r\n if pt.x > part.rect.x+part.rect.width-auiInsertRowPixels and \\\r\n pt.x <= part.rect.x+part.rect.width:\r\n insert_dock_row = True\r\n\r\n elif direction == AUI_DOCK_CENTER:\r\n \r\n # \"new row pixels\" will be set to the default, but\r\n # must never exceed 20% of the window size\r\n new_row_pixels_x = auiNewRowPixels\r\n new_row_pixels_y = auiNewRowPixels\r\n\r\n if new_row_pixels_x > (part.rect.width*20)/100:\r\n new_row_pixels_x = (part.rect.width*20)/100\r\n\r\n if new_row_pixels_y > (part.rect.height*20)/100:\r\n new_row_pixels_y = (part.rect.height*20)/100\r\n\r\n # determine if the mouse pointer is in a location that\r\n # will cause a new row to be inserted. The hot spot positions\r\n # are along the borders of the center pane\r\n\r\n insert_layer = 0\r\n insert_dock_row = True\r\n pr = part.rect\r\n \r\n if pt.x >= pr.x and pt.x < pr.x + new_row_pixels_x:\r\n insert_dir = AUI_DOCK_LEFT\r\n elif pt.y >= pr.y and pt.y < pr.y + new_row_pixels_y:\r\n insert_dir = AUI_DOCK_TOP\r\n elif pt.x >= pr.x + pr.width - new_row_pixels_x and pt.x < pr.x + pr.width:\r\n insert_dir = AUI_DOCK_RIGHT\r\n elif pt.y >= pr.y+ pr.height - new_row_pixels_y and pt.y < pr.y + pr.height:\r\n insert_dir = AUI_DOCK_BOTTOM\r\n else:\r\n return False, target\r\n\r\n insert_row = GetMaxRow(panes, insert_dir, insert_layer) + 1\r\n \r\n if insert_dock_row:\r\n \r\n panes = DoInsertDockRow(panes, insert_dir, insert_layer, insert_row)\r\n drop.Dock().Direction(insert_dir).Layer(insert_layer). \\\r\n Row(insert_row).Position(0)\r\n \r\n return self.ProcessDockResult(target, drop)\r\n\r\n # determine the mouse offset and the pane size, both in the\r\n # direction of the dock itself, and perpendicular to the dock\r\n\r\n if part.orientation == wx.VERTICAL:\r\n \r\n offset = pt.y - part.rect.y\r\n size = part.rect.GetHeight()\r\n \r\n else:\r\n \r\n offset = pt.x - part.rect.x\r\n size = part.rect.GetWidth()\r\n \r\n drop_position = part.pane.dock_pos\r\n\r\n # if we are in the top/left part of the pane,\r\n # insert the pane before the pane being hovered over\r\n if offset <= size/2:\r\n \r\n drop_position = part.pane.dock_pos\r\n panes = DoInsertPane(panes,\r\n part.pane.dock_direction,\r\n part.pane.dock_layer,\r\n part.pane.dock_row,\r\n part.pane.dock_pos)\r\n\r\n # if we are in the bottom/right part of the pane,\r\n # insert the pane before the pane being hovered over\r\n if offset > size/2:\r\n \r\n drop_position = part.pane.dock_pos+1\r\n panes = DoInsertPane(panes,\r\n part.pane.dock_direction,\r\n part.pane.dock_layer,\r\n part.pane.dock_row,\r\n part.pane.dock_pos+1)\r\n \r\n\r\n drop.Dock(). 
\\\r\n Direction(part.dock.dock_direction). \\\r\n Layer(part.dock.dock_layer).Row(part.dock.dock_row). \\\r\n Position(drop_position)\r\n \r\n return self.ProcessDockResult(target, drop)", "def handle_event(self, event):\n if event.key == BattleActions.SELECT.value:\n prev_item = self.pokemon.held_item\n self.pokemon.held_item = self.item\n self.bag.subtract_item(self.item)\n self.bag.add_item(prev_item)\n self.is_dead = True", "def handle(self, event):\n\n if event == FL_PUSH:\n if Fl.event_button1():\n if self.gamewin.placing >= 0:\n self.gamewin.place_boat()\n \n else:\n self.gamewin.tile_clicked(self)\n return 1\n \n if event == FL_DRAG:\n return 0\n \n return super().handle(event)", "def on_moved(self, event):\n super(myEventHandler,self).on_moved(event)\n #moveto events from external folders have no src_path\n source = event.src_path\n dest = event.dest_path\n if event.is_directory:\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n pass\n #file = splitpath[1]\n #pathtoonedir = self.onedir.getonedirrectory()\n #oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n #newpath = splitdest[0].replace(pathtoonedir ,\"\")\n #if oldpath is \"\":\n # oldpath = os.path.sep\n #self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n oldname = source\n newname = dest\n pathtoonedir = self.onedir.getonedirrectory()\n oldname = oldname.replace(pathtoonedir ,\"\")\n newname = newname.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(oldname,newname)\n else:\n #if it comes from outside the folder structure\n if source is None:\n try:\n #use os.path.split to get file name and path\n splitpath = split(dest)\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! \" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n else:\n #file was moved!\n #check if name stays the same i.e. it's a move not a rename!\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n newpath = splitdest[0].replace(pathtoonedir ,\"\")\n if oldpath is \"\":\n oldpath = os.path.sep\n self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n file = splitpath[1]\n newname = splitdest[1]\n pathtoonedir = self.onedir.getonedirrectory()\n path = splitpath[0].replace(pathtoonedir ,\"\")\n if path is \"\":\n path = os.path.sep\n else:\n path = path[1:]\n self.onedir.rename(file,path,newname)" ]
[ "0.72224736", "0.7178574", "0.7033632", "0.69308865", "0.69137365", "0.68517953", "0.66538286", "0.660371", "0.6481881", "0.6399347", "0.6314507", "0.63019335", "0.6220606", "0.6072313", "0.60482043", "0.6036351", "0.59223694", "0.59200025", "0.58687717", "0.58277893", "0.5734516", "0.56892866", "0.56366456", "0.56327146", "0.5616902", "0.55873185", "0.5570762", "0.5556108", "0.5525009", "0.55130804", "0.5495399", "0.5482396", "0.5450135", "0.54215217", "0.5369904", "0.53605336", "0.53423023", "0.53269815", "0.531934", "0.53087324", "0.5283762", "0.5241791", "0.52326345", "0.5222645", "0.52175194", "0.5214214", "0.51947457", "0.51713085", "0.51713085", "0.5170968", "0.51621383", "0.5153949", "0.51286787", "0.5117842", "0.51163244", "0.51041967", "0.5077833", "0.5052566", "0.5039383", "0.50068283", "0.4988413", "0.49841556", "0.49797967", "0.49678066", "0.49637955", "0.48908144", "0.48854396", "0.4880978", "0.4865574", "0.48463678", "0.48407745", "0.4836152", "0.48303056", "0.4824467", "0.4820127", "0.48152485", "0.48148572", "0.48119757", "0.48106006", "0.48065785", "0.4794525", "0.47882456", "0.4778417", "0.477379", "0.47688243", "0.47669476", "0.476431", "0.4757513", "0.47523054", "0.47477016", "0.47464442", "0.47421065", "0.47363034", "0.47358024", "0.4735249", "0.47333997", "0.4733353", "0.47230282", "0.47169688", "0.4713779" ]
0.81672764
0
Creates a data folder containing a 100-class subset of ImageNet, then creates a zipped copy of it
def zip_imagenet100c():
    #First make sure the directory we are given is correct!
    if not os.path.isdir(DATA_SRC_ROOT):
        raise Exception("Bad filepath given")

    #create the destination directories if they don't exist
    if not os.path.isdir(IMAGENET100_DIR):
        os.mkdir(IMAGENET100_DIR)

    #grab the subset wnids for the 100-class subset
    with open(IMAGENET100_CLASSES) as f:
        subset_wnids = f.readlines()
    subset_wnids = [x.strip() for x in subset_wnids]  #list of the 100 WNIDs we grab

    #Grab the names of all of the folders inside the root data source
    #Structure is distortion/sub_distortion/level/wnids
    for distortion in os.listdir(DATA_SRC_ROOT):
        if distortion != "meta.bin":
            print(distortion)
            folder_path = os.path.join(DATA_SRC_ROOT, distortion)
            if not os.path.isdir(folder_path):
                continue
            for sub_distortion in os.listdir(folder_path):
                print(sub_distortion)
                subfolder_path = os.path.join(folder_path, sub_distortion)
                if not os.path.isdir(subfolder_path):
                    continue
                for level in os.listdir(subfolder_path):
                    print(level)
                    level_path = os.path.join(subfolder_path, level)
                    #grab the correct validation directories
                    for wnid in os.listdir(level_path):
                        wnid_path = os.path.join(level_path, wnid)
                        if not os.path.isdir(wnid_path):
                            continue
                        if wnid in subset_wnids:
                            dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid)
                            shutil.copytree(wnid_path, dest_path)

    #copy the metadata bin file
    meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin')
    meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin')
    shutil.copy(meta_file, meta_dest)

    #Zip the destination directory
    shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='creating dataset', total=len(files)):\n img = Image.open(image)\n # quadruple dataset by vertical and horizontal flipping\n for i in range(4):\n if i == 1 or i == 3:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if i == 2:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n x, y, w, h, cx, cy = get_random_image_values()\n resized = img.resize((y, x), Image.LANCZOS) # mind thee: x and y swapped\n arr = np.array(resized, dtype=np.float32)\n arr, target_array = create_cropped_data(np.copy(arr), (w, h), (cx, cy), crop_only=False)\n images.append(arr)\n crop_sizes.append((w, h))\n crop_centers.append((cx, cy))\n targets.append(target_array)\n data = {'images': images, 'crop_sizes': crop_sizes, 'crop_centers': crop_centers}\n # persist on harddrive\n with open(dataset_file, 'wb') as f:\n pickle.dump(data, f)\n with open(targets_file, 'wb') as f:\n pickle.dump(targets, f)\n print(f'created datset and saved it to {dataset_file} and targets to {targets_file}')", "def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()", "def data_directory(class_labels):\n\n dataset_folders = ['train','validation','test']\n object_class = class_labels\n os.mkdir(BASE_DIR)\n\n for folder in dataset_folders:\n for obj_cls in object_class:\n training_dir = BASE_DIR + os.sep +'{}'.format(folder)\n if not os.path.exists(BASE_DIR+os.sep +'{}'.format(folder)):\n os.mkdir(training_dir)\n class_dir = training_dir + os.sep + '{}'.format(obj_cls)\n if not os.path.exists(training_dir + os.sep + '{}'.format(obj_cls)):\n os.mkdir(class_dir)", "def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")", "def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + 
str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def distributeDataset(destinationFolder, testFolder, trainFolder):\n \n # Set up directories for test and training data sets\n if not os.path.exists(testFolder):\n os.makedirs(testFolder)\n if not os.path.exists(trainFolder):\n os.makedirs(trainFolder)\n\n # Generate list of directories\n dirs = []\n for i in range(0,8):\n dirs.append(os.path.join(destinationFolder, \"NISTSpecialDatabase4GrayScaleImagesofFIGS\\\\sd04\\\\png_txt\\\\figs_\" + str(i)))\n\n # Extract Test data\n files = os.listdir(dirs[0])\n\n for filename in files:\n shutil.copy(os.path.join(dirs[0], filename), testFolder)\n shutil.rmtree(dirs[0])\n\n # Extract Train data\n for i in range(1,8):\n\n files = os.listdir(dirs[i])\n for filename in files:\n shutil.copy(os.path.join(dirs[i], filename), trainFolder)\n shutil.rmtree(dirs[i])\n shutil.rmtree(os.path.join(destinationFolder, \"NISTSpecialDatabase4GrayScaleImagesofFIGS\"))", "def prepare_data_for_training(args):\n # Form the train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count", "def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n 
mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array", "def create_train_folder(df_train, target_path):\n folder_path = os.path.join(target_path, 'xray_preprocess/train')\n print(f'Create train set at: {folder_path}')\n for _, row in tqdm(df_train.iterrows(), total=df_train.shape[0]):\n if row['class']=='negative':\n destination_path = os.path.join(folder_path, 'negative')\n elif row['class']=='positive':\n destination_path = os.path.join(folder_path, 'positive')\n if not os.path.exists(destination_path):\n os.makedirs(destination_path) \n img = os.path.join(target_path, 'xray', 'train', row['filename'])\n shutil.copy(img, destination_path )", "def prepare_data(src, dst):\n\n data_prefix = 'miniCelebA_'\n for split in ['train', 'val', 'test']:\n print('processing %s split' % split)\n if (not os.path.exists(os.path.join(dst, 'x_' + split + '.npy')) or not\n os.path.exists(os.path.join(dst, 'y_' + split + '.npy'))):\n labels = glob(os.path.join(src, split, '*'))\n no_sample = 0\n for lb in labels:\n no_sample += len(os.listdir(lb))\n\n x = np.zeros((no_sample, 224, 224, 3))\n y = np.zeros((no_sample, 20))\n count = 0\n for lb in labels:\n files = glob(os.path.join(lb, '*.png'))\n for f in files:\n print('processing file: %s, with label %s' % (f, lb.split('/')[-1]))\n y[count] = to_categorical(int(lb.split('/')[-1]), 20)\n img = misc.imresize(misc.imread(f), (224, 224), 'bicubic')\n if img.ndim == 2:\n img = np.expand_dims(img, -1)\n img = np.concatenate((img, img, img), axis=-1)\n x[count] = img\n\n count += 1\n\n assert count == no_sample, \"number of sample (%d) is different than number of read image (%d)\" % (\n no_sample, count)\n\n x = get_deep_feature(x)\n np.save(os.path.join(dst, data_prefix + 'x_' + split + '.npy'), x)\n np.save(os.path.join(dst, data_prefix + 'y_' + split + '.npy'), y)", "def _make_dataset(input_dir, output_dir, image_size, margin, split='train'):\n input_dir = os.path.join(input_dir, split)\n\n output_root = os.path.join(output_dir, split)\n if not os.path.exists(output_root):\n os.makedirs(output_root)\n\n class_folders = glob.glob(os.path.join(input_dir, '*'))\n detector = MTCNN()\n\n for class_folder in class_folders:\n target_output_dir = os.path.join(output_root, class_folder.split('/')[-1])\n if not os.path.exists(target_output_dir):\n os.makedirs(target_output_dir)\n\n target_files = glob.glob(os.path.join(class_folder, '*'))\n logger.debug('processing %s...', class_folder)\n for file in target_files:\n img = cv2.imread(file)\n detect_result = detector.detect_faces(img)\n\n if not detect_result:\n logger.warning('WARNING: failed to detect face in file %s, skip', file)\n continue\n\n x0, y0, width, height = detect_result[0]['box']\n x1, y1 = x0 + width, y0 + height\n\n x0 = max(x0 - margin // 2, 0)\n y0 = max(y0 - margin // 2, 0)\n x1 = min(x1 + margin // 2, img.shape[1])\n y1 = min(y1 + margin // 2, img.shape[0])\n\n face_img = img[y0:y1, x0:x1, :]\n face_img = cv2.resize(face_img, dsize=(image_size, image_size),\n interpolation=cv2.INTER_LINEAR)\n\n filename = file.split('/')[-1]\n img_name = filename.split('.')[0]\n cv2.imwrite(os.path.join(target_output_dir, filename),\n face_img)\n with open(os.path.join(target_output_dir, img_name + '.txt'), 'w') as f:\n f.write('%d %d %d %d\\n' % (x0, y0, x1, y1))\n logger.debug('processing %s finished!', class_folder)", "def make_dataset():\n\n\tnumberOfTrials = 
dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def save_all_features(nb_samples, source=\"./datasets/D1/images/\", dest=\"./datasets/D1/features/\", input_size=(416, 416), batch_size=16):\n\n # check if the directory exists, and if not make it\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n # define image height and width\n (img_height, img_width) = input_size\n\n # build the VGG16 network and extract features after every MaxPool layer\n model = VGG16(weights='imagenet', include_top=False)\n\n c1 = model.layers[-16].output\n c1 = GlobalAveragePooling2D()(c1)\n\n c2 = model.layers[-13].output\n c2 = GlobalAveragePooling2D()(c2)\n\n c3 = model.layers[-9].output\n c3 = GlobalAveragePooling2D()(c3)\n\n c4 = model.layers[-5].output\n c4 = GlobalAveragePooling2D()(c4)\n\n c5 = model.layers[-1].output\n c5 = GlobalAveragePooling2D()(c5)\n\n\n model = Model(inputs=model.input, outputs=(c1, c2, c3, c4, c5))\n\n # always save your weights after training or during training\n model.save_weights('first_try.h5')\n model.save('model_save')\n\n # define image generator without augmentation\n datagen = ImageDataGenerator(rescale=1. / 255.)\n\n generator = datagen.flow_from_directory(\n source,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode=\"sparse\",\n shuffle=False)\n\n # generate and save features, labels and respective filenames\n steps = nb_samples / batch_size + 1\n X = model.predict_generator(generator, steps)\n Y = np.concatenate([generator.next()[1] for i in range(0, generator.samples, batch_size)])\n names = generator.filenames\n\n for n, i in enumerate(X):\n print(\"Saving \" + n + \" and \" + i)\n with open(dest + \"X-\" + str(img_height) + \"-c\" + str(n + 1) + \"-AVG.npy\", 'w') as f:\n np.save(f.name, i)\n\n if not os.path.exists(dest + \"Y.npy\"):\n with open(dest + \"Y.npy\", 'w') as f:\n np.save(f.name, Y)\n\n if not os.path.exists(dest + \"filenames.npy\"):\n with open(dest + \"filenames.npy\", 'w') as f:\n np.save(f.name, names)", "def gen_data_dir(img_dir, id_label_dict, num_class, shuffle=True):\n img_file_path = gen_img_files(img_dir, shuffle)\n return gen_data_file(img_file_path, id_label_dict, num_class)", "def create_noobj_folder(\n folder: PathLike, \n img_ext: str = \".jpg\",\n):\n folder = Path(folder).expanduser().resolve()\n images = glob(folder, img_ext)\n \n for image in images:\n filename = image.name\n _folder = image.parent.name\n path = folder / (image.stem + \".xml\")\n img_w, img_h = get_image_size(image)\n\n tree = ET.Element(\"annotation\")\n\n et_folder = ET.SubElement(tree, \"folder\")\n et_folder.text = _folder\n\n et_filename = ET.SubElement(tree, \"filename\")\n et_filename.text = filename\n\n et_path = ET.SubElement(tree, \"path\")\n et_path.text = str(path)\n\n et_img_size = ET.SubElement(tree, \"size\")\n ET.SubElement(et_img_size, \"width\").text = str(img_w)\n ET.SubElement(et_img_size, \"height\").text = 
str(img_h)\n ET.SubElement(et_img_size, \"depth\").text = \"3\"\n\n content = ET.tostring(tree, encoding=\"unicode\", pretty_print=True)\n try: \n path.write_text(content)\n except KeyboardInterrupt:\n path.write_text(content)\n exit()", "def preprocess(data_path, dataset):\n il_data_path = os.path.join(data_path, 'il' + dataset)\n train_path = os.path.join(il_data_path, 'train')\n val_path = os.path.join(il_data_path, 'val')\n\n if os.path.isdir(il_data_path):\n return\n\n os.makedirs(train_path)\n os.makedirs(val_path)\n\n train_set = _datasets[dataset](data_path, train=True, download=True)\n val_set = _datasets[dataset](data_path, train=False, download=True)\n\n # dump pickles for each class\n for cur_set, cur_path in [[train_set, train_path], [val_set, val_path]]:\n for idx, item in enumerate(cur_set):\n label = item[1]\n if not os.path.exists(os.path.join(cur_path, str(label))):\n os.makedirs(os.path.join(cur_path, str(label)))\n with open(os.path.join(cur_path, str(label), str(idx) + '.p'), 'wb') as f:\n pickle.dump(item, f)", "def exporting_cropped_images (fpath_tiff):\n src = rasterio.open(fpath_tiff, 'r')\n outfolder_irregular = '/train/irregular'\n outfolder_healthy = '/train/healthy'\n outfolder_concrete = '/train/concrete'\n outfolder_incomplete = '/train/incomplete'\n outfolder_other = '/train/other'\n outfolder = '/train/batch'\n #os.makedirs (outfolder, exist_ok = True)", "def generate_nmnist_dataset(initial_size, input_dir, num_spikes, step_factor):\n image_dataset = np.rec.array(None, dtype=[('height', np.uint16),\n ('width', np.uint16),\n ('image_data', 'object'),\n ('label', np.uint32)],\n shape=initial_size)\n num_images = 0\n\n # loop through each folder within the test directories\n for i in range(0, 10):\n current_dir = input_dir + os.path.sep + str(i) + os.path.sep + '*.bin'\n print('Processing {}...'.format(current_dir))\n for filename in glob.iglob(current_dir):\n images = prepare_n_mnist(filename, True, num_spikes, step_factor)\n if num_images + len(images) >= image_dataset.size:\n image_dataset = np.resize(image_dataset,\n (num_images + len(images)) * 2)\n add_images_to_dataset(image_dataset, images, num_images, i, 28, 28)\n num_images += len(images)\n\n return image_dataset[0:num_images]", "def compress_wrapper(args: Namespace) -> None:\n directory_path = os.path.join(DATASETS_DIR, args.directory)\n compress_datasets(directory_path, args.holdout)", "def create_random_data(output_path: str, num_images: int = 5) -> None:\n train_path = os.path.join(output_path, \"train\")\n class1_train_path = os.path.join(train_path, \"class1\")\n class2_train_path = os.path.join(train_path, \"class2\")\n\n val_path = os.path.join(output_path, \"val\")\n class1_val_path = os.path.join(val_path, \"class1\")\n class2_val_path = os.path.join(val_path, \"class2\")\n\n test_path = os.path.join(output_path, \"test\")\n class1_test_path = os.path.join(test_path, \"class1\")\n class2_test_path = os.path.join(test_path, \"class2\")\n\n paths = [\n class1_train_path,\n class1_val_path,\n class1_test_path,\n class2_train_path,\n class2_val_path,\n class2_test_path,\n ]\n\n for path in paths:\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n for i in range(num_images):\n pixels = numpy.random.rand(64, 64, 3) * 255\n im = Image.fromarray(pixels.astype(\"uint8\")).convert(\"RGB\")\n im.save(os.path.join(path, f\"rand_image_{i}.jpeg\"))\n\n process_images(output_path)", "def makeDataset(numberOfTrials, data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = 
os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\n\tutils.create_directory(dataset_params.data_path)\n\tutils.create_directory(os.path.join(dataset_params.data_path, data_folder))\n\n\tallowedRadius = utils.defineShapePerimeter()\n\tcolorsRGB = utils.defineColorValues()\n\tshapeDict = utils.defineShapeSides()\n\tpadding = dataset_params.padding\n\n\tnum = 0\n\toutput_images = [[\"figNum\", \"shape\", \"color\", \"size\", \"background\", \"quadrant\", \"radius\"]]\n\tfor c in dataset_params.colors: # for all 7 foreground colors \n\t\tfor q in dataset_params.quadrants: # for all 4 quadratns \n\t\t\tfor s in dataset_params.shapes: # for all 5 shapes\n\t\t\t\tfor k in dataset_params.sizes: # for all 3 sizes\n\t\t\t\t\tfor b in dataset_params.backgrounds: # for all 3 background colors\n\t\t\t\t\t\tfor i in range(numberOfTrials):\n\t\t\t\t\t\t\tfileName = os.path.join(dataset_params.data_path, data_folder, str(num) + \".png\")\n\t\t\t\t\t\t\tpresentQuadrant = dataset_params.quadrants[q]\n\t\t\t\t\t\t\tradius = random.randint(allowedRadius[s][k][0],allowedRadius[s][k][1])\n\n\t\t\t\t\t\t\tif(presentQuadrant == 3):\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 2):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 1):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\txCenter = random.randint(xMin, xMax)\n\t\t\t\t\t\t\tyCenter = random.randint(yMin, yMax)\n\t\t\t\t\t\t\tcenter = [xCenter, yCenter]\n\n\t\t\t\t\t\t\tif(s == \"circle\"):\n\t\t\t\t\t\t\t\toutput_images.append([num, \"circle\", c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\t\timg = makeCircle(c, radius, center, b, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tn = shapeDict[s]\n\t\t\t\t\t\t\t\timg = makePolygon(center, n, radius, b, c, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\t\toutput_images.append([num, s, c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\tnum += 1\n\t\n\tprint(\"Number of image generated\", num)\n\n\tprint(\"Saving \" + data_type + \" data meta information to CSV ......\")\n\tdf = pd.DataFrame(output_images[1:], columns=output_images[0])\n\tdf.to_csv(label_file, index=False)\n\tprint(\"Saved \" + data_type + \" data meta information: \" + data_folder)\n\t\n\n\tprint(\"Saving \" + data_type + \" images data to npz(numpy) compressed file ......\")\n\tmake_npz_file(data_type)\n\tprint(\"Saved \" + data_type + \" images data to npz(numpy) compressed file!\")\n\t\n\treturn None", "def creation_data_sets(quality, dataset, test_case=False):\n current_path = Path.cwd()\n if dataset == 0:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Mnist_{}\".format(quality))\n test_path = current_path.joinpath(\"Mnist_{}_test\".format(quality))\n else:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n del y_train, y_test\n 
train_path = current_path.joinpath(\"Cifar-10_{}\".format(quality))\n test_path = current_path.joinpath(\"Cifar-10_{}_test\".format(quality))\n\n create_directories(train_path, test_path)\n convert(train_path, x_train, dataset, quality, test_case)\n convert(test_path, x_test, dataset, quality, test_case)", "def make_data(config, data, label):\n if not os.path.isdir(os.path.join(os.getcwd(), config.checkpoint_dir)):\n os.makedirs(os.path.join(os.getcwd(), config.checkpoint_dir))\n\n if config.is_train:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/train.h5')\n else:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/test.h5')\n\n with h5py.File(savepath, 'w') as hf:\n hf.create_dataset('data', data=data)\n hf.create_dataset('label', data=label)", "def copy_files():\n\n # Load the Knifey-Spoony dataset.\n # This is very fast as it only gathers lists of the files\n # and does not actually load the images into memory.\n dataset = load()\n\n # Copy the files to separate training- and test-dirs.\n dataset.copy_files(train_dir=train_dir, test_dir=test_dir)", "def make_npz_file(data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\toutput_file = os.path.join(dataset_params.data_path, \"synthetic_\" + data_type + \"_data\")\n\tline_reader = csv.DictReader(open(label_file,\"r\"))\n\n\tdata = []\n\tlabels = []\n\tdata_points = 0\n\tfor row in line_reader:\n\t\timage_name = os.path.join(dataset_params.data_path,data_folder,row[\"figNum\"] + \".png\")\n\t\timage_data = cv2.imread(image_name, cv2.IMREAD_COLOR)\n\t\timage_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)\n\t\timage_label = [int(dataset_params.shapes[row[\"shape\"]]), int(dataset_params.colors[row[\"color\"]]), int(dataset_params.sizes[row[\"size\"]]), int(row[\"quadrant\"]), int(dataset_params.backgrounds[row[\"background\"]]) ]\n\t\tdata.append(image_data)\n\t\tlabels.append(image_label)\n\t\tdata_points += 1\n\n\t# Converting list to data to np array\n\tdata = np.asarray(data)\n\tlabels = np.asarray(labels)\n\n\t# Printing log information\n\tprint(data_type, \"statistics being saved: \")\n\tprint(data_type, \"data shape\", data.shape)\n\tprint(data_type, \"label shape\", labels.shape)\n\n\t# saveing the file as npz file\n\tnp.savez_compressed(output_file, data=data, lables=labels)", "def createDataset(sources,output,labels,sparse):\n global has_joblib\n out_path = str(output)\n # delete the output file\n if os.path.exists(os.path.abspath(out_path)):\n os.remove(os.path.abspath(out_path))\n \n # first, list the source files\n fpaths_src, fnames_src = utils.listFiles(directory=os.path.abspath(sources), ext='png')\n \n label_map={}\n \n # read the label file\n if not (labels == None):\n label_map = utils.readLabelMap(labels)\n # check that the numbers match\n print(\"Number of images in label map : %s\"%str(len(label_map.keys())-1))\n print(\"Number of images in source dir: %s\"%str(len(fpaths_src)))\n assert len(label_map.keys())-1 == len(fpaths_src)\n \n # generate KNN classifier\n if not (args.codebook == 'None' or args.codebook == None):\n args.knn = getKNNClassifier() \n else:\n args.knn = None\n \n # precompute number of images\n n_imgs = len(fpaths_src)\n \n # preallocate array\n # if augmentation, calculate (9*4+1)*n samples\n all_features_list = []\n \n # parallel implementation (default, if joblib available)\n if has_joblib:\n image_features = Parallel(n_jobs=args.njobs,verbose=5) 
(delayed(processImage)(fpaths_src, label_map, fnames_src, img_idx) for img_idx in range(n_imgs))\n # collect all images into a single matrix\n image_features = np.concatenate(image_features, axis=0)\n all_features_list.append(image_features)\n else:\n for img_idx in xrange(n_imgs):\n image_features = processImage(fpaths_src, label_map, fnames_src, img_idx)\n all_features_list.append(image_features)\n \n # make a 2D matrix from the list of features (stack all images vertically)\n feat_matrix = np.concatenate(all_features_list, axis=0).astype(np.float32) \n \n # do scaling of each feature dimension \n #if False:\n if not (args.scale == 0):\n print \"Scaling data...\"\n \n # preserve the labels\n label_vec = feat_matrix[:,0]\n feat_matrix = np.delete(feat_matrix,0,1)\n \n featurestats = np.zeros((2,feat_matrix.shape[1]))\n \n # use soft-normalization (zero-mean, unit var whitening)\n if (args.scale == 1):\n # if we specified featurestats from a training set, use them\n if not (args.featurestats == None):\n # load the statistics\n featurestats = loadFeatureStats()\n # featurestats contains 2 rows, first row = mean, second row = std\n # and n feature dimensions\n assert feat_matrix.shape[1]==featurestats.shape[1]\n else:\n pass\n \n \n # use hard-normalization \n elif (args.scale == 2):\n # if we specified featurestats from a training set, use them\n if not (args.featurestats == None):\n # load the statistics\n featurestats = loadFeatureStats()\n # the featurestats contains 2 rows, first row = min, second row = max \n # and n feature dimensions\n assert feat_matrix.shape[1]==featurestats.shape[1]\n else:\n pass\n \n \n # normalize each feature dimension\n for feat_idx in xrange(feat_matrix.shape[1]):\n feat_vec = feat_matrix[:,feat_idx]\n \n # soft-normalization (zero-mean, approx. 
unit variance)\n if (args.scale == 1): \n # if feature statistics are specified\n if not (args.featurestats == None):\n feat_mean = featurestats[0,feat_idx]\n feat_std = featurestats[1,feat_idx]\n else:\n # compute them from the data\n feat_mean = feat_vec.mean()\n feat_std = (feat_vec.std() + 1e-10)\n # store them \n featurestats[0,feat_idx] = feat_mean\n featurestats[1,feat_idx] = feat_std\n \n # shift to zero mean and (unit) variance\n feat_vec_scaled = (feat_vec - feat_mean) / (1.*feat_std)\n \n \n # hard-normalization (min/max = borders estimated from the (training) dataset)\n elif (args.scale == 2):\n if not (args.featurestats == None):\n feat_min = featurestats[0,feat_idx]\n feat_max = featurestats[1,feat_idx]\n else:\n # compute them freshly\n feat_min = np.min(feat_vec)\n feat_max = np.max(feat_vec)\n # store them \n featurestats[0,feat_idx] = feat_min\n featurestats[1,feat_idx] = feat_max\n \n # standardize/normalize between 0 and 1\n feat_vec_std = (feat_vec - feat_min) / (feat_max - feat_min + 1e-10) \n \n # linearly scale between -1 and 1 \n feat_vec_scaled = (1.0*feat_vec_std * (1 - -1)) - 1\n \n \n # set column back to matrix\n feat_matrix[:,feat_idx] = feat_vec_scaled\n \n # finally prepend the label_vec again\n feat_matrix = np.concatenate((np.reshape(label_vec,(feat_matrix.shape[0],1)),feat_matrix), axis=1)\n \n print \"Done.\"\n else:\n print \"Data may not be properly scaled, use the 'svm-scale' implementation of libsvm.\"\n \n if not (args.savefeaturestats == None):\n saveFeatureStats(featurestats) \n\n #Parallel(n_jobs=args.njobs, verbose=5)(delayed(function)(params) for i in range(10))\n # open the output file\n output_file = open(os.path.abspath(out_path), 'wb')\n\n # run through the feature matrix \n print \"Writing %s rows and %s cols to file...\"%(feat_matrix.shape)\n # parallel implementation (default, if joblib available)\n if has_joblib:\n lines = Parallel(n_jobs=args.njobs, verbose=5)(delayed(writeLine)(i, feat_matrix) for i in range(feat_matrix.shape[0]))\n output_file.writelines(lines) \n else:\n for i in xrange(feat_matrix.shape[0]):\n line = writeLine(i, feat_matrix)\n output_file.writelines(line)\n \n output_file.close()\n \n return 0", "def download():\n\n trainset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=True, download=True)\n testset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=False, download=True)\n train_images = numpy.array(trainset.data)\n train_labels = numpy.array(trainset.targets)\n test_images = numpy.array(testset.data)\n test_labels = numpy.array(testset.targets)\n\n assert numpy.max(train_images) == 255\n\n train_images = train_images/255.\n test_images = test_images/255.\n\n utils.write_hdf5(paths.cifar10_train_images_file(), train_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_train_images_file())\n utils.write_hdf5(paths.cifar10_test_images_file(), test_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_test_images_file())\n utils.write_hdf5(paths.cifar10_train_labels_file(), train_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_train_labels_file())\n utils.write_hdf5(paths.cifar10_test_labels_file(), test_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_test_labels_file())", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in 
_split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def create_train_file(img_folder_path: str, train_file_path: str) -> None:\n files = []\n for ext in (\"*.gif\", \"*.png\", \"*.jpg\", \"*.bmp\"):\n img_path = glob(join(img_folder_path, ext))\n if img_path:\n files.extend(img_path)\n\n write_to_train_file(files, train_file_path)\n\n print(\"Training files are created in \" + img_folder_path)", "def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n _create_folder(os.path.join(root_dir, \"labels\", subset))", "def create_directories(train_path, test_path):\n train_path.joinpath(\"images\").mkdir(parents=True)\n test_path.joinpath(\"images\").mkdir(parents=True)", "def convert_dataset(src_dir, dest_dir):\n subdirs = get_subdirs(src_dir)\n detector = dlib.simple_object_detector(MODEL_PATH)\n for img_dir in tqdm(subdirs):\n\tprint(img_dir)\n jpegs = get_img_paths_in_dir(img_dir)\n target_dir = dest_dir + img_dir.split('/')[-1]\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n for src_path in jpegs:\n target_path = target_dir + '/' + src_path.split('/')[-1]\n img = io.imread(src_path)\n dets = detector(img)\n bounding_boxes = get_bounding_boxes(dets)\n if bounding_boxes:\n square_box = find_square_box(bounding_boxes[0])\n if is_valid(square_box, img):\n box = bounding_boxes[0]\n square_box = find_square_box(box)\n cropped_img = crop_frame(img, square_box)\n PIL_img = PIL.Image.fromarray(cropped_img)\n resized_img = PIL_img.resize((54,54), PIL.Image.BILINEAR)\n\t\t resized_img.save(target_path)\n print(target_path)\n # grey_img = resized_img.convert('L')\n # grey_img.save(target_path)", "def save_data(data_dir):\r\n for k in range(1,11):\r\n fold_name = 'fold' + str(k)\r\n print \"Saving\" + fold_name\r\n features, labels = process_audio(parent_path, [fold_name])\r\n labels = encode(labels)\r\n print \"Features of\", fold_name , \" = \", features.shape\r\n print \"Labels of\", fold_name , \" = \", labels.shape\r\n feature_file = os.path.join(data_dir, fold_name + '_x.npy')\r\n labels_file = os.path.join(data_dir, fold_name + '_y.npy')\r\n np.save(feature_file, features)\r\n print \"Saved \" + feature_file\r\n np.save(labels_file, labels)\r\n print \"Saved \" + labels_file", "def generate_dataset(self):\n\t\timg_set = []\n\t\tqa_set = []\n\t\tfor i in range(self.config.dataset_size):\n\t\t\timg, r = self.generate_image()\n\t\t\tq = self.generate_question()\n\t\t\ta = self.generate_answer(r, q)\n\t\t\timg_sample = {\n\t\t\t\t'id': i,\n\t\t\t\t'image': img.tolist()\n\t\t\t}\n\t\t\timg_set.append(img_sample)\n\t\t\tfor j in range(len(q)):\n\t\t\t\tqa_sample = {\n\t\t\t\t\t'id': i,\n\t\t\t\t\t'question': q[j].tolist(),\n\t\t\t\t\t'answer': a[j].tolist()\n\t\t\t\t}\n\t\t\t\tqa_set.append(qa_sample)\n\t\tprint('Finished creating smaples')\n\t\tdataset = {\n\t\t\t'image':\timg_set,\n\t\t\t'qa':\tqa_set\n\t\t}\n\t\twith open(self.path, 'w') as f:\n\t\t\tjson.dump(dataset, f)", "def generate_data(dataset, target_filename, 
label):\n\n data_dir = check_data(dataset)\n\n data_x = np.empty((0, NB_SENSOR_CHANNELS))\n data_y = np.empty((0))\n\n zf = zipfile.ZipFile(dataset)\n print (\"Processing dataset files ...\")\n for filename in OPPORTUNITY_DATA_FILES:\n try:\n data = np.loadtxt(BytesIO(zf.read(filename)))\n print (\"... file {0}\".format(filename))\n x, y = process_dataset_file(data, label)\n data_x = np.vstack((data_x, x))\n data_y = np.concatenate([data_y, y])\n except KeyError:\n print (\"ERROR: Did not find {0} in zip file\".format(filename))\n\n # Dataset is divided into train and test\n nb_training_samples = 557963\n # The first 18 OPPORTUNITY data files are used for the traning dataset, having 557963 samples\n X_train, y_train = data_x[:nb_training_samples,:], data_y[:nb_training_samples]\n X_test, y_test = data_x[nb_training_samples:,:], data_y[nb_training_samples:]\n\n print (\"Final datasets with size: | train {0} | test {1} | \".format(X_train.shape,X_test.shape))\n\n obj = [(X_train, y_train), (X_test, y_test)]\n f = open(os.path.join(data_dir, target_filename), 'wb')\n cp.dump(obj, f, protocol=cp.HIGHEST_PROTOCOL)\n f.close()", "def copy_database(path_images, path_labels, path_final_images):\n\n try:\n labels = sorted(os.listdir(path_labels))\n except FileNotFoudError:\n print(\"No such file or directory \", path_labels)\n\n try:\n images = sorted(os.listdir(path_images)) #+ \"RetinaNet_I04590/\"))\n except FileNotFoudError:\n print(\"No such file or directory \", path_images)\n\n \"\"\"if not os.path.exists(path_final_images + \"I04590/\"):\n os.mkdir(path_final_images + \"I04590/\")\n\n if not os.path.exists(path_final_images + \"I045135/\"):\n os.mkdir(path_final_images + \"I045135/\")\n\n if not os.path.exists(path_final_images + \"I090135/\"):\n os.mkdir(path_final_images + \"I090135/\")\n\n if not os.path.exists(path_final_images + \"I4590135/\"):\n os.mkdir(path_final_images + \"I4590135/\")\n\n if not os.path.exists(path_final_images + \"Params/\"):\n os.mkdir(path_final_images + \"Params/\")\n\n if not os.path.exists(path_final_images + \"Pauli2/\"):\n os.mkdir(path_final_images + \"Pauli2/\")\n\n if not os.path.exists(path_final_images + \"Pauli3/\"):\n os.mkdir(path_final_images + \"Pauli3/\")\n\n if not os.path.exists(path_final_images + \"Stokes/\"):\n os.mkdir(path_final_images + \"Stokes/\")\n\n if not os.path.exists(path_final_images + \"Rachel/\"):\n os.mkdir(path_final_images + \"Rachel/\")\n\n if not os.path.exists(path_final_images + \"Rachel2/\"):\n os.mkdir(path_final_images + \"Rachel2/\")\"\"\"\n\n for k in range(len(images)):\n if str(k) + \".xml\" in labels:\n copyfile(path_images + \"/\" + images[k],\n path_final_images + \"/\" + images[k])\n \"\"\"copyfile(path_images + \"RetinaNet_I04590/\" + str(k) + \".png\",\n path_final_images + \"I04590/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I045135/\" + str(k) + \".png\",\n path_final_images + \"I045135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I090135/\" + str(k) + \".png\",\n path_final_images + \"I090135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I4590135/\" + str(k) + \".png\",\n path_final_images + \"I4590135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Params/\" + str(k) + \".png\",\n path_final_images + \"Params/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli2/\" + str(k) + \".png\",\n path_final_images + \"Pauli2/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli3/\" + str(k) + \".png\",\n path_final_images + 
\"Pauli3/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Stokes/\" + str(k) + \".png\",\n path_final_images + \"Stokes/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel/\" + str(k) + \".png\",\n path_final_images + \"Rachel/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel2/\" + str(k) + \".png\",\n path_final_images + \"Rachel2/\" + str(k) + \".png\")\n copyfile(path_labels + str(k) + \".xml\",\n path_final_labels + str(k) + \".xml\")\"\"\"\n print(k)", "def generate_transformed_data(self):\n for each_class in self.classes:\n class_directory = \"data/test/test_folder/\"\n class_directory += each_class\n class_directory += \"_test.jpg\"\n test_image = keras.preprocessing.image.load_img(class_directory)\n image_set = keras.preprocessing.image.img_to_array(test_image)\n image_set = image_set.reshape((1,) + image_set.shape)\n i = 0\n for each_batch in self.transformed_data_generator.flow(image_set, batch_size=1,\n save_to_dir=\"data/test_transformed/test_folder\", save_prefix=each_class, save_format=\"jpeg\"):\n i += 1\n if i > 20:\n break", "def GenerateImageSamples(self):\n self.generateImageSamples = GenerateImageSamples(self.trainDataDir,\n self.testDataDir,\n self.trainClassDir,\n self.testClassDir,\n self.cfgData)\n\n self.generateImageSamples.LoadDataSave(self.trainDataDir, 'train')\n # self.generateImageSamples.CopyFiles(self.trainClassDir)\n\n self.generateImageSamples.LoadDataSave(self.testDataDir, 'test')\n # self.generateImageSamples.CopyFiles(self.testClassDir)", "def Dev_Image_data_generator(folderlist,resize = (920,1200),Transformation = True, scaling = True, batch_size = 16):\n\n while True:\n total_classes = len(folderlist.keys())\n keys = folderlist.keys()\n Images = []\n Image_label = []\n for key in folderlist.keys():\n img_label = random.choice(folderlist[key])\n img = Image.open(img_label,'r')\n h = resize[1]\n l = int(img.size[1]*h/img.size[0])\n img = img.resize((h,l), Image.ANTIALIAS)\n background = Image.new('RGB', (resize[1], resize[0]), (255, 255, 255))\n img_w, img_h = img.size\n bg_w, bg_h = background.size\n offset = (int((bg_w - img_w) / 2), int((bg_h - img_h) / 2))\n background.paste(img, offset)\n background = np.asarray(background)\n if Transformation == True:\n rotation = rotate(background,random.choice(range(360)))\n translate = translate_xy(background,random.choice(range(resize[0]/4)),random.choice(range(resize[1]/4)))\n flip = cv2.flip(rotation,1)\n Y = np.concatenate((rotation[np.newaxis,:,:,:],flip[np.newaxis,:,:,:],translate[np.newaxis,:,:,:]))\n Images.append(Y)\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key for i in range(4)])\n else:\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key])\n Image_label = np.concatenate(Image_label)\n Images = np.concatenate(Images)\n Image_label = np.array(pd.get_dummies(Image_label))\n X_Image , Y_Image = shuffle(Images,Image_label,random_state=0)\n if scaling == True:\n X_Image = X_Image/255\n else:\n X_Image = X_Image\n batches = int(len(X_Image)/batch_size)\n for batch in range(batches):\n x = X_Image[batch*batch_size:(batch+1)*batch_size,:,:,:]\n y = Y_Image[batch*batch_size:(batch+1)*batch_size]\n yield((x,y))", "def __download(self):\n\n if self.__check_exists():\n return\n\n print(\"Downloading AudioMNIST dataset\")\n\n # download files\n try:\n os.makedirs(self.__path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n\n if not os.path.exists(os.path.join(self.__path, 
'AudioMNIST-master.zip')):\n url = 'https://github.com/soerenab/AudioMNIST/archive/master.zip'\n wget_data = wget.download(url, out=self.__path)\n\n archive = zipfile.ZipFile(wget_data)\n\n for file in archive.namelist():\n if file.startswith('AudioMNIST-master/data/'):\n archive.extract(file, self.__path)\n\n print(\"Download successful\")\n\n audio_mnist_src = os.path.join(self.__path, 'AudioMNIST-master/data/')\n data = np.array(glob.glob(os.path.join(audio_mnist_src, \"**/*.wav\")))\n\n train_images = []\n train_labels = []\n test_images = []\n test_labels = []\n\n # first 5-cross-validation set from https://github.com/soerenab/AudioMNIST/blob/master/preprocess_data.py\n train_folders = [28, 56, 7, 19, 35, 1, 6, 16, 23, 34, 46, 53, 36, 57, 9, 24, 37, 2,\n 8, 17, 29, 39, 48, 54, 43, 58, 14, 25, 38, 3, 10, 20, 30, 40, 49, 55,\n 12, 47, 59, 15, 27, 41, 4, 11, 21, 31, 44, 50]\n test_folders = [26, 52, 60, 18, 32, 42, 5, 13, 22, 33, 45, 51]\n\n print(\"Converting audio to images\")\n # create train and test folders and save audios as images\n for filepath in tqdm(data):\n # the last one is just a counter for repeat of each digit, e.g. say zero once, twice, third time..\n\n dig, vp, rep = filepath.rstrip(\".wav\").split(\"/\")[-1].split(\"_\")\n\n # according to https://github.com/soerenab/AudioMNIST/blob/master/preprocess_data.py\n fs, data = wavf.read(filepath)\n\n # resample\n data = librosa.core.resample(y=data.astype(np.float32), orig_sr=fs, target_sr=8000, res_type=\"scipy\")\n # zero padding\n if len(data) > 8000:\n raise ValueError(\"data length cannot exceed padding length.\")\n elif len(data) < 8000:\n embedded_data = np.zeros(8000)\n offset = np.random.randint(low=0, high=8000 - len(data))\n embedded_data[offset:offset + len(data)] = data\n elif len(data) == 8000:\n # nothing to do here\n embedded_data = data\n pass\n\n # 1. 
fourier transform\n # stft, with selected parameters, spectrogram will have shape (228, 230)\n f, t, zxx = scipy.signal.stft(embedded_data, 8000, nperseg=455, noverlap=420, window='hann')\n # get amplitude\n zxx = np.abs(zxx[0:227, 2:-1])\n\n # if not 2, then convert to decibel\n zxx = librosa.amplitude_to_db(zxx, ref=np.max)\n\n # normalize from range -80,0 to 0,1\n zxx = (zxx - zxx.min()) / (zxx.max() - zxx.min())\n\n zxx = zxx[::-1] # reverse the order of frequencies to fit the images in the paper\n zxx = np.atleast_3d(zxx).transpose(2, 0, 1) # reshape to (1, img_dim_h, img_dim_w)\n\n # decide to which list to add (train or test)\n if int(vp) in train_folders:\n train_images.append(zxx)\n train_labels.append(int(dig))\n elif int(vp) in test_folders:\n test_images.append(zxx)\n test_labels.append(int(dig))\n else:\n raise Exception('Person neither in train nor in test set!')\n\n train_images = torch.Tensor(train_images).float()\n train_labels = torch.Tensor(train_labels).long()\n test_images = torch.Tensor(test_images).float()\n test_labels = torch.Tensor(test_labels).long()\n\n torch.save(train_images, os.path.join(self.__path, 'train_images_tensor.pt'))\n torch.save(train_labels, os.path.join(self.__path, 'train_labels_tensor.pt'))\n torch.save(test_images, os.path.join(self.__path, 'test_images_tensor.pt'))\n torch.save(test_labels, os.path.join(self.__path, 'test_labels_tensor.pt'))\n\n print('Done!')", "def make_datasets(class_names, dataset_dict, path_source, path_dest, seed):\n \n create_directory_structure(path_dest)\n\n path_alldata = [path_source.joinpath(f'label_{class_}')\n for class_ in class_names]\n\n path_imagefiles = [class_path.glob('*.bin')\n for class_path in path_alldata]\n\n size = sum([v for k, v in dataset_dict.items()])\n rng = default_rng(seed)\n\n datasets_by_class = np.array([rng.choice([image_file.name\n for image_file in image_filelist],\n size=size, replace=False)\n for image_filelist in path_imagefiles])\n\n dataset_labels = np.array([np.full(size, class_)\n for class_ in class_names])\n\n if not path_dest.exists():\n path_dest.mkdir(parents=True)\n\n start=0\n for set_name, n_examples in dataset_dict.items():\n stop = start + n_examples\n\n filename = f'{set_name}_set.csv'\n path_file = path_dest.joinpath(filename)\n \n images = datasets_by_class[:,start:stop].flatten()\n labels = dataset_labels[:,start:stop].flatten()\n rows = np.transpose(np.vstack((images, labels))).tolist()\n\n with path_file.open(mode='w', newline='') as f:\n csv_writer = writer(f)\n csv_writer.writerows(rows)\n\n start = n_examples", "def decompress_data(src, dst):\n assert os.path.exists(src), \"{} does not exist. 
Please download the \\\n entire repository and keep it as it originally is\".format(src)\n\n # create folder layout at the destination folder\n subset_list = [\"train\", \"val\"]\n _create_layout(dst, subset_list)\n\n # extract data\n for subset in subset_list:\n subset_img_src = os.path.join(src, \"images\", subset + \".zip\")\n subset_img_dst = os.path.join(dst, \"images\", subset)\n _extract_multi_vol_zip(subset_img_src, subset_img_dst)\n _extract_all_gz_in_dir(subset_img_dst)\n\n subset_lbl_src = os.path.join(src, \"labels\", subset + \".zip\")\n subset_lbl_dst = os.path.join(dst, \"labels\", subset)\n _extract_zip(subset_lbl_src, subset_lbl_dst)\n _extract_all_gz_in_dir(subset_lbl_dst)\n\n print(\"Finished decompressing {}.\".format(subset))", "def build_dataset(\n is_train, \n data_dir: str,\n image_size: int = 224,\n color_jitter: float = 0.4, \n aa: str = \"rand-m9-mstd0.5-inc1\",\n train_interpolation: str = \"bicubic\",\n reprob: float = 0.25, \n remode: str = \"pixel\", \n recount: int = 1\n):\n transforms = build_transform(\n is_train, \n image_size, \n color_jitter, \n aa, \n train_interpolation, \n reprob, \n remode, \n recount\n )\n\n root = os.path.join(data_dir, 'train' if is_train else 'val')\n dataset = datasets.ImageFolder(root, transform=transforms)\n nb_classes = 1000\n\n return dataset, nb_classes", "def create_dataset(data_path, batch_size=32, num_parallel_workers=1):\n # Define dataset\n mnist_ds = ds.MnistDataset(data_path)\n\n resize_height, resize_width = 32, 32\n rescale = 1.0 / 255.0\n shift = 0.0\n rescale_nml = 1 / 0.3081\n shift_nml = -1 * 0.1307 / 0.3081\n\n # Define map operations\n resize_op = vision.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)\n rescale_nml_op = vision.Rescale(rescale_nml, shift_nml)\n rescale_op = vision.Rescale(rescale, shift)\n hwc2chw_op = vision.HWC2CHW()\n type_cast_op = transforms.TypeCast(mstype.int32)\n\n # Apply map operations on images\n mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n\n return mnist_ds", "def read_classified_data(root_path, to_size = (200,200), transformation = transforms.ToTensor()):\n label_dict = {}\n # for each folder in the dataset\n # get the label\n for i, label in tqdm(enumerate(sorted(os.listdir(root_path))), desc = \"Read in...\", leave = False):\n if len(os.listdir(sub_path)) == 0:\n continue\n sub_path = os.path.join(root_path, label)\n # write the label in the label dict\n label_dict[i] = label\n # find the csv, there should be one and only one csv\n csv_path = glob.glob(os.path.join(sub_path,\"*.csv\"))[0]\n df = pd.read_csv(csv_path)\n # the csv should have a image_name list indicating the 1-1 correspondense\n image_origin = df[\"image_name\"]\n # get the rest and the features\n df.drop(labels = \"image_name\", axis = \"columns\", inplace = True)\n # concate them to our dataset\n if i == 0:\n features = torch.from_numpy(df.to_numpy())\n images = 
torch.stack([preprocess(Image.open(os.path.join(sub_path, i)).convert(\"RGB\"),\n to_size = to_size,\n transformation = transformation) for i in image_origin])\n labels = torch.ones(image_origin.shape[0])*label\n else:\n features = torch.cat((features,torch.from_numpy(df.to_numpy())))\n images = torch.cat(images,torch.stack([preprocess(Image.open(os.path.join(sub_path, i)).convert(\"RGB\"),\n to_size = to_size,\n transformation = transformation) for i in image_origin]))\n labels = torch.cat(labels, torch.ones(image_origin.shape[0])*label)\n # return the dataset with our label_dict\n return TensorDataset(images,features, labels),label_dict", "def generate_nmnist_continuous_dataset(initial_size, input_dir):\n image_dataset = np.rec.array(None, dtype=[('height', np.uint16),\n ('width', np.uint16),\n ('image_data', 'object'),\n ('label', np.uint32)],\n shape=initial_size)\n num_images = 0\n\n # loop through each folder within the test directories\n for i in range(0, 10):\n current_dir = input_dir + os.path.sep + str(i) + os.path.sep + '*.bin'\n print('Processing {}...'.format(current_dir))\n for filename in glob.iglob(current_dir):\n image = prepare_n_mnist_continuous(filename, False, False)\n if num_images + 1 >= image_dataset.size:\n image_dataset = np.resize(image_dataset, (num_images * 2))\n add_images_to_dataset(image_dataset, image, num_images, i, 28, 28)\n num_images += 1\n\n return image_dataset[0:num_images]", "def split_folder(data_dir, train_pct, val_pct):\n\n random.seed(1)\n\n IMG_SUFFIX = '*_sat.jpg'\n MASK_SUFFIX = '*_msk.png'\n\n glob_imgs = os.path.join(data_dir,IMG_SUFFIX)\n glob_masks = os.path.join(data_dir, MASK_SUFFIX)\n\n img_paths = np.array(sorted(glob.glob(glob_imgs)))\n mask_paths = np.array(sorted(glob.glob(glob_masks)))\n \n num_imgs = len(img_paths)\n index_lst = list(range(num_imgs))\n\n random.shuffle(index_lst)\n\n train_idx_bound = int(train_pct * num_imgs)\n train_imgs = img_paths[index_lst[:train_idx_bound]]\n train_masks = mask_paths[index_lst[:train_idx_bound]]\n\n val_idx_bound = int((train_pct + val_pct) * num_imgs)\n val_imgs = img_paths[index_lst[train_idx_bound: val_idx_bound]]\n val_masks = mask_paths[index_lst[train_idx_bound: val_idx_bound]]\n\n test_imgs = img_paths[index_lst[val_idx_bound:]]\n test_masks = mask_paths[index_lst[val_idx_bound:]]\n\n # Write the lists to their own directories\n copy_list_to_dir(train_imgs, \"train\")\n print(\"Moved images into: train\")\n copy_list_to_dir(train_masks, \"train\")\n print(\"Moved masks into: train\")\n copy_list_to_dir(val_imgs, \"val\")\n print(\"Moved images into: val\")\n copy_list_to_dir(val_masks, \"val\")\n print(\"Moved masks into: val\")\n copy_list_to_dir(test_imgs, \"test\")\n print(\"Moved images into: test\")\n copy_list_to_dir(test_masks, \"test\")\n print(\"Moved masks into: test\")", "def make_data(sess, data, data_dir):\n if FLAGS.is_train:\n #savepath = os.path.join(os.getcwd(), os.path.join('checkpoint',data_dir,'train.h5'))\n savepath = os.path.join('.', os.path.join('checkpoint',data_dir,'train.h5'))\n if not os.path.exists(os.path.join('.',os.path.join('checkpoint',data_dir))):\n os.makedirs(os.path.join('.',os.path.join('checkpoint',data_dir)))\n with h5py.File(savepath, 'w') as hf:\n hf.create_dataset('data', data=data)", "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = 
\"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def create_data_folders() -> None:\n if not os.path.exists(\"data/save\"):\n os.mkdir(\"./data\")\n os.mkdir(\"./data/save\")\n if not os.path.exists(\"data/critics\"):\n os.mkdir(\"./data/critics\")\n if not os.path.exists('data/policies/'):\n os.mkdir('data/policies/')\n if not os.path.exists('data/results/'):\n os.mkdir('data/results/')", "def __init__(self, data_folder: str = os.path.join('data', 'user_images'),\n dataset_file: str = os.path.join('data', 'dataset.pkl'),\n targets: str = os.path.join('data', 'dataset.pkl')):\n # check for existing dataset\n if not os.path.exists(dataset_file):\n create_dataset(data_folder, dataset_file)\n with open(dataset_file, 'rb') as f:\n data = pickle.load(f)\n print(f'loaded dataset from {dataset_file}')\n self.images = data['images']\n self.crop_sizes = data['crop_sizes']\n self.crop_centers = data['crop_centers']", "def create_test_folder(df_test, target_path):\n folder_path = os.path.join(target_path, 'xray_preprocess/test')\n print(f'Create test set at: {folder_path}')\n for _, row in tqdm(df_test.iterrows(), total=df_test.shape[0]):\n if row['class']=='negative':\n destination_path = os.path.join(folder_path, 'negative')\n elif row['class']=='positive':\n destination_path = os.path.join(folder_path, 'positive')\n if not os.path.exists(destination_path):\n os.makedirs(destination_path) \n img = os.path.join(target_path, 'xray', 'test', row['filename'])\n shutil.copy(img, destination_path )", "def load_datasets():\n from .dataset import num_classes, image_size\n\n train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n\n train_folders = maybe_extract(train_filename)\n test_folders = maybe_extract(test_filename)\n if not (len(train_folders) == len(test_folders) == num_classes):\n raise Exception('Expected %d folders, one per class. Found %d and %d instead.' 
% (\n num_classes, len(train_folders), len(test_folders)))\n print(\"Dataset folders: %s, %s\" % (train_folders, test_folders))\n\n # load datasets\n train_datasets = maybe_pickle(train_folders, 45000, image_size)\n test_datasets = maybe_pickle(test_folders, 1800, image_size)\n\n return train_datasets, test_datasets", "def compress_skim_dir(directory, output=\"zarr\"):\n\n if output not in (\"zarr\", \"zarr.zip\"):\n raise NotImplementedError(output)\n\n if output == \"zarr\":\n if not os.path.exists(directory+\".zarr\"):\n os.makedirs(directory+\".zarr\")\n elif output == \"zarr.zip\":\n if os.path.exists(directory+\".zarr.zip\"):\n raise FileExistsError(directory+\".zarr.zip\")\n\n master = {}\n for f in os.walk(directory):\n for fi in f[2]:\n if \".emx\" in fi:\n arr = np.fromfile(fi, dtype='f4')\n side = int(np.sqrt(arr.size))\n arr = arr.reshape(side, side)\n tazrange = pd.RangeIndex(1, side+1)\n master[fi.replace(\".emx\", \"\")] = xr.DataArray(\n arr,\n dims=['otaz', 'dtaz'],\n coords={'otaz': tazrange, 'dtaz': tazrange}\n )\n\n master = sh.Dataset(master)\n\n if output == \"zarr\":\n master.to_zarr(directory+\".zarr\", mode='a')\n elif output == \"zarr.zip\":\n with zarr.ZipStore(directory+\".zarr.zip\", mode='w') as store:\n master.to_zarr(store)\n return master", "def get_training_data(data_dir):\n data = []\n for label in labels:\n path = os.path.join(data_dir, label)\n class_num = labels.index(label)\n img_set = os.listdir(path)\n n = len(img_set)\n for i in range(n):\n try:\n img = img_set[i]\n img_arr = cv2.imread(os.path.join(path, img))\n resized_arr = cv2.resize(img_arr, (img_size, img_size)) # Reshaping images to preferred size\n data.append([resized_arr, class_num])\n if i % 100 == 0:\n print(\"Processing images: {}/{}\".format(i + 1, n))\n except Exception as e:\n print(e)\n return np.array(data)", "def pickle_dump_files():\n with open('data/' + dataset_name + '_' + model_name + '_' + 'predictions', 'wb') as f:\n pickle.dump(predictions, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'state_sentences', 'wb') as f:\n pickle.dump(final_state_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'decoded_sentences', 'wb') as f:\n pickle.dump(final_decoded_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'ids', 'wb') as f:\n pickle.dump(idx, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'exemplars', 'wb') as f:\n pickle.dump(exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'counter_exemplars', 'wb') as f:\n pickle.dump(counter_exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_exemplar_words', 'wb') as f:\n pickle.dump(top_exemplar_words, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_counter_exemplar_words', 'wb') as f:\n pickle.dump(top_counter_exemplar_words, f)", "def generate_test_environment(tmpdir, dataset):\n\n # Overwrite settings with test settings\n generate_test_settings(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n \n for dstype in ['images', 'labels']:\n \n dataset_type = usage + '.' + dstype\n \n mnist_dataset = 'datasets.mnist.' + dataset_type\n filepath = get_setting(mnist_dataset)\n\n test_dataset = dataset + '.' 
+ dataset_type\n generate_test_dataset_archive(filepath, test_dataset)", "def load_data(self):\n # make sure preprocessing is same as preprocessing as the network\n # reduce mean, and divide by a value to do scaling\n self.train_datagen = ImageDataGenerator(\n rescale=1./ 255,\n shear_range=0.05,\n rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)\n zoom_range=[0.9, 1.1], # Randomly zoom image\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n brightness_range=[0.8, 1.2],\n fill_mode='reflect',\n validation_split=0.2)\n\n self.test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n self.train_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"training\")\n\n self.validation_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"validation\")\n\n self.test_generator = self.test_datagen.flow_from_directory(\n self.test_dir,\n target_size=(224, 224),\n shuffle=False,\n batch_size=1,\n class_mode='categorical')", "def get_dataset(root_folder):\n # Parameters for data loader \n RESIZE_HEIGHT = 100 \n RESIZE_WIDTH = 100 \n\n cal_transform = transforms.Compose([\n transforms.Resize((RESIZE_HEIGHT,RESIZE_WIDTH)),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.flatten())\n ])\n\n root_folder = \"../101_ObjectCategories/\"\n caltech101 = ImageFolder(root=root_folder, transform=cal_transform)\n\n return caltech101", "def create_and_write_output(predictions_path,output_path,inpDir):\n \n filenames= sorted(os.listdir(predictions_path)) \n for filename in filenames:\n \n # read the 3 channel output image from the neural network\n image=cv2.imread(os.path.join(predictions_path,filename))\n \n # create binary image output using the create_binary function\n out_image=create_binary(image) \n \n # read and store the metadata from the input image\n with BioReader(os.path.join(inpDir,filename)) as br:\n metadata = br.metadata\n\n # Write the binary output consisting of the metadata using bfio.\n output_image_5channel=np.zeros((out_image.shape[0],out_image.shape[1],1,1,1),dtype=np.uint8)\n output_image_5channel[:,:,0,0,0]=out_image \n\n with BioWriter(os.path.join(output_path,filename), metadata=metadata) as bw:\n bw.dtype = output_image_5channel.dtype\n bw.write(output_image_5channel)", "def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)", "def generate_test_data(root: str) -> str:\n size = (64, 64)\n folder_path = os.path.join(root, \"enviroatlas_lotp\")\n\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n for prefix in tile_list:\n for suffix, data_profile in layer_data_profiles.items():\n img_path = os.path.join(folder_path, f\"{prefix}_{suffix}.tif\")\n img_dir = os.path.dirname(img_path)\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n data_profile[\"profile\"][\"height\"] = size[0]\n data_profile[\"profile\"][\"width\"] = size[1]\n data_profile[\"profile\"][\"transform\"] = Affine(\n 1.0, 0.0, 608170.0, 
0.0, -1.0, 3381430.0\n )\n\n write_data(\n img_path,\n data_profile[\"profile\"],\n data_profile[\"data_type\"],\n data_profile[\"vals\"],\n )\n\n # build the spatial index\n schema = {\n \"geometry\": \"Polygon\",\n \"properties\": {\n \"split\": \"str\",\n \"naip\": \"str\",\n \"nlcd\": \"str\",\n \"roads\": \"str\",\n \"water\": \"str\",\n \"waterways\": \"str\",\n \"waterbodies\": \"str\",\n \"buildings\": \"str\",\n \"lc\": \"str\",\n \"prior_no_osm_no_buildings\": \"str\",\n \"prior\": \"str\",\n },\n }\n with fiona.open(\n os.path.join(folder_path, \"spatial_index.geojson\"),\n \"w\",\n driver=\"GeoJSON\",\n crs=\"EPSG:3857\",\n schema=schema,\n ) as dst:\n for prefix in tile_list:\n img_path = os.path.join(folder_path, f\"{prefix}_a_naip.tif\")\n with rasterio.open(img_path) as f:\n geom = shapely.geometry.mapping(shapely.geometry.box(*f.bounds))\n geom = fiona.transform.transform_geom(\n f.crs.to_string(), \"EPSG:3857\", geom\n )\n\n row = {\n \"geometry\": geom,\n \"properties\": {\n \"split\": prefix.split(\"/\")[0].replace(\"_tiles-debuffered\", \"\")\n },\n }\n for suffix, data_profile in layer_data_profiles.items():\n key = suffix_to_key_map[suffix]\n row[\"properties\"][key] = f\"{prefix}_{suffix}.tif\"\n dst.write(row)\n\n # Create archive\n archive_path = os.path.join(root, \"enviroatlas_lotp\")\n shutil.make_archive(archive_path, \"zip\", root_dir=root, base_dir=\"enviroatlas_lotp\")\n shutil.rmtree(folder_path)\n md5: str = calculate_md5(archive_path + \".zip\")\n return md5", "def dataset_convertor(dataset_directory, outfolder_random, outfolder_art):\n print(\"converting dataset...\")\n directories = next(os.walk(dataset_directory))[1]\n for directory in directories:\n for i, file_name in enumerate(next(os.walk(dataset_directory + \"/\" + directory))[2]):\n image_splitter(Image.open(dataset_directory + \"/\" + directory + \"/\" + file_name, \"r\"), file_name,\n outfolder_random, outfolder_art, directory)\n print(\"converted\", file_name, \"successfully.\")", "def populate_train_test_val_dirs_nonrandomly(root_dir, val_ratio=0.15, test_ratio=0.05, preliminary_clahe=True,\n apply_masks=True):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = join(root_dir, 'CoregisteredBlurryImages')\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n if val_ratio == 0.0:\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) + 1\n else:\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) / (val_ratio * len(all_file_names))\n\n if test_ratio == 0.0:\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) + 1\n else:\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) / (test_ratio * len(all_file_names))\n\n # Get the list of validation file names, test file names, and train file names\n val_file_names = all_file_names[::int(val_skip_number)]\n test_file_names = [filename for filename in all_file_names[::int(test_skip_number + 1)]\n if filename not in val_file_names]\n train_file_names = [filename for filename in all_file_names\n if filename not in val_file_names and filename not in test_file_names]\n\n # Print the file distribution among the folders\n logger.print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names),\n len(test_file_names))\n\n # Copy-Pasting images into train dataset\n for 
name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/train/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/train/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/train/Masks')\n\n # Copy-Pasting images into val dataset\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/val/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/val/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/val/Masks')\n\n # Copy-Pasting images into test dataset\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/test/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/test/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/test/Masks')\n\n ''' Augment the images in each new folder '''\n # If we want to use preliminary adaptive equalization...\n if preliminary_clahe:\n pass\n # ... then first, apply Contrast Limited Adaptive Histogram Equalization to clear images in all folders\n CLAHE_image_folder(root_dir + '/train/ClearImages')\n CLAHE_image_folder(root_dir + '/val/ClearImages')\n CLAHE_image_folder(root_dir + '/test/ClearImages')\n\n # Then, apply histogram equalization to make the blurry images' histogram match that of the clear images\n hist_match_image_folder(root_dir=join(root_dir, 'train'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)\n hist_match_image_folder(root_dir=join(root_dir, 'val'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)\n hist_match_image_folder(root_dir=join(root_dir, 'test'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)", "def get_files(self):\n train_images = glob(os.path.join(self.images_dir, '*%s' % self.im_extension)) \n train_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in train_images]\n val_images = glob(os.path.join(self.val_images_dir, '*%s' % self.im_extension))\n val_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in val_images]\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n val_images = np.array(val_images)\n val_labels = np.array(val_labels)\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n test_labels = test_labels[keep_idx]\n test_images = np.stack([misc.imread(x) for x in test_images], 0)\n test_labels = np.stack(test_labels, 0)\n test_labels = test_labels[..., None]\n\n # Add constant padding to bottom/right\n if self.pad:\n test_images = util.pad(test_images, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='linear_ramp')\n test_labels = util.pad(test_labels, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), 
mode='constant', constant_values=0)\n\n # Select images for training\n sort_idx = np.argsort(train_images)\n train_images = train_images[sort_idx[:self.train_size]]\n train_labels = train_labels[sort_idx[:self.train_size]]\n\n # Build CV dict\n cv_files, cv_labels = {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n return cv_files, cv_labels", "def package_datasets(ds_all, dirname=''):\n ds_all = copy.deepcopy(ds_all)\n assert dirname != '', \"dirname required\"\n package_dataset(ds_all['ds_train_um'], dirname=join('.', dirname, 'train'))\n package_dataset(ds_all['ds_valid_um'], dirname=join('.', dirname, 'valid'))\n package_dataset(ds_all['ds_test_um'], dirname=join('.', dirname, 'test'))", "def prepare_nfold_datasets(self): # i.e. split into different train/ground-truth(test) dataset\n for alpha in range(1, self.ALPHAs+1):\n if alpha != self.ALPHAs:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI], separator='-')\n else:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI+self.runoff_years], separator='-')\n new_cluster_dir = str(Path(self.tl_model.cluster_dir) / f'alpha_{alpha}_GT-{gt_years}')\n os.makedirs(new_cluster_dir, exist_ok=True)\n\n new_prepared_data_dir = str(Path(self.tl_model.prepared_data_dir) / f'alpha_{alpha}')\n os.makedirs(new_prepared_data_dir, exist_ok=True)\n \n if utils.find(f'*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir) and utils.find(f'*alpha_{alpha}_standardized_stacked_arr.pkl', new_prepared_data_dir):\n pass\n else:\n if not utils.find(f'*target*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No input datasets pre-processed for alpha of {alpha}\")\n prepare.cut_target_dataset(self, alpha, new_prepared_data_dir)\n\n if not utils.find(f'*rf*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No rainfall datasets pre-processed for alpha of {alpha}\")\n prepare.cut_rf_dataset(self, alpha, new_prepared_data_dir)\n \n print(f'Preprocessed pickles for alpha split {alpha} can be found @:\\n{new_prepared_data_dir}')", "def splitMerge(self):\n\t\tpath_merge = self.aug_merge_path\n\t\tpath_train = self.aug_train_path\n\t\tpath_label = self.aug_label_path\n\t\tfor i in range(self.slices):\n\t\t\tpath = path_merge + \"/\" + str(i)\n\t\t\ttrain_imgs = glob.glob(path+\"/*.\"+self.img_type)\n\t\t\tsavedir = path_train + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)\n\t\t\tsavedir = path_label + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)", "def test_train_split(folder_name):\n\n class_folders = glob.glob(os.path.join(folder_name, '*'))\n\n class_names = [i.split('/')[-1] for i in class_folders]\n\n print(class_folders)\n\n train_folder_path = os.path.join(folder_name, 'train_dir')\n validation_folder_path = os.path.join(folder_name, 'val_dir')\n\n if not os.path.exists(train_folder_path):\n os.makedirs(train_folder_path)\n if not os.path.exists(validation_folder_path):\n os.makedirs(validation_folder_path)\n\n # Create the folder structure\n class_folders_train = []\n class_folders_val = []\n for class_name in class_names:\n # Create calss folder in the training directory\n class_folders_train.append(os.path.join(train_folder_path, class_name))\n if not 
os.path.exists(class_folders_train[-1]):\n os.makedirs(class_folders_train[-1])\n # Create class folder in the validation_directory\n class_folders_val.append(os.path.join(\n validation_folder_path, class_name))\n if not os.path.exists(class_folders_val[-1]):\n os.makedirs(class_folders_val[-1])\n\n class_files = []\n\n for idx, class_folder in enumerate(class_folders):\n class_files = glob.glob(os.path.join(class_folder, '*.jpg'))\n for file in class_files[:int(len(class_files) * 0.7)]:\n copyfile(file, os.path.join(\n class_folders_train[idx], file.split('/')[-1]))\n for file in class_files[int(len(class_files) * 0.7):]:\n print(file)\n print(os.path.join(class_folders_val[idx], file.split('/')[-1]))\n copyfile(file, os.path.join(\n class_folders_val[idx], file.split('/')[-1]))", "def preprocess_dir(data_path,\n output_path,\n dataset,\n n_train,\n new_size,\n ):\n img_type_dict = get_class_labels()\n\n print('Preprocessing:', dataset)\n target_data_path = data_path\n disease_dirs = os.listdir(target_data_path)\n disease_dirs = [d for d in disease_dirs if\n os.path.isdir(os.path.join(target_data_path, d))]\n img_stack, target_list = [], []\n img_names = []\n for img_type in disease_dirs:\n class_lbl = img_type_dict[img_type]\n n_class = int(n_train / len(disease_dirs))\n print('\\t', img_type)\n img_files_path = os.path.join(target_data_path, img_type)\n if not (os.path.isdir(img_files_path)):\n continue\n img_files = os.listdir(img_files_path)\n img_files = [f for f in img_files if f.endswith('.jpeg')]\n if dataset == 'train':\n img_files = img_files[0:n_class]\n for img_fname in img_files:\n img_path = os.path.join(img_files_path, img_fname)\n img_arr = np.array(Image.open(img_path))\n img_arr = skimage.transform.resize(img_arr, new_size)\n img_arr = (img_arr - img_arr.min()) / img_arr.max()\n img_stack.append(img_arr)\n target_list.append(class_lbl)\n img_names += [n.split('.')[0] for n in img_files]\n # Save preprocessed data\n save_data(output_path, img_stack, target_list,\n new_size, dataset, n_train, img_names)", "def label_training_data(input_path, output_path):\r\n import shutil\r\n image_files = [file for file in os.listdir(path=input_path) if '.JPG' in file or '.jpeg' in file]\r\n \r\n for file in image_files:\r\n file_input_path = os.path.join(input_path,file)\r\n \r\n img = cv2.imread(file_input_path)\r\n \r\n file_output_path = os.path.join(output_path, classify_face(img))\r\n \r\n try:\r\n os.makedirs(file_output_path)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n shutil.move(file_input_path, file_output_path)", "def _preprocess_data(self, name, directory):\n if name.endswith('data'):\n for path in glob(str(directory / '**/*.jpg'), recursive=True):\n try:\n with Image.open(path) as img:\n if not name.startswith('feature'):\n img = img.rotate(-90, 0, 1)\n img = img.resize(self.input_shape)\n except (ValueError, OSError):\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n filename = path.name.split('img-')[1]\n target = (path.parent / filename).with_suffix('.image.png')\n img.save(target, 'PNG')\n os.remove(str(path))\n elif name.endswith('targets'):\n for path in glob(str(directory / '**/*.mat'), recursive=True):\n try:\n mat = spio.loadmat(path)['depthMap']\n img = spmisc.toimage(mat).resize(self.target_shape)\n except ValueError:\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n name = path.name[path.name.index('-') + 1:]\n target = (path.parent / name).with_suffix('.depth.png')\n img.save(target, 
'PNG')\n os.remove(str(path))", "def create_dataset(data_path, batch_size=32, repeat_size=1,\n num_parallel_workers=1):\n # define dataset\n mnist_ds = ds.MnistDataset(data_path)\n\n # define operation parameters\n resize_height, resize_width = 32, 32\n rescale = 1.0 / 255.0\n shift = 0.0\n rescale_nml = 1 / 0.3081\n shift_nml = -1 * 0.1307 / 0.3081\n\n # define map operations\n resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Resize images to (32, 32)\n rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) # normalize images\n rescale_op = CV.Rescale(rescale, shift) # rescale images\n hwc2chw_op = CV.HWC2CHW() # change shape from (height, width, channel) to (channel, height, width) to fit network.\n type_cast_op = C.TypeCast(mstype.int32) # change data type of label to int32 to fit network\n\n # apply map operations on images\n mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n\n # apply DatasetOps\n buffer_size = 10000\n mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script\n mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n mnist_ds = mnist_ds.repeat(repeat_size)\n\n return mnist_ds", "def get_data(folder: str, dimensions: int):\n preprocess = transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(dimensions),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n )\n ]\n )\n return datasets.ImageFolder(folder, transform=preprocess)", "def convert_hdf5_to_npz(in_dir_name, out_dir_name, cutoff=None,\n num_sampled_shards=None, max_num_atoms=None):\n\n tr_env_fn = in_dir_name+'/split/pairs_train@40'\n va_env_fn = in_dir_name+'/split/pairs_val@40'\n te_env_fn = in_dir_name+'/split/pairs_test@40'\n\n # Create the internal data sets\n ds_tr = MoleculesDataset(tr_env_fn, cutoff=cutoff, name='training')\n ds_va = MoleculesDataset(va_env_fn, cutoff=cutoff, name='validation')\n ds_te = MoleculesDataset(te_env_fn, cutoff=cutoff, name='test')\n\n print('Training: %i molecules. Validation: %i molecules. 
Test: %i molecules.'%(len(ds_tr),len(ds_va),len(ds_te)))\n\n # Make a directory\n try:\n os.mkdir(out_dir_name)\n except FileExistsError:\n pass\n\n # Save the data sets as compressed numpy files\n tr_file_name = out_dir_name+'/train.npz'\n va_file_name = out_dir_name+'/valid.npz'\n te_file_name = out_dir_name+'/test.npz'\n ds_tr.write_compressed(tr_file_name)\n ds_va.write_compressed(va_file_name)\n ds_te.write_compressed(te_file_name)\n\n return ds_tr, ds_va, ds_te", "def download(self):\n\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n os.makedirs(self.processed_folder, exist_ok=True)\n\n # download files\n for url in self.resources:\n filename = url.rpartition('/')[2]\n download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=None)\n\n print('Processing...')\n\n training_set = (\n self.read_image_label_file(os.path.join(self.raw_folder, 'mnist_all_rotation_normalized_float_train_valid.amat'))\n )\n test_set = (\n self.read_image_label_file(os.path.join(self.raw_folder, 'mnist_all_rotation_normalized_float_test.amat'))\n )\n\n with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:\n torch.save(training_set, f)\n with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:\n torch.save(test_set, f)\n\n print('Done!')", "def create_output_folder(output_folder_name: str, finding_labels: list):\n if not os.path.isdir(output_folder_name):\n os.mkdir(output_folder_name)\n for type in ['/train', '/val', '/test']:\n if not os.path.isdir(output_folder_name + type):\n os.mkdir(output_folder_name + type)\n for disease in finding_labels:\n if not os.path.isdir(output_folder_name + type + '/' + disease):\n os.mkdir(output_folder_name + type + '/' + disease)", "def train_classifier_bootstrap(iterations: int = 3, files_per_iteration: int = 50,\n test_partition: float = 0.2) -> None:\n\n classifier = CnnClassifier(device).to(device)\n classifier.load_from_file()\n classifier = classifier.to(device)\n\n full_res_folder = os.path.dirname(__file__) + '/dataset/GTSRB_Negative/bootstrap_full_res/'\n output_folder = os.path.dirname(__file__) + '/dataset/GTSRB_Negative/images/'\n print(\"Searching for new pictures in\", full_res_folder, \"that are classified as street signs\")\n\n jpg_file_names = []\n for root, dirs, files in os.walk(full_res_folder):\n for name in files:\n if name.endswith(\".jpg\"):\n jpg_file_names.append((root, name))\n\n for i in range(1, iterations):\n random.shuffle(jpg_file_names)\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>> Iteration {}\".format(i))\n dirty = False\n new_files = []\n for root, jpg_file_name in jpg_file_names[:files_per_iteration]:\n print(\"\\tLoading\", jpg_file_name)\n pil_img = Image.open(root + jpg_file_name)\n fa = FrameAnalyzer(device, classifier=classifier, width=pil_img.size[0], height=pil_img.size[1])\n if pil_img is not None:\n rectangles = fa.get_detected_street_signs(pil_img, limit=50)\n print(\"\\t\\t{} patches classified as street signs\".format(len(rectangles)))\n if len(rectangles):\n dirty = True\n for rect in rectangles:\n x1, y1, x2, y2, label_idx, score = rect\n x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)\n fn = '{}_{}_'.format(x1, y1) + jpg_file_name\n # crop and save\n pil_img.crop((x1, y1, x2, y2)).save(output_folder + fn)\n new_files.append(fn)\n\n if dirty:\n print(\"Adding\", len(new_files), \"negative examples\")\n # shuffle and partition into training and test\n random.shuffle(new_files)\n n_test = int(test_partition * 
len(new_files))\n new_files_test = new_files[:n_test]\n new_files_training = new_files[n_test:]\n GTSRBDataset.add_images_to_negative_examples_json(new_files_test, overwrite=False, test=True)\n GTSRBDataset.add_images_to_negative_examples_json(new_files_training, overwrite=False, test=False)\n # train the model\n # epochs = 2 # max(1, int(iterations / 3))\n classifier.train_model(max_epochs=20, max_patience=5)\n classifier.store_to_file()", "def before_process(self,data,labels):\n # JM: if integer labels are given, then create different output\n # directories for each new label\n if all(isinstance(lbl,int) for lbl in labels):\n self.batch_dirs = \\\n [os.path.join(self.output_dir,str(lbl)) for lbl in labels]\n # JM: otherwise create the same output directory for each image\n else:\n self.batch_dirs = [self.output_dir] * len(data)\n\n # create output directories if they don't already exist\n uniques = set(self.batch_dirs)\n for out_dir in uniques:\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n self.batch_index = 0", "def make_data(_is_train, data, label):\n save_path = os.path.join(os.getcwd(), \"SRCNN\", 'checkpoint')\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n if _is_train:\n save_path = os.path.join(save_path, 'train.h5')\n else:\n save_path = os.path.join(save_path, 'test.h5')\n\n # data 和 label 預設類型是 numpy array ,但若建立時內部陣列維度不相等,內部數據將被轉為 dtype=object\n # 導致 h5py 無法儲存: TypeError: Object dtype dtype('O') has no native HDF5 equivalent\n with h5py.File(save_path, 'w') as hf:\n hf.create_dataset('data', data=data)\n hf.create_dataset('label', data=label)", "def create_train_sets(self, proportion_val):\n l_path = os.listdir(self.image_folder_path)\n lr_path = random.sample(l_path, len(l_path))\n val_files = lr_path[: round(proportion_val * len(lr_path))]\n train_files = lr_path[round(proportion_val * len(lr_path)) :]\n delete_files(self.root_name, \"/VOC2021/ImageSets/Main\")\n write_txt(\"train.txt\", self.txt_path, train_files)\n write_txt(\"val.txt\", self.txt_path, val_files)", "def _create_tensor_dataset(self, img_size):\n with tempfile.TemporaryDirectory() as tmpdirname:\n dir_mimic = '/cluster/work/vogtlab/Projects/mimic-cxr/physionet.org/files/mimic-cxr-jpg/2.0.0/files'\n if os.path.exists(dir_mimic):\n # only run test if original data exists\n\n if os.path.exists('/cluster/home/klugh/'):\n tmpdirname = os.path.join(os.path.expandvars('$TMPDIR'), 'test_create_tensor_dataset')\n if not os.path.exists(tmpdirname):\n os.mkdir(tmpdirname)\n dir_out = os.path.expanduser(os.path.join(tmpdirname, 'dir_out'))\n\n dir_base_resized_compressed = os.path.expanduser(os.path.join(tmpdirname))\n self.assertTrue(os.path.exists(tmpdirname))\n\n dir_base_resize = os.path.join(tmpdirname, f'files_small_{img_size[0]}')\n dataset_creator = CreateTensorDataset(dir_base_resize=dir_base_resize, dir_mimic=dir_mimic,\n dir_out=dir_out,\n img_size=img_size,\n dir_base_resized_compressed=dir_base_resized_compressed,\n max_it=10)\n dataset_creator()\n self.assertTrue(os.path.exists(dir_out))\n self.assertTrue(os.path.exists(dir_base_resized_compressed))\n\n assert os.path.exists(os.path.join(dir_base_resized_compressed,\n f'mimic_resized_{img_size[0]}.zip')), \\\n 'dir_resized_compressed {} does not exist \\n {}'.format(\n os.path.join(dir_base_resized_compressed, f'mimic_resized_{img_size[0]}.zip'),\n os.listdir(dir_base_resized_compressed))", "def split_data(train_split, src_dir, train_dir, test_dir, classes):\n for cls in classes:\n # get all dat files of this class\n data = 
get_instances_of_class(cls, src_dir)\n \n # how many of the data points are for training?\n train_count = round(len(data) * train_split / 100)\n \n # randomly choose indexes\n train_indexes = set()\n while len(train_indexes) < train_count:\n train_indexes.add(random.randrange(len(data)))\n \n # move all train_indexes to train_dir, others to test_dir\n COPY = lambda src, dst, filename:\\\n shutil.copy2(\n \"{}/{}\".format(src, data[i]),\n \"{}/{}\".format(dst, data[i])\n )\n \n for i in range(len(data)):\n if i in train_indexes:\n COPY(src_dir, train_dir, data[i])\n else:\n COPY(src_dir, test_dir, data[i])", "def main():\n train_src = read_file(SRC_TRAIN)\n train_tgt = read_file(TRGT_TRAIN)\n val_src = read_file(SRC_VAL)\n val_tgt = read_file(TRGT_VAL)\n # val = read_files(VAL_FILES)\n np.savez(\n DATA_NPZ_NAME, train_src=train_src, train_tgt=train_tgt, val_src=val_src, val_tgt=val_tgt)", "def generate_dataset():\n if not os.path.exists(\"../data/COVID-19/COVID-19.npy\"):\n print(\"Processing Training Data.\")\n training_data = get_training_data('../data/COVID-19/train')\n print(\"Processing Test Data.\")\n test_data = get_training_data('../data/COVID-19/test')\n\n x_train, y_train, x_test, y_test = [], [], [], []\n\n for feature, label in training_data:\n x_train.append(feature)\n y_train.append(label)\n\n for feature, label in test_data:\n x_test.append(feature)\n y_test.append(label)\n\n # Normalize the data\n x_train = np.array(x_train) / 255\n x_test = np.array(x_test) / 255\n\n # resize data for deep learning\n x_train = x_train.reshape(-1, 3, img_size, img_size)\n y_train = np.array(y_train)\n x_test = x_test.reshape(-1, 3, img_size, img_size)\n y_test = np.array(y_test)\n\n # With data augmentation to prevent overfitting and handling the imbalance in dataset\n dataset = {\"x_train\": x_train, \"y_train\": y_train, \"x_test\": x_test, \"y_test\": y_test}\n np.save(\"../data/COVID-19/COVID-19.npy\", dataset)\n else:\n dataset = np.load(\"../data/COVID-19/COVID-19.npy\", allow_pickle=True).item()\n x_train, y_train, x_test, y_test = dataset[\"x_train\"], dataset[\"y_train\"], dataset[\"x_test\"], dataset[\"y_test\"]\n\n x_train_tensor = torch.from_numpy(x_train)\n x_train_tensor = x_train_tensor.type(torch.FloatTensor)\n y_train_tensor = torch.from_numpy(y_train)\n y_train_tensor = y_train_tensor.type(torch.LongTensor)\n x_test_tensor = torch.from_numpy(x_test)\n x_test_tensor = x_test_tensor.type(torch.FloatTensor)\n y_test_tensor = torch.from_numpy(y_test)\n y_test_tensor = y_test_tensor.type(torch.LongTensor)\n\n train_dataset = TensorDataset(x_train_tensor, y_train_tensor)\n test_dataset = TensorDataset(x_test_tensor, y_test_tensor)\n\n return train_dataset, test_dataset", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)", "def __init__(self, dataset, width=512, height=512, pictures=10, generate_classes=True, generate_objects=True):\n super().__init__(dataset)\n\n cropper = Cropper(width=width, height=height)\n\n dir_name = 
\"tmp-data-{}x{}-from-{}-pictures\".format(width, height, pictures)\n origins = os.path.join(dir_name, \"origins\")\n classes = os.path.join(dir_name, \"classes\")\n origins_classes_v_join = os.path.join(dir_name, \"origin-classes-v-join\")\n objects = os.path.join(dir_name, \"objects\")\n origins_objects_v_join = os.path.join(dir_name, \"origin-objects-v-join\")\n\n if not os.path.exists(origins):\n os.makedirs(origins)\n\n trains = self.get_iterable_trains()\n vals = self.get_iterable_evals()\n\n selection_set = []\n for _, val in enumerate(trains):\n origin, class_v, object_v = self.get_train_triple(val)\n selection_set.append((origin, class_v, object_v))\n for _, val in enumerate(vals):\n origin, class_v, object_v = self.get_val_triple(val)\n selection_set.append((origin, class_v, object_v))\n\n final_set = random.sample(selection_set, pictures)\n\n if generate_classes:\n if not os.path.exists(classes):\n os.makedirs(classes)\n if not os.path.exists(origins_classes_v_join):\n os.makedirs(origins_classes_v_join)\n\n if generate_objects:\n if not os.path.exists(objects):\n os.makedirs(objects)\n if not os.path.exists(origins_objects_v_join):\n os.makedirs(origins_objects_v_join)\n\n for _, (origin, class_v, object_v) in enumerate(final_set):\n print(\"Processing {}, {}, {}\".format(origin, class_v, object_v))\n cropper.set_imgs(origin, class_v, object_v, add_randomly=5)\n counter = 1\n while not cropper.is_finished:\n origin_i, class_i, object_i = cropper.next_crop()\n # Check that classes are involved\n finded = False\n for l in class_i:\n for pix in l:\n for c in pix:\n if c != 0:\n finded = True\n break\n if finded:\n break\n if finded:\n break\n if not finded:\n continue\n path = \"{}-{}.png\".format(get_origin_name(origin), counter)\n # print(\"Writing: {}\".format(os.path.join(origins, path)))\n cv2.imwrite(os.path.join(origins, path), origin_i)\n if generate_classes:\n cv2.imwrite(os.path.join(classes, path), class_i)\n cv2.imwrite(os.path.join(origins_classes_v_join, path), cv2.hconcat([origin_i, class_i]))\n if generate_objects:\n cv2.imwrite(os.path.join(objects, path), object_i)\n cv2.imwrite(os.path.join(origins_objects_v_join, path), cv2.hconcat([origin_i, object_i]))\n counter += 1\n\n print(\"Generating of {}-pictures-subset done. 
You find it in: {}\".format(pictures, dir_name))", "def setup(self):\n\n folder_name, file_name, url, md5 = self.resource\n dataset_folder = os.path.join(self.data_root, folder_name)\n if not os.path.exists(dataset_folder):\n sh_utils.download_and_extract_archive(url, dataset_folder, md5, file_name)\n\n test_transform = tv_transforms.Compose(\n [\n tv_transforms.ToTensor(),\n tv_transforms.Lambda(lambda x: x.permute(1, 2, 0)),\n ]\n )\n\n dataset_out = tv_datasets.ImageFolder(\n root=dataset_folder, transform=test_transform\n )\n self.images_only_dataset_out = sh_data_torch.IndexedTorchDataset(\n sh_data_torch.ImagesOnlyTorchDataset(dataset_out)\n )", "def fixture_image_data(tmp_path_factory, request):\n # Make root dir\n root = tmp_path_factory.mktemp(\"data\")\n\n # Set params\n num_images = request.param\n\n # Create image files\n paths = [root / Path(f\"{idx}.png\") for idx in range(num_images)]\n dimensions = [(idx % 10 + 1, (10 - idx) % 10 + 1) for idx in range(num_images)]\n for path, dim in zip(paths, dimensions):\n image = Image.new(mode=\"RGB\", size=dim)\n if not path.parent.exists():\n path.parent.mkdir(parents=True)\n with open(path, \"wb\") as img_file:\n image.save(img_file)\n return root", "def prepare_test_data(args):\n image_dir = args.test_image_dir\n\n files = os.listdir(image_dir)\n files = [f for f in files if f.lower().endswith('.png')]\n\n img_ids = list(range(len(files)))\n img_files = []\n img_heights = []\n img_widths = []\n \n for f in files:\n img_path = os.path.join(image_dir, f)\n img_files.append(img_path)\n img = cv2.imread(img_path)\n img_heights.append(img.shape[0]) \n img_widths.append(img.shape[1]) \n\n print(\"Building the testing dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths)\n print(\"Dataset built.\")\n return dataset", "def create_dataset(data_path, batch_size=32, repeat_size=1, num_parallel_workers=1):\n # define dataset\n mnist_ds = ds.MnistDataset(data_path)\n\n # define operation parameters\n resize_height, resize_width = 32, 32\n rescale = 1.0 / 255.0\n shift = 0.0\n rescale_nml = 1 / 0.3081\n shift_nml = -1 * 0.1307 / 0.3081\n\n # define map operations\n resize_op = CV.Resize(\n (resize_height, resize_width), interpolation=Inter.LINEAR\n ) # Resize images to (32, 32)\n rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) # normalize images\n rescale_op = CV.Rescale(rescale, shift) # rescale images\n hwc2chw_op = (\n CV.HWC2CHW()\n ) # change shape from (height, width, channel) to (channel, height, width) to fit network.\n type_cast_op = C.TypeCast(\n mstype.int32\n ) # change data type of label to int32 to fit network\n\n # apply map operations on images\n mnist_ds = mnist_ds.map(\n input_columns=\"label\",\n operations=type_cast_op,\n num_parallel_workers=num_parallel_workers,\n )\n mnist_ds = mnist_ds.map(\n input_columns=\"image\",\n operations=resize_op,\n num_parallel_workers=num_parallel_workers,\n )\n mnist_ds = mnist_ds.map(\n input_columns=\"image\",\n operations=rescale_op,\n num_parallel_workers=num_parallel_workers,\n )\n mnist_ds = mnist_ds.map(\n input_columns=\"image\",\n operations=rescale_nml_op,\n num_parallel_workers=num_parallel_workers,\n )\n mnist_ds = mnist_ds.map(\n input_columns=\"image\",\n operations=hwc2chw_op,\n num_parallel_workers=num_parallel_workers,\n )\n\n # apply DatasetOps\n buffer_size = 10000\n mnist_ds = mnist_ds.shuffle(\n buffer_size=buffer_size\n ) # 10000 as in LeNet train_lenet script\n mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n mnist_ds = 
mnist_ds.repeat(repeat_size)\n\n return mnist_ds", "def main(src_dir, dst_dir='pleiades', print_cfg_ipol=False):\n for dataset in os.listdir(src_dir):\n dataset_abspath = os.path.join(src_dir, dataset)\n if os.path.isdir(dataset_abspath):\n if 'dataset_1' in os.listdir(dataset_abspath): # the dataset has subdatasets (multidate)\n for subdataset in os.listdir(dataset_abspath):\n if os.path.isdir(os.path.join(dataset_abspath, subdataset)):\n l = list_images_in_dataset(os.path.join(dataset_abspath, subdataset))\n mkdir_p(os.path.join(dst_dir, dataset, subdataset))\n create_links(l, os.path.join(dst_dir, dataset, subdataset), print_cfg_ipol)\n else: # the dataset doesn't have subdatasets (monodate)\n l = list_images_in_dataset(dataset_abspath)\n mkdir_p(os.path.join(dst_dir, dataset))\n create_links(l, os.path.join(dst_dir, dataset), print_cfg_ipol)", "def CreateFolders(self,block):\n \n if self.mode=='first_layer' or self.mode=='greedy':\n name = 'block_'+str(block)\n if not os.path.exists(os.path.join(self.path_save,name)):\n os.makedirs(os.path.join(self.path_save,name,'training'))\n elif self.mode=='all_layers':\n name = 'block_'+str(0)+'_'+str(self.nb_blocks-1)\n if not os.path.exists(os.path.join(self.path_save,name)):\n os.makedirs(os.path.join(self.path_save,name,'training')) \n elif self.mode=='lpp':\n name = 'lpp'\n if not os.path.exists(os.path.join(self.path_save,name)):\n os.makedirs(os.path.join(self.path_save,name,'training')) \n if self.mode!='test':\n folder = os.path.join(self.path_save,'ImagesLastBlock')\n if not os.path.exists(folder):\n subfolders = ['train','val']\n subsubfolders = ['trueimage','blurredimage','trueblur','noise_std','mk_vec','diagSigma_vec','newmh_vec','newSigmah_vec','Gammap_vec','LAMBDAk_vec']\n paths = [os.path.join(folder, sub, subsub) for sub in subfolders for subsub in subsubfolders]\n for path in paths:\n os.makedirs(path)", "def __init__(self, folder_path, image_size=(320,240), batch_size=4, mode='seg', target_classes=[\"Good Crypts\"], filter_classes=[], augment=True):\n print(\"Initialising data generator\")\n # Making the image ids list\n self.folder_path = folder_path\n image_paths = [f for f in os.listdir(folder_path) if f.endswith(\".jpg\")]\n self.image_ids = [f.replace('.jpg', '') for f in image_paths]\n self.orig_image_ids = self.image_ids.copy()\n self.filter_classes = filter_classes\n self.filter_data()\n\n self.image_size = image_size\n self.batch_size = batch_size\n self.mode = mode\n self.target_classes = target_classes\n self.augment = augment\n print(\"Image count in {} path: {}\".format(self.folder_path,len(self.image_ids)))\n self.on_epoch_end()", "def main():\n if len(sys.argv) < 3:\n message = \"\"\"\n Usage: python generate_dataset.py <dataset_name> <number of files> <size of each file in bytes>\n \"\"\"\n print(message)\n sys.exit(0)\n dataset_name = sys.argv[1]\n file_number = int(sys.argv[2])\n file_size = int(sys.argv[3])\n\n if not os.path.exists(dataset_name):\n os.makedirs(dataset_name)\n\n for i in range(file_number):\n tmp_file = open('./' + dataset_name + '/' + dataset_name + '.file' + str(i), 'w+')\n tmp_file.write(os.urandom(file_size))\n tmp_file.close()", "def main(src, dst, size):\r\n\ttrain_dst = os.path.join(dst, 'train')\r\n\ttest_dst = os.path.join(dst, 'test')\r\n\tlabel_paths = [os.path.join(src, 'labels', i) for i in os.listdir(os.path.join(src, 'labels'))]\r\n\timage_folders = [os.path.join(src, i) for i in os.listdir(src) if i != \"labels\"]\r\n\r\n\timage_paths = {}\r\n\tfor folder in 
image_folders:\r\n\t\timages = os.listdir(folder)\r\n\t\timage_paths[os.path.basename(folder)] = [os.path.join(folder, i) for i in images]\r\n\tif DEBUG:\r\n\t\tprint(\"image folders are : {}\".format(image_paths.keys()))\r\n\r\n\t# for each image assign its xyz coordinate\r\n\targs = []\r\n\r\n\ttrain_labels = [\"B1\", \"B2\", \"B3\", \"B5\", \"B6\"]\r\n\ttest_labels = [\"B4\"]\r\n\r\n\tfor l_p in label_paths:\r\n\t\tfolder = os.path.basename(l_p).split('_')[0]\r\n\t\tcamera = os.path.basename(l_p).split('_')[-1][0:-4]\r\n\r\n\t\timages = image_paths[folder]\r\n\t\tlabels = get_xyz_coord(l_p)\r\n\t\timages = list(filter(lambda x: os.path.basename(x).split(\"_\")[0] == camera, images))\r\n\t\tif DEBUG:\r\n\t\t\tprint(l_p, camera)\r\n\t\tfor i in images:\r\n\t\t\tindex = int(os.path.basename(i).split('_')[-1][0:-4])\r\n\t\t\tif os.path.basename(l_p)[0:2] in train_labels:\r\n\t\t\t\tdestination = os.path.join(train_dst, folder, os.path.basename(i))\r\n\t\t\telif os.path.basename(l_p)[0:2] in test_labels:\r\n\t\t\t\tdestination = os.path.join(test_dst, folder, os.path.basename(i))\r\n\t\t\telse:\r\n\t\t\t\traise ValueError\r\n\t\t\targs.append([i, destination, reorder(labels[index]), size])\r\n\r\n\tp = Pool()\r\n\tresults = list(tqdm.tqdm(p.imap(image_process, args), ascii=True, total=len(args)))\r\n\tp.close()\r\n\tp.join()\r\n\r\n\tannotations_train = edict()\r\n\tannotations_test = edict()\r\n\tfor r in results:\r\n\t\tdestination, uv_coord, depth, xyz, k = r\r\n\t\tfolder = os.path.basename(os.path.dirname(destination))\r\n\t\timage = os.path.basename(destination)\r\n\r\n\t\tif folder[0:2] in train_labels:\r\n\t\t\tannotations = annotations_train\r\n\t\telif folder[0:2] in test_labels:\r\n\t\t\tannotations = annotations_test\r\n\t\telse:\r\n\t\t\traise ValueError\r\n\r\n\t\tif folder not in annotations:\r\n\t\t\tannotations[folder] = edict()\r\n\t\t\tannotations[folder][image] = edict()\r\n\t\telse:\r\n\t\t\tannotations[folder][image] = edict()\r\n\t\tannotations[folder][image].uv_coord = uv_coord\r\n\t\tannotations[folder][image].k = k\r\n\t\tannotations[folder][image].depth = depth\r\n\t\tannotations[folder][image].xyz = xyz\r\n\r\n\twith open(os.path.join(train_dst, \"annotation.pickle\"), \"wb\") as handle:\r\n\t\tpickle.dump(annotations_train, handle)\r\n\r\n\twith open(os.path.join(test_dst, \"annotation.pickle\"), \"wb\") as handle:\r\n\t\tpickle.dump(annotations_test, handle)", "def create_dataset(data_path, batch_size=32, repeat_size=1, num_parallel_workers=1):\n # define dataset\n mnist_ds = ds.MnistDataset(data_path, num_samples=batch_size * 10)\n\n resize_height, resize_width = 32, 32\n rescale = 1.0 / 255.0\n rescale_nml = 1 / 0.3081\n shift_nml = -1 * 0.1307 / 0.3081\n\n # define map operations\n resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode\n rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)\n rescale_op = CV.Rescale(rescale, shift=0.0)\n hwc2chw_op = CV.HWC2CHW()\n type_cast_op = C.TypeCast(mstype.int32)\n\n # apply map operations on images\n mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = 
mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n\n # apply DatasetOps\n mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n mnist_ds = mnist_ds.repeat(repeat_size)\n\n return mnist_ds", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def _create_data_directory(self):\n self.src_data_dir.mkdir(exist_ok=True, parents=True)", "def cli(suffix, partspec, target_person_size, crop=513, only_missing=False, up3d_fp=UP3D_FP): # pylint: disable=too-many-locals, too-many-arguments\n np.random.seed(1)\n LOGGER.info(\"Creating segmentation dataset for %s classes with target \"\n \"person 
size %f and suffix `%s`.\",\n partspec, target_person_size, suffix)\n assert ' ' not in suffix\n dset_fromroot = path.join(partspec, str(target_person_size), suffix)\n dset_fp = path.join(DSET_ROOT_FP, dset_fromroot)\n if path.exists(dset_fp):\n if not only_missing:\n if not click.confirm(\"Dataset folder exists: `%s`! Continue?\" % (dset_fp)):\n return\n else:\n os.makedirs(dset_fp)\n LOGGER.info(\"Creating list files...\")\n list_fp = path.join(path.dirname(__file__), '..', 'training', 'list')\n if not path.exists(list_fp):\n os.makedirs(list_fp)\n train_list_f = open(path.join(list_fp, 'train_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'w')\n val_list_f = open(path.join(list_fp, 'val_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'w')\n test_list_f = open(path.join(list_fp, 'test_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'w')\n with open(path.join(up3d_fp, 'train.txt'), 'r') as f:\n train_spec = [line.strip() for line in f.readlines()]\n with open(path.join(up3d_fp, 'val.txt'), 'r') as f:\n val_spec = [line.strip() for line in f.readlines()]\n with open(path.join(up3d_fp, 'test.txt'), 'r') as f:\n test_spec = [line.strip() for line in f.readlines()]\n\n LOGGER.info(\"Processing...\")\n add_dataset(\n dset_fp,\n dset_fromroot,\n up3d_fp,\n train_list_f, val_list_f, test_list_f,\n train_spec, val_spec, test_spec,\n target_person_size, partspec,\n crop, 0,\n only_missing=only_missing)\n train_list_f.close()\n val_list_f.close()\n test_list_f.close()\n LOGGER.info(\"Creating trainval file...\")\n trainval_list_f = open(path.join(list_fp, 'trainval_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'w')\n train_list_f = open(path.join(list_fp, 'train_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'r')\n val_list_f = open(path.join(list_fp, 'val_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'r')\n for line in train_list_f:\n trainval_list_f.write(line)\n for line in val_list_f:\n trainval_list_f.write(line)\n trainval_list_f.close()\n train_list_f.close()\n val_list_f.close()\n LOGGER.info(\"Done.\")" ]
[ "0.6529704", "0.6481513", "0.6435279", "0.6418067", "0.63397163", "0.6227401", "0.62035155", "0.61983734", "0.6165103", "0.61225355", "0.612228", "0.60964", "0.60848254", "0.6073827", "0.60563177", "0.6047265", "0.60234", "0.6019958", "0.60174394", "0.6004536", "0.5983722", "0.5966851", "0.59544486", "0.59262556", "0.5918337", "0.591189", "0.58968794", "0.58944523", "0.58807254", "0.58793026", "0.5871399", "0.58556706", "0.5851229", "0.58354205", "0.58233756", "0.58172244", "0.57947797", "0.5782247", "0.5777168", "0.5774015", "0.5772425", "0.5770133", "0.5766006", "0.5765337", "0.5765043", "0.57493174", "0.57476", "0.57440317", "0.57390124", "0.5719708", "0.5712652", "0.5707615", "0.5693829", "0.5691468", "0.5671326", "0.5667025", "0.56623685", "0.56515706", "0.5650346", "0.5646674", "0.5635191", "0.56300783", "0.56294036", "0.5623636", "0.5620676", "0.56172675", "0.5613995", "0.5613662", "0.5607737", "0.5605935", "0.560168", "0.5601285", "0.56000787", "0.55907685", "0.55793387", "0.5576717", "0.55765754", "0.55756813", "0.5573758", "0.5572673", "0.5571896", "0.5570903", "0.55697584", "0.55687547", "0.556801", "0.55678827", "0.55598503", "0.55552244", "0.5552731", "0.5547544", "0.55471724", "0.55348134", "0.5534614", "0.55288994", "0.55208385", "0.55152154", "0.551271", "0.5511608", "0.5510444", "0.55075467" ]
0.7347322
0
Given a wav file, use Praat to return a dictionary containing pitch (in Hz) at each millisecond.
def praat_analyze_pitch(audio_file):
    praatpath = path.abspath('Praat.app/Contents/MacOS/Praat')  # locate Praat executable
    pl = PraatLoader(praatpath=praatpath)  # create instance of PraatLoader object
    praat_output = pl.run_script('pitch.praat', audio_file)  # run pitch script in Praat
    pitch_data = pl.read_praat_out(praat_output)  # turn Praat's output into Python dict
    return pitch_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):\n if os.path.isfile(filename) is False:\n raise Exception('File not found with filename = %s' % filename)\n\n print(\"====> reading pitch from sound file\")\n win_s = 4096 // DOWN_SAMPLE # fft size\n hop_s = 512 // DOWN_SAMPLE # hop size\n\n s = source(filename, samplerate, hop_s)\n samplerate = s.samplerate\n\n tolerance = 0.8\n\n pitch_o = pitch(\"yin\", win_s, hop_s, samplerate)\n pitch_o.set_unit(\"midi\")\n pitch_o.set_tolerance(tolerance)\n\n result = []\n\n # total number of frames read\n total_frames = 0\n while True:\n samples, read = s()\n # the pitch value is not rounded and many zeroes occur\n that_pitch = pitch_o(samples)[0]\n confidence = pitch_o.get_confidence()\n result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence))\n total_frames += read\n if read < hop_s:\n break\n\n group_result_with_log_density = compute_density_from_pitch_result(result)\n density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time'])\n print(\"====> density level list length %s\" % len(density_level_list))\n proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time'])\n print(\"====> emphasis proportion list length = %d\" % len(proportion_list))\n return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list)", "def load_wav(wav_file):\n rate, data = wavfile.read(wav_file)\n return rate, data", "def filesample(filename):\n sampling_rate, samples = wavfile.read(filename)\n times = np.arange(len(samples)) / sampling_rate\n return samples, sampling_rate", "def fingerprint_wave(file):\n\n\twav = wave.open(file, 'rb')\n\tif wav.getnchannels() == 1:\n\t\tstereo = 0\n\telif wav.getnchannels() == 2:\n\t\tstereo = 1\n\telse:\n\t\twav.close()\n\t\traise Exception(\"Only 1 or 2 channel WAV files supported\")\n\n\twidth = wav.getsampwidth()\n\tif width != 2:\n\t\twav.close()\n\t\traise Exception(\"Only 16-bit sample widths supported\")\n\n\tsrate = wav.getframerate()\t\n\n\tbuffer = wav.readframes(wav.getnframes())\n\twav.close()\n\n\tms = (len(buffer) / 2)/(srate/1000)\n\tif stereo == 1:\n\t\tms = ms / 2\n\t\n\tfprint = libofa.create_print(buffer, libofa.BYTE_ORDER_LE, len(buffer) / 2,\n\t\t\t\t\t\t\t\tsrate, stereo);\n\n\treturn (fprint, ms)", "def load_wav(file_path):\n sample_rate, data = wavfile.read(file_path)\n return data, sample_rate", "def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]", "def load_wav_file(file_path: str):\n rate, data = wavfile.read(file_path)\n return rate, data", "def read_wav(wav_file):\n w = wave.open(wav_file)\n n = 60 * 10000\n if w.getnframes() < n * 2:\n raise ValueError('Le fichier est trop court')\n frames = w.readframes(n)\n wav_data1 = struct.unpack('%dh' % n, frames)\n frames = w.readframes(n)\n wav_data2 = struct.unpack('%dh' % n, frames)\n return wav_data1, wav_data2", "def parse(cls, file: Keyvalues) -> Dict[str, 'Sound']:\n sounds = {}\n for snd_prop in file:\n volume = split_float(\n snd_prop, 'volume',\n VOLUME.__getitem__,\n 1.0,\n )\n pitch = split_float(\n snd_prop, 'pitch',\n Pitch.__getitem__,\n 100.0,\n )\n\n if 'soundlevel' in snd_prop:\n level = split_float(\n snd_prop, 'soundlevel',\n Level.__getitem__,\n Level.SNDLVL_NORM,\n )\n elif 'attenuation' in snd_prop:\n atten_min, atten_max = split_float(\n snd_prop, 
'attenuation',\n ATTENUATION.__getitem__,\n ATTENUATION['ATTN_IDLE'],\n )\n # Convert to a soundlevel.\n # See source_sdk/public/soundflags.h:ATTN_TO_SNDLVL()\n level = (\n (50.0 + 20.0 / atten_min) if atten_min else 0.0,\n (50.0 + 20.0 / atten_max) if atten_max else 0.0,\n )\n else:\n level = (Level.SNDLVL_NORM, Level.SNDLVL_NORM)\n\n # Either 1 \"wave\", or multiple in \"rndwave\".\n wavs: List[str] = []\n for prop in snd_prop:\n if prop.name == 'wave':\n wavs.append(prop.value)\n elif prop.name == 'rndwave':\n for subprop in prop:\n wavs.append(subprop.value)\n\n channel_str = snd_prop['channel', 'CHAN_AUTO'].upper()\n channel: Union[int, Channel]\n if channel_str.startswith('CHAN_'):\n channel = Channel(channel_str)\n else:\n channel = int(channel_str)\n\n sound_version = snd_prop.int('soundentry_version', 1)\n\n if 'operator_stacks' in snd_prop:\n if sound_version == 1:\n raise ValueError(\n 'Operator stacks used with version '\n f'less than 2 in \"{snd_prop.real_name}\"!'\n )\n\n start_stack, update_stack, stop_stack = (\n Keyvalues(stack_name, [\n prop.copy()\n for prop in\n snd_prop.find_children('operator_stacks', stack_name)\n ])\n for stack_name in\n ['start_stack', 'update_stack', 'stop_stack']\n )\n else:\n start_stack, update_stack, stop_stack = [None, None, None]\n\n sounds[snd_prop.name] = Sound(\n snd_prop.real_name,\n wavs,\n volume,\n channel,\n level,\n pitch,\n start_stack,\n update_stack,\n stop_stack,\n sound_version == 2,\n )\n return sounds", "def load_and_get_stats(filename):\n\n import scipy.io.wavfile as siow\n sampling_rate, amplitude_vector = siow.read(filename)\n\n wav_length = amplitude_vector.shape[0] / sampling_rate\n\n return sampling_rate, amplitude_vector, wav_length", "def read_wav(wav_file):\n w = wave.open(wav_file)\n n = 60 * 10000\n if w.getnframes() < n * 2:\n raise ValueError('Wave file too short')\n frames = w.readframes(n)\n wav_data1 = struct.unpack('%dh' % n, frames)\n frames = w.readframes(n)\n wav_data2 = struct.unpack('%dh' % n, frames)\n return wav_data1, wav_data2", "def wav_to_raw(path, log=False):\n rate, data = wavfile.read(path)\n if log:\n m, s = divmod(float(len(data))/rate, 60)\n h, m = divmod(m, 60)\n logging.info(\"Original recording length: %d h %d m %d s\" % (h, m, s))\n try:\n if data.shape[1] == 2:\n # If stereo (2-channel), take the average of the two channels.\n data = 0.5 * (data[:, 0] + data[:, 1])\n if log:\n logging.info('Stereo audio')\n except IndexError:\n if log:\n logging.info('Mono audio')\n return rate, data", "def inputwav(filename):\n data, sr = sf.read(filename)\n print('Decoding \"'+filename+'\"...')\n print('Sample rate is '+str(sr)+'...')\n try:\n ch=len(data[0,])\n except:\n ch=1\n print('File contains '+str(ch)+' audio channel(s)...')\n #Reshape the data so other functions can interpret the array if mono.\n #basically transposing the data\n if ch==1:\n data=data.reshape(-1,1)\n n=len(data)\n #This prevents log(data) producing nan when data is 0\n data[np.where(data==0)]=0.00001\n #convert to dB\n data_dB=20*np.log10(abs(data))\n return n, data,data_dB,sr, ch", "def read_wave(path):\n with contextlib.closing(wave.open(path, 'rb')) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate", "def read_wav_file(wave_file):\n return wavfile.read(wave_file)", "def read(filename):\n\n 
fileName, fileExtension = os.path.splitext(filename)\n wav_filename = filename\n rate, data = scipy.io.wavfile.read(str(wav_filename)) # the data is read in its native format\n if data.dtype =='int16':\n data = numpy.cast['float'](data)\n return [rate,data]", "def load_wav(file_name):\n fs, signal = wavfile.read(file_name)\n signal = np.float32(signal) / (2**(16)/2-1)\n return fs, signal", "def read_wave(f):\n # w will be an object of type wave.Wav_read.\n file = file_utils.open_or_fd(f, encoding=None)\n wav = wavio.read(file)\n # see https://github.com/WarrenWeckesser/wavio/blob/master/wavio.py for\n # format of `wav`\n\n # we want data as (num_channels, num_samples).. this is the\n # format that seems most compatible with convolutional code and\n # resampling.\n data = wav.data.swapaxes(0, 1)\n if data.dtype == np.int16:\n data = data.astype(np.float32) * (1.0 / 2**15)\n elif data.dtype == np.int24:\n data = data.astype(np.float32) * (1.0 / 2**23)\n else:\n if data.dtype != np.float32:\n raise RuntimeError(\"Array returned from wavio.read had \"\n \"unexpected dtype \".format(data.dtype))\n return (data, float(wav.rate))", "def get_wav_info(file_name):\n wr = wave.open(file_name, 'r')\n sample_width = wr.getsampwidth()\n frame_rate = wr.getframerate()\n num_frames = wr.getnframes()\n n_channels = wr.getnchannels()\n s = \"sample width: {} bytes\\n\".format(sample_width) + \\\n \"frame rate: {} Hz\\n\".format(frame_rate) + \\\n \"num frames: {}\\n\".format(num_frames) + \\\n \"track length: {} s\\n\".format(num_frames / frame_rate) + \\\n \"num channels: {}\\n\".format(n_channels)\n\n return s", "def wav_to_prosodic(path, sr=16000, offset=10):\n sound = parselmouth.Sound(path)\n pitch = sound.to_pitch() #timestep, pitch_floor, pitch_ceiling\n intensity = sound.to_intensity()\n\n features = []\n\n max_time = sound.get_total_duration()\n\n for time in np.arange(0, max_time, 0.001):\n f0 = pitch.get_value_at_time(time)\n f0_nan = 0\n if np.isnan(f0):\n f0 = 0\n f0_nan = 1\n int_db = intensity.get_value(time)\n if np.isnan(int_db):\n int_db = 0\n\n features.append([f0, f0_nan, int_db])\n\n array_feats = np.array(features).T\n\n print(\"SHAPE OF THE FEATURES:\", array_feats.shape)\n assert(not np.any(np.isnan(array_feats)))\n\n return array_feats, max_time", "def wavread(fname):\n fh = wave.open(fname,'rb')\n (nchannels, sampwidth, framerate, nframes, comptype, compname) = fh.getparams()\n if sampwidth == 2:\n frames = fh.readframes(nframes * nchannels)\n dn = struct.unpack_from('%dh' % nframes*nchannels, frames)\n if nchannels > 1:\n out = np.array([dn[i::nchannels] for i in range(nchannels)])/float(2**15)\n else:\n out = np.array(dn)/float(2**15)\n else:\n print('not a 16 bit wav-file')\n out = [0]\n fh.close()\n return (out,framerate)", "def from_wav(cls, fps):\n fpi = iter(fps)\n fs, data = wavfile.read(next(fpi))\n hlist = [data] + [wavfile.read(fp)[1] for fp in fpi]\n\n h = np.array(hlist)\n if data.dtype in [np.uint8, np.int16, np.int32]:\n lim_orig = (np.iinfo(data.dtype).min, np.iinfo(data.dtype).max)\n lim_new = (-1.0, 1.0)\n h = _rescale(h, lim_orig, lim_new).astype(np.double)\n\n return cls.from_time(fs, h)", "def tone_to_freq(tone):\n return math.pow(2, (tone - 69.0) / 12.0) * 440.0", "def sp_audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n sig = sig.unsqueeze(0)\n sig = hparams[\"speed_perturb\"](sig)\n sig = sig.squeeze(0)\n return sig", "def load_wav(wav_filepath):\n wv, _ = librosa.load(wav_filepath, sr=44100, mono=False) \n return wv", "def 
wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x", "def read_note_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):\n print(\"====> reading notes from sound file\")\n win_s = 512 // DOWN_SAMPLE # fft size\n hop_s = 256 // DOWN_SAMPLE # hop size\n # adjust sample rate\n s = source(filename, samplerate, hop_s)\n samplerate = s.samplerate\n notes_o = notes(\"default\", win_s, hop_s, samplerate)\n\n result = []\n total_frames = 0\n while True:\n samples, read = s()\n new_note = notes_o(samples)\n # note too high considered as noise\n if new_note[0] != 0 and new_note[0] <= 120:\n note_klass = Note(time=total_frames / float(samplerate), pitch=new_note[0], volume=new_note[1] - 20,\n duration=new_note[2])\n result.append(note_klass)\n total_frames += read\n if read < hop_s:\n break\n\n return result", "def _read_pha(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n header_for_livetime = hdul[0].header\n\n return data['channel'], data['counts'], header_for_livetime['LIVETIME']", "def audioRead(path):\n data, samplerate = sf.read(path)\n frames = data.shape[0]\n channels = len(data.shape)\n duration = 1/samplerate*frames\n return data, samplerate, path, duration, frames, channels", "def get_metadata(my_path):\n with wave.open(my_path, \"rb\") as wave_file:\n frame_rate = wave_file.getframerate()\n channels = wave_file.getnchannels()\n my_format = pyaudio.get_format_from_width(wave_file.getsampwidth())\n return str(frame_rate), str(channels), str(my_format)", "def get_line_wavelengths():\n line_wavelengths = OrderedDict() ; line_ratios = OrderedDict()\n \n line_wavelengths['PaB'] = [12821]\n line_ratios['PaB'] = [1.]\n line_wavelengths['Ha'] = [6564.61]\n line_ratios['Ha'] = [1.]\n line_wavelengths['Hb'] = [4862.68]\n line_ratios['Hb'] = [1.]\n line_wavelengths['Hg'] = [4341.68]\n line_ratios['Hg'] = [1.]\n line_wavelengths['Hd'] = [4102.892]\n line_ratios['Hd'] = [1.]\n \n line_wavelengths['OIII-4363'] = [4364.436]\n line_ratios['OIII-4363'] = [1.]\n line_wavelengths['OIII'] = [5008.240, 4960.295]\n line_ratios['OIII'] = [2.98, 1]\n \n # Split doublet, if needed\n line_wavelengths['OIII4959'] = [4960.295]\n line_ratios['OIII4959'] = [1]\n line_wavelengths['OIII5007'] = [5008.240]\n line_ratios['OIII5007'] = [1]\n \n line_wavelengths['OII'] = [3727.092, 3729.875]\n line_ratios['OII'] = [1, 1.] \n \n line_wavelengths['OI-6302'] = [6302.046, 6363.67]\n line_ratios['OI-6302'] = [1, 0.33]\n\n line_wavelengths['NeIII'] = [3869]\n line_ratios['NeIII'] = [1.]\n line_wavelengths['NeV'] = [3346.8]\n line_ratios['NeV'] = [1.]\n line_wavelengths['NeVI'] = [3426.85]\n line_ratios['NeVI'] = [1.]\n \n line_wavelengths['SIII'] = [9068.6, 9530.6][::-1]\n line_ratios['SIII'] = [1, 2.44][::-1]\n \n # Split doublet, if needed\n line_wavelengths['SIII9068'] = [9068.6]\n line_ratios['SIII9068'] = [1]\n line_wavelengths['SIII9531'] = [9530.6]\n line_ratios['SIII9531'] = [1]\n \n line_wavelengths['SII'] = [6718.29, 6732.67]\n line_ratios['SII'] = [1., 1.] 
\n \n line_wavelengths['HeII'] = [4687.5]\n line_ratios['HeII'] = [1.]\n line_wavelengths['HeI-5877'] = [5877.2]\n line_ratios['HeI-5877'] = [1.]\n line_wavelengths['HeI-3889'] = [3889.5]\n line_ratios['HeI-3889'] = [1.]\n line_wavelengths['HeI-1083'] = [10830.]\n line_ratios['HeI-1083'] = [1.]\n \n line_wavelengths['MgII'] = [2799.117]\n line_ratios['MgII'] = [1.]\n \n line_wavelengths['CIV-1549'] = [1549.480]\n line_ratios['CIV-1549'] = [1.]\n line_wavelengths['CIII-1908'] = [1908.734]\n line_ratios['CIII-1908'] = [1.]\n line_wavelengths['OIII-1663'] = [1665.85]\n line_ratios['OIII-1663'] = [1.]\n line_wavelengths['HeII-1640'] = [1640.4]\n line_ratios['HeII-1640'] = [1.]\n \n line_wavelengths['NII'] = [6549.86, 6585.27]\n line_ratios['NII'] = [1., 3]\n line_wavelengths['NIII-1750'] = [1750.]\n line_ratios['NIII-1750'] = [1.]\n line_wavelengths['NIV-1487'] = [1487.]\n line_ratios['NIV-1487'] = [1.]\n line_wavelengths['NV-1240'] = [1240.81]\n line_ratios['NV-1240'] = [1.]\n\n line_wavelengths['Lya'] = [1215.4]\n line_ratios['Lya'] = [1.]\n \n line_wavelengths['Lya+CIV'] = [1215.4, 1549.49]\n line_ratios['Lya+CIV'] = [1., 0.1]\n \n line_wavelengths['Ha+SII'] = [6564.61, 6718.29, 6732.67]\n line_ratios['Ha+SII'] = [1., 1./10, 1./10]\n line_wavelengths['Ha+SII+SIII+He'] = [6564.61, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+SII+SIII+He'] = [1., 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n\n line_wavelengths['Ha+NII+SII+SIII+He'] = [6564.61, 6549.86, 6585.27, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+NII+SII+SIII+He'] = [1., 1./(4.*4), 3./(4*4), 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n \n line_wavelengths['OIII+Hb'] = [5008.240, 4960.295, 4862.68]\n line_ratios['OIII+Hb'] = [2.98, 1, 3.98/6.]\n \n line_wavelengths['OIII+Hb+Ha'] = [5008.240, 4960.295, 4862.68, 6564.61]\n line_ratios['OIII+Hb+Ha'] = [2.98, 1, 3.98/10., 3.98/10.*2.86]\n\n line_wavelengths['OIII+Hb+Ha+SII'] = [5008.240, 4960.295, 4862.68, 6564.61, 6718.29, 6732.67]\n line_ratios['OIII+Hb+Ha+SII'] = [2.98, 1, 3.98/10., 3.98/10.*2.86*4, 3.98/10.*2.86/10.*4, 3.98/10.*2.86/10.*4]\n\n line_wavelengths['OIII+OII'] = [5008.240, 4960.295, 3729.875]\n line_ratios['OIII+OII'] = [2.98, 1, 3.98/4.]\n \n line_wavelengths['OII+Ne'] = [3729.875, 3869]\n line_ratios['OII+Ne'] = [1, 1./5]\n \n return line_wavelengths, line_ratios", "def read_wav_data(timestamps, wavfile, snapint=[-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], fft_size=1024):\n sig, samplerate = librosa.load(wavfile, sr=None, mono=True)\n data = list()\n\n # normalize sound wave\n # sig = sig / np.sqrt(np.mean(sig**2, axis=0));\n # sig = sig / np.max(np.max(np.abs(sig), axis=0));\n sig = sig / np.max(np.abs(sig))\n\n # calc a length array\n tmpts = np.array(timestamps)\n timestamp_interval = tmpts[1:] - tmpts[:-1]\n timestamp_interval = np.append(timestamp_interval, timestamp_interval[-1])\n\n for sz in snapint:\n data_r = np.array([get_wav_data_at(max(0, min(len(sig) - fft_size, coord + timestamp_interval[i] * sz)),\n sig, samplerate, fft_size=fft_size, freq_high=samplerate//4) for i, coord in enumerate(timestamps)])\n data.append(data_r)\n\n raw_data = np.array(data)\n norm_data = np.tile(np.expand_dims(\n np.mean(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n std_data = np.tile(np.expand_dims(\n np.std(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n return (raw_data - norm_data) / std_data", "def preprocessing(filename):\n reporting(\"Preprocessing file...\", True)\n chdir(path.dirname(filename))\n (rate, sig) = 
wavefile.load(path.split(filename)[1])\n signal = sig[0]\n\n duration = len(signal) / rate\n reporting(f\"Done. Duration={duration}\")\n return signal", "def load_wav_16k_mono(self, filename):\n filename = utils.get_file_path('webapp/static/processed', filename)\n\n file_contents = tf.io.read_file(filename)\n wav, sample_rate = tf.audio.decode_wav(file_contents,\n desired_channels=1)\n wav = tf.squeeze(wav, axis=-1)\n sample_rate = tf.cast(sample_rate, dtype=tf.int64)\n wav = tfio.audio.resample(wav, rate_in=sample_rate, rate_out=16000)\n return wav", "def read_wav(fname, normalize=True):\n # samps_int16: N x C or N\n # N: number of samples\n # C: number of channels\n sampling_rate, samps_int16 = wavfile.read(fname)\n # N x C => C x N\n samps = samps_int16.astype(np.float)\n # tranpose because I used to put channel axis first\n if samps.ndim != 1:\n samps = np.transpose(samps)\n # normalize like MATLAB and librosa\n if normalize:\n samps = samps / MAX_INT16\n return sampling_rate, samps", "def readNextGenSpectrum(fname=''):\n\n print('Reading : ', fname)\n\n with open(fname, 'r') as rfile:\n dum = rfile.readline()\n sdum = dum.split()\n teff = float(sdum[0])\n logg = float(sdum[1])\n mph = float(sdum[2])\n dum = rfile.readline()\n nwav = float(dum.split()[0])\n\n bigline = []\n dum = rfile.readline()\n while dum.strip() != '':\n sdum = dum.split()\n for i in range(len(sdum)):\n bigline.append(float(sdum[i]))\n dum = rfile.readline()\n\n bigline = np.array(bigline)\n # Convert wavelength from angstrom to micron\n wav = bigline[:nwav] / 1e4\n inu = bigline[nwav:2 * nwav]\n bnu = bigline[nwav * 2:nwav * 3]\n\n ii = wav.argsort()\n wav = wav[ii]\n inu = inu[ii] * 1e-8 * wav * 1e4 / np.pi / (29979245800.0 / wav * 1e4)\n bnu = bnu[ii] * 1e-8 * wav * 1e4 / np.pi / (29979245800.0 / wav * 1e4)\n\n #\n # The unit is now erg/s/cm/Hz/ster\n #\n\n return {'teff': teff, 'logg': logg, 'mph': mph, 'nwav': nwav, 'wav': wav, 'inu': inu, 'bnu': bnu}", "def record_and_get_wav(self, time):\n sample_width, frames = self.record_audio(time)\n return WavFile(samples=frames, sample_width=sample_width, time=time)", "def mono(filename,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if ch==2:\n print('Converting to mono...')\n L=data[:,0]\n R=data[:,1]\n n=len(data)\n data_m=np.zeros((n,1))\n data_m=L/2.0+R/2.0\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_mono.wav',data_m,sr,'PCM_16')\n print('Done!')\n return data_m\n else:\n print( \"Error: input is already mono stoooooooooopid!\")", "def wavPlayer(data, rate, scale=False, autoplay=False):\r\n #if np.max(abs(data)) > 1 or scale:\r\n # data = data/np.max(abs(data))\r\n #data = (2**13*data).astype(np.int16)\r\n \r\n buffer = BytesIO()\r\n buffer.write(b'RIFF')\r\n buffer.write(b'\\x00\\x00\\x00\\x00')\r\n buffer.write(b'WAVE')\r\n \r\n buffer.write(b'fmt ')\r\n if data.ndim == 1:\r\n noc = 1\r\n else:\r\n noc = data.shape[1]\r\n \r\n bits = data.dtype.itemsize * 8\r\n sbytes = rate*(bits // 8)*noc\r\n ba = noc * (bits // 8)\r\n buffer.write(struct.pack('<ihHIIHH', 16, 1, noc, rate, sbytes, ba, bits))\r\n\r\n # data chunk\r\n buffer.write(b'data')\r\n buffer.write(struct.pack('<i', data.nbytes))\r\n\r\n if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):\r\n data = data.byteswap()\r\n\r\n buffer.write(data.astype(np.int16).tostring())\r\n\r\n # Determine file size and place it in correct position at start of the file.\r\n size = buffer.tell()\r\n buffer.seek(4)\r\n 
buffer.write(struct.pack('<i', size-8))\r\n \r\n val = buffer.getvalue()\r\n autoplay = \" autoplay=\\\"autoplay\\\"\"*autoplay + \"\"\r\n \r\n src = \"\"\"<audio controls=\"controls\" style=\"width:600px\"{autoplay}>\r\n <source controls src=\"data:audio/wav;base64,{base64}\" type=\"audio/wav\" />\r\n Your browser does not support the audio element.\r\n </audio>\"\"\".format(base64=base64.b64encode(val).decode(\"ascii\"), autoplay=autoplay)\r\n display(HTML(src))", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def read_audio(filename, sample_rate = 44100):\n loader = essentia.standard.MonoLoader(filename = filename, sampleRate = sample_rate)\n audio = loader()\n return audio", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > SILENCE:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def read_wavetxt(path):\n with open(path) as f:\n for line in f.readlines():\n line = line.strip()\n if 'SampleFrequence' in line:\n freq = int(line[16:])\n elif 'DataInput' in line:\n series = np.array(line[10:].split(',')).astype(np.float64)\n return (freq, series)", "def wav_to_sig(wav_file):\n spf = wave.open(wav_file,'r')\n sig = spf.readframes(-1)\n sig = np.fromstring(sig, 'Int16')\n fs = spf.getframerate()\n return (sig, fs)", "def load_wav_file(fname):\n fp = wave.open(fname, \"rb\")\n channels = fp.getnchannels()\n bitrate = fp.getsampwidth() * 8\n samplerate = fp.getframerate()\n buf = fp.readframes(fp.getnframes())\n return SoundData(buf, channels, bitrate, len(buf), samplerate)", "def signal_dictionary(music_filename):\n\tx = []\n\ty = []\n\tassign_points = {}\n\n\tsignal = input_waves.WAVtoSignal(music_filename)\n\tfor i in range(len(signal)):\n\t\tx = signal[i][0]\n\t\ty = signal[i][1]\n\t\tassign_points.update({x:y})\n\tprint 'assign dictionary complete'\n\treturn assign_points", "def graph_spectrogram(wav_file):\n rate, data = get_wav_info(wav_file)\n nfft = 200 # Length of each window segment\n fs = 8000 # Sampling frequencies\n noverlap = 120 # Overlap between windows\n nchannels = data.ndim\n if nchannels == 1:\n pxx, freqs, bins, im = plt.specgram(data, nfft, fs, noverlap = noverlap)\n elif nchannels == 2:\n pxx, freqs, bins, im = 
plt.specgram(data[:,0], nfft, fs, noverlap = noverlap)\n return pxx", "def samplerate(self):\n return self.sound.samplerate", "def silence_intervals(file_path,file_name):\r\n nsil_start_time=[]\r\n nsil_end_time=[]\r\n sil_start_time=[]\r\n sil_end_time=[]\r\n #read file \r\n audio, sample_rate = librosa.load(os.path.join(file_path,file_name))\r\n \r\n #silence extraction using librosa\r\n nsil_intv=librosa.effects.split(audio, top_db=30).astype('float32') / sample_rate\r\n \r\n #silence extraction using pyAudioanalysis\r\n # [Fs, x] = aIO.readAudioFile(os.path.join(file_path,file_name))\r\n # nsil_intv = np.array(aS.silenceRemoval(x, Fs, 0.020, 0.020, smoothWindow = 0.7, Weight = 0.3, plot = False))\r\n # print \"non-sil segments=\"+str(nsil_intv)\r\n\r\n #silence detection using webrtcvad (voice activity detection)\r\n #nsil_intv=np.array(vad_webrtcvad(file_path,file_name))\r\n\r\n\r\n dur=librosa.get_duration(y=audio, sr=sample_rate)\r\n print nsil_intv\r\n print dur\r\n print sample_rate\r\n curr_sil_start=0.0\r\n curr_sil_end=0.0\r\n for i in range(nsil_intv.shape[0]):\r\n nsil_start_time.append(nsil_intv[i][0])\r\n #sil_start_time=list(np.array(sil_start_time)/sample_rate)\r\n\r\n nsil_end_time.append(nsil_intv[i][1])\r\n #sil_end_time=list(np.array(sil_end_time)/sample_rate)\r\n\r\n for i in range(len(nsil_start_time)):\r\n curr_sil_end=nsil_start_time[i]\r\n sil_start_time.append(str(curr_sil_start))\r\n sil_end_time.append(str(curr_sil_end))\r\n curr_sil_start=nsil_end_time[i]\r\n\r\n print sil_start_time\r\n print sil_end_time\r\n return sil_start_time,sil_end_time", "def read_audio(self, path_to_wav):\n y, sr = librosa.load(path_to_wav, sr=None)\n return (y, sr)", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def _synthesize_tone(self, duration_in_msec):\n sample_count = int(float(self.sample_rate) * duration_in_msec * 0.001)\n # There are two bytes per 16-bit sample.\n tmp_buffer = bytearray(sample_count + sample_count)\n fscale = 2.0 * math.pi * self.tone_frequency * self.sample_period;\n # Loop and create the audio samples.\n index = 0\n # Create the rising envelope part of the tone.\n for i, gain in enumerate(self.pulse_shaping_list):\n angle = float(i) * fscale\n value = gain * math.sin(angle)\n byte0, byte1 = MorseCodeSender._float_to_16_bit_sample(value)\n # Write the bytes in little-endian order.\n tmp_buffer[index] = byte0\n tmp_buffer[index + 1] = byte1\n index += 2\n # Create the level part of the tone. 
Start at the next\n # sample index so that the phase is a continuous function.\n rising_falling_count = len(self.pulse_shaping_list)\n middle_sample_count = sample_count - (2 * rising_falling_count)\n for i in range(0, middle_sample_count):\n angle = float(i + rising_falling_count) * fscale\n value = math.sin(angle)\n byte0, byte1 = MorseCodeSender._float_to_16_bit_sample(value)\n # Write the bytes in little-endian order.\n tmp_buffer[index] = byte0\n tmp_buffer[index + 1] = byte1\n index += 2\n # Create the decaying part of the tone. Start at the next\n # sample index so that the phase is a continuous function.\n temp_count = rising_falling_count + middle_sample_count;\n for i, rev_gain in enumerate(self.pulse_shaping_list):\n angle = float(i + temp_count) * fscale\n value = (1.0 - rev_gain) * math.sin(angle)\n byte0, byte1 = MorseCodeSender._float_to_16_bit_sample(value)\n # Write the bytes in little-endian order.\n tmp_buffer[index] = byte0\n tmp_buffer[index + 1] = byte1\n index += 2\n return tmp_buffer", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def get_file_bpm(path, params = {}):\n try:\n win_s = params['win_s']\n samplerate = params['samplerate']\n hop_s = params['hop_s']\n except:\n \"\"\"\n # super fast\n samplerate, win_s, hop_s = 4000, 128, 64 \n # fast\n samplerate, win_s, hop_s = 8000, 512, 128\n \"\"\"\n # default:\n samplerate, win_s, hop_s = 44100, 1024, 512\n\n s = source(path, samplerate, hop_s)\n samplerate = s.samplerate\n o = tempo(\"specdiff\", win_s, hop_s, samplerate)\n # List of beats, in samples\n beats = []\n # Total number of frames read\n total_frames = 0\n\n while True:\n samples, read = s()\n is_beat = o(samples)\n if is_beat:\n this_beat = o.get_last_s()\n beats.append(this_beat)\n #if o.get_confidence() > .2 and len(beats) > 2.:\n # break\n total_frames += read\n if read < hop_s:\n break\n\n # Convert to periods and to bpm \n bpms = 60./diff(beats)\n b = median(bpms)\n return b", "def load(filename):\n root,ext = _os_path.splitext(filename)\n loader = LOADER[ext]\n frequency,raw_signal = loader(filename)\n iinfo = _numpy.iinfo(raw_signal.dtype)\n raw_signal_midpoint = (iinfo.max + iinfo.min)/2.\n raw_signal_range = iinfo.max - raw_signal_midpoint\n unit_output_signal = (raw_signal - raw_signal_midpoint)/raw_signal_range\n return (frequency, unit_output_signal)", "def micsample(listentime):\n frames, sampling_rate = record_audio(listentime)\n samples = np.hstack([np.frombuffer(i, np.int16) for i in frames])\n times = np.arange(samples.size) / sampling_rate\n return samples, times", "def read_speeches(filename):\n\n # Open a speech file\n speech_file = open(filename)\n\n # Create a new dictionary\n speech_dict = {}\n\n # Iterate over lines\n for line in speech_file:\n # Replace whitespace, including /n, at the end of a line with a single space\n line = line.rstrip() + ' '\n\n # Given that a title begins with #\n if line.startswith('#'):\n # Remove '# ' at the beginning and ': ' at the end, to be used as a title\n title = line[2:-2]\n # Assign the tile as a key in the dictionary\n speech_dict[title] = ''\n # A speech line does not begins with #\n else:\n # Not begins with [ either\n if line.startswith('[') is False:\n # Append the speech line to the already existing string of the corresponding title\n # The tile variable is kept from the previous loop(s)\n speech_dict[title] += line\n\n # Close the file\n speech_file.close()\n\n return speech_dict", "def read(f, normalized=False):\r\n a = pydub.AudioSegment.from_mp3(f)\r\n 
y = np.array(a.get_array_of_samples())\r\n if a.channels == 2:\r\n y = y.reshape((-1, 2))\r\n if normalized:\r\n return a.frame_rate, np.float32(y) / 2**15\r\n else:\r\n return a.frame_rate, y", "def sampling_rate(self):\n with audioread.audio_open(self.path) as f:\n return f.samplerate", "def from_frequency(frequency:float, detune=0) -> 'Pitch':\n return Pitch(1200*np.log2(frequency/440) + detune)", "def record_audio_to_file_and_get_wav(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()\n return WavFile(samples=frames, sample_width=sample_width, time=time, word=file_name)", "def save_timit_pitch():\n timit_names = []\n pitch_intensity_tables = []\n\n wav_txt_file_names = glob.glob(os.path.join(timit_pitch_data_path, '*.wav.txt'))\n for wav_txt_file in wav_txt_file_names:\n pitch_intensity = pd.read_csv(wav_txt_file, delimiter='\\t', dtype=np.float64, na_values=['?'])\n pitch_intensity = pitch_intensity.dropna()\n pitch_intensity.loc[pitch_intensity.pitch == 0, 'pitch'] = np.NaN\n pitch_intensity.loc[pitch_intensity.intensity == 0, 'intensity'] = np.NaN\n pitch_intensity['log_hz'] = np.log(pitch_intensity['pitch'])\n pitch_intensity['erb_rate'] = convert_hz(pitch_intensity['pitch'], \"erb\")\n pitch = pitch_intensity['log_hz']\n pitch_intensity['rel_pitch_global'] = (pitch - np.mean(pitch))/np.std(pitch)\n pitch = pitch_intensity['erb_rate']\n pitch_intensity['rel_pitch_global_erb'] = (pitch - np.mean(pitch))/np.std(pitch)\n\n timit_name = wav_txt_file.split(os.sep)[-1][:-8]\n\n timit_names.append(timit_name)\n pitch_intensity_tables.append(pitch_intensity)\n\n timit_pitch = pd.concat(pitch_intensity_tables, keys=timit_names)\n #print(np.mean(timit_pitch['log_hz'])) # -> 4.9406, (no log: 147.0387)\n #print(np.std(timit_pitch['log_hz'])) # -> 0.3112, (no log: 48.59846)\n timit_pitch['abs_pitch'] = (timit_pitch['log_hz'] - np.mean(timit_pitch['log_hz']))/np.std(timit_pitch['log_hz'])\n timit_pitch['abs_pitch_erb'] = (timit_pitch['erb_rate'] - np.mean(timit_pitch['erb_rate']))/np.std(timit_pitch['erb_rate'])\n timit_pitch['abs_pitch_change'] = timit_pitch['abs_pitch'].diff()\n timit_pitch['abs_pitch_erb_change'] = timit_pitch['abs_pitch_erb'].diff()\n #print(np.mean(timit_pitch.intensity)) # -> 63.000\n #print(np.std(timit_pitch.intensity)) # -> 15.537\n timit_pitch['zscore_intensity'] = (timit_pitch.intensity - np.mean(timit_pitch.intensity))/np.std(timit_pitch.intensity)\n\n filename = os.path.join(processed_timit_data_path, 'timit_pitch.h5')\n timit_pitch.to_hdf(filename, 'timit_pitch')\n return timit_pitch", "def get_sig(filename):\n\n (rate, data) = readAudioFile(filename)\n\n return rate, data", "def format_pitch_data(pd):\n\n\tfor t in pd.keys():\n\t\tpd[t] = pd[t]['Pitch'] \t # make each value just the pitch, instead of a sub-dict\n\t\tif pd[t] == 0:\n\t\t\tdel pd[t]\t\t # if pitch is 0, remove from dictionary\n\n\t# now, pd is dict where each key is time (x value) and each value is pitch (y value)\n\t# to format for graph input, make list of dicts containing x-y pairs\n\tdatapoints_list = []\n\tfor t in pd.keys():\n\t\tdatapoint = {}\n\t\tdatapoint[\"x\"] = t\n\t\tdatapoint[\"y\"] = pd[t]\n\t\tdatapoints_list.append(datapoint)\n\n\t# sort the list by the value of \"x\"\n\tdatapoints_sorted = sorted(datapoints_list, key=itemgetter(\"x\"))\n\n\t# with this sorted list, do some data smoothing\n\t# pull 
out every nth item\n\ti = 0\n\tdatapoints_keep = []\n\twhile i < len(datapoints_sorted):\n\t\tdatapoints_keep.append(datapoints_sorted[i])\n\t\ti += 50\n\t# make sure last item is included so length of curve isn't lost\n\tdatapoints_keep.append(datapoints_sorted[-1])\n\n\t# print \"num of datapoints:\", len(datapoints_keep)\n\t# print datapoints_keep[:100]\n\n\treturn json.dumps(datapoints_keep, sort_keys=True)", "def mic_audio(dur):\n\n audio,b = microphone.record_audio(dur)\n audio = np.hstack([np.frombuffer(i,np.int16) for i in audio])\n return audio", "def single_analyze_wav(self, filePath):\n\n tChopped, vChopped, fVals,\\\n powerFFT, peakFreqs, peakAmps = Utils.AnalyzeFFT(filePath, tChop=self.settings['processing']['tChop'],\n detail=self.settings['processing']['detail'])\n\n self.analyzeDone.emit(tChopped, vChopped, fVals, powerFFT, peakFreqs, peakAmps, filePath)\n self.update_table(peakFreqs, peakAmps)", "def process_audio_file(self, file_name):\n sig, sr = librosa.load(file_name, mono=True)\n return self._extract_function(sig, sr)", "def decode_wav(raw_data):\n return _kaldi_module.decode_wav(raw_data)", "def slow(filename,p=10,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if p>0:\n print('Slowing...')\n if p<0:\n print('Warning: You are speeding up the audio! Use positive value'\n +' for p to slow.')\n f=resample(data,int(len(data)*(1+p/100.0)))\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_slow.wav',f,sr,'PCM_16')\n print('Done!')\n return f", "def wav_to_fourier(wav_file,\n rate_limit=6000.0,\n step=1.0):\n rate, aud_data = read(wav_file)\n # Should be mono\n if len(aud_data) != len(aud_data.ravel()):\n aud_data = np.mean(aud_data, axis=1)\n\n # Zero padding\n len_data = aud_data.shape[0]\n channel_1 = np.zeros(2 ** (int(np.ceil(np.log2(len_data)))))\n channel_1[0:len_data] = aud_data\n\n # Fourier analysis\n fourier = np.abs(np.fft.fft(channel_1))\n freq = np.linspace(0, rate, fourier.shape[0])\n\n freq, fourier = limit_by_freq(freq,\n fourier,\n upper_limit=rate_limit)\n freq, fourier = group_by_freq(freq,\n fourier,\n step=step)\n # Max frequency should be 100.0\n a = np.max(np.abs(fourier)) / 100.0\n fourier = fourier / a\n\n return freq, fourier", "def read_audio(file_path, resample_rate=None, to_mono=False):\n return librosa.load(file_path, sr=resample_rate, mono=to_mono)", "def load_wav_to_torch(full_path):\n sampling_rate, data = read(full_path)\n return torch.from_numpy(data).float(), sampling_rate", "def load_wav_dic(wav_dic):\n noisy_path, clean_path = wav_dic[\"noisy\"], wav_dic[\"clean\"]\n noisy, fs = sf.read(noisy_path, dtype=\"float32\")\n clean, fs = sf.read(clean_path, dtype=\"float32\")\n return noisy, clean, fs", "def simp(filename, seconds_per_average=0.001):\n wavefile = wave.open(filename, 'rb')\n print \"# gnuplot data for %s, seconds_per_average=%s\" % \\\n (filename, seconds_per_average)\n print \"# %d channels, samplewidth: %d, framerate: %s, frames: %d\\n# Compression type: %s (%s)\" % wavefile.getparams()\n\n framerate = wavefile.getframerate() # frames / second\n frames_to_read = int(framerate * seconds_per_average)\n print \"# frames_to_read=%s\" % frames_to_read\n\n time_and_max = []\n values = []\n count = 0\n while 1:\n fragment = wavefile.readframes(frames_to_read)\n if not fragment:\n break\n\n # other possibilities:\n # m = audioop.avg(fragment, 2)\n # print count, \"%s %s\" % audioop.minmax(fragment, 2)\n\n m = audioop.rms(fragment, wavefile._framesize)\n time_and_max.append((count, m))\n 
values.append(m)\n count += frames_to_read\n # if count>1000000:\n # break\n\n # find the min and max\n min_value, max_value = min(values), max(values)\n points = [] # (secs,height)\n for count, value in time_and_max:\n points.append((count/framerate,\n (value - min_value) / (max_value - min_value)))\n return points", "def decode_audio(fp, fs=None, mono=False, normalize=False, fastwav=False, measured = False):\n if measured:\n fp = fp.decode('latin').replace(\"clean\", \"measured\")\n\n if fastwav:\n # Read with scipy wavread (fast).\n _fs, _wav = wavread(fp)\n if fs is not None and fs != _fs:\n raise NotImplementedError('Fastwav cannot resample audio.')\n if _wav.dtype == np.int16:\n _wav = _wav.astype(np.float32)\n _wav /= 32768.\n elif _wav.dtype == np.float32:\n pass\n else:\n raise NotImplementedError('Fastwav cannot process atypical WAV files.')\n else:\n # TODO: librosa currently optional due to issue with cluster installation\n import librosa\n # Decode with librosa load (slow but supports file formats like mp3).\n _wav, _fs = librosa.core.load(fp, sr=fs, mono=False)\n if _wav.ndim == 2:\n _wav = np.swapaxes(_wav, 0, 1)\n\n assert _wav.dtype == np.float32\n\n # At this point, _wav is np.float32 either [nsamps,] or [nsamps, nch].\n # We want [nsamps, 1, nch] to mimic 2D shape of spectral feats.\n if _wav.ndim == 1:\n nsamps = _wav.shape[0]\n nch = 1\n else:\n nsamps, nch = _wav.shape\n _wav = np.reshape(_wav, [nsamps, 1, nch])\n \n # Average channels if we want monaural audio.\n if mono:\n _wav = np.mean(_wav, 2, keepdims=True)\n\n if normalize:\n _wav /= np.max(np.abs(_wav))\n\n return _wav", "def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")", "def load_audio(self):\n df = pd.read_csv(\"{dir}/iteration_{iter}.csv\".format(dir=self.directory, iter=self.iteration),\n usecols=[1, 2, 3])\n\n doa_from_file = df.iloc[0][1]\n wav_name = df.iloc[0][0]\n filename = \"{dir}/{wav_name}\".format(dir=self.directory, wav_name=wav_name)\n\n y, sr = librosa.load(filename, mono=False)\n\n y_8k = librosa.resample(y, sr, 8000)\n result_x = librosa.util.fix_length(y_8k, 8000)\n\n return result_x, doa_from_file", "def test_wav(self, dtype, sample_rate, num_channels):\n duration = 1\n path = self.get_temp_path(\"data.wav\")\n data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)\n save_wav(path, data, sample_rate)\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)\n assert info.encoding == get_encoding(\"wav\", dtype)", "def get_pitch(self, start):\n spectrum, amplitude = self.frequency_spectrum(start)\n peaks = self.get_peaks(spectrum, amplitude)\n\n if self.print:\n self.plot_clip(spectrum, amplitude, peaks)\n \n return 
self.get_note_probabilities(peaks)", "def pitch_gen(freq: float, duration: float, signal: np.array, sample_freq: int, alpha=0.99, ref_length=50):\n total_sample_number = int(sample_freq * duration)\n desire_signal_length = int(sample_freq / freq)\n # pad or cur signal\n if len(signal) >= desire_signal_length:\n input_signal = signal[: desire_signal_length]\n else: # pad\n input_signal = np.pad(signal, (0, desire_signal_length - len(signal)), 'constant')\n\n result = KS(input_signal, N=total_sample_number, alpha=alpha, ref_length=ref_length)\n\n return result", "def loadTTLPulse(file, n_channels = 2, fs = 20000, track = 0, mscope = 1):\n f = open(file, 'rb')\n startoffile = f.seek(0, 0)\n endoffile = f.seek(0, 2)\n bytes_size = 2 \n n_samples = int((endoffile-startoffile)/n_channels/bytes_size)\n f.close()\n with open(file, 'rb') as f:\n data = np.fromfile(f, np.uint16).reshape((n_samples, n_channels))\n \n ch_track = data[:,track].astype(np.int32)\n peaks,_ = scipy.signal.find_peaks(np.diff(ch_track), height=30000)\n timestep = np.arange(0, len(data))/fs\n peaks+=1\n ttl_track = pd.Series(index = timestep[peaks], data = data[peaks,track]) \n\n ch_mscope = data[:,mscope].astype(np.int32)\n peaks,_ = scipy.signal.find_peaks(np.abs(np.diff(ch_mscope)), height=30000)\n peaks+=1\n ttl_mscope = pd.Series(index = timestep[peaks], data = data[peaks,mscope])\n\n return ttl_track, ttl_mscope", "def extract_beat(audio, sample_rate = 44100):\n beat_tracker = BeatTrackerDegara()\n beats_time = beat_tracker(audio)\n beats = np.array(map(lambda time : round(time * sample_rate), beats_time))\n beats = np.append(0, beats)\n beats_time = np.append(0, beats_time)\n\n return beats, beats_time", "def write_wav(filename, data, rate = 44100):\r\n \r\n # Compress the data (the input format is likely to be float64)\r\n # Make sure that the format is readable by Librosa\r\n maxv = np.iinfo(np.int16).max\r\n lb_write_wav(filename, (data * maxv).astype(np.int16), rate) \r\n \r\n return(None)", "def _save_wav(buff, data, rate) -> None:\n # Code inspired from `IPython.display.Audio`\n data = np.array(data, dtype=float)\n\n bit_depth = 16\n max_sample_value = int(2**(bit_depth - 1)) - 1\n\n num_channels = data.shape[1] if len(data.shape) > 1 else 1\n scaled = np.int16(data / np.max(np.abs(data)) * max_sample_value)\n # The WAVE spec expects little-endian integers of \"sampwidth\" bytes each.\n # Numpy's `astype` accepts array-protocol type strings, so we specify:\n # - '<' to indicate little endian\n # - 'i' to specify signed integer\n # - the number of bytes used to represent each integer\n # See: https://numpy.org/doc/stable/reference/arrays.dtypes.html\n encoded_wav = scaled.astype(f'<i{bit_depth // 8}', copy=False).tobytes()\n\n with wave.open(buff, mode='wb') as waveobj:\n waveobj.setnchannels(num_channels)\n waveobj.setframerate(rate)\n waveobj.setsampwidth(bit_depth // 8)\n waveobj.setcomptype('NONE', 'NONE')\n waveobj.writeframes(encoded_wav)", "def decodeSpeech(hmmd, lmdir, dictp, wavfile):\n\n try:\n import sphinxbase\n import pocketsphinx as ps\n\n except:\n import pocketsphinx as ps\n print \"\"\"Pocket sphinx and sphixbase is not installed\n in your system. 
Please install it with package manager.\n \"\"\"\n speechRec = ps.Decoder(hmm=hmmd, lm=lmdir, dict=dictp)\n wavFile = file(wavfile, 'rb')\n speechRec.decode_raw(wavFile)\n result = speechRec.get_hyp()\n print result[0]\n return result[0]", "def checkfrequency(inputgiven):\n data_size = 40000\n wav_file = wave.open(inputgiven, 'r')\n data = wav_file.readframes(data_size)\n wav_file.close()\n data = struct.unpack('{n}h'.format(n=data_size), data)\n print max(data)", "def play(sampler, name=\"/Users/Jxie0755/Documents/DXcodings/Learning_Python/CS_61A/week03/mario.wav\", seconds=2):\n out = open(name, \"wb\")\n out.setnchannels(1)\n out.setsampwidth(2)\n out.setframerate(frame_rate)\n t = 0\n while t < seconds * frame_rate:\n sample = sampler(t)\n out.writeframes(encode(sample))\n t = t + 1\n out.close()", "def extract_duration(path, out_file):\n\n # sanity_check: check if the paths are correct\n # sanity_check: check if the out_file exists; if not then create one\n\n metadata_filepath_duration = open(out_file, 'w')\n\n for subdir, dirs, files in os.walk(path):\n for file in files:\n file_path = os.path.join(subdir, file)\n wavfile, sampling_rate = librosa.load(file_path)\n wavfile_duration = librosa.get_duration(y=wavfile, sr=sampling_rate)\n metadata_filepath_duration.write(file_path + ' | ' + str(wavfile_duration) + '\\n')\n\n metadata_filepath_duration.close()\n\n # sorting the wavfiles alphabetically to maintain order\n subprocess.call(['sort', out_file, '-o', out_file])", "def read_audio(f, downmix):\n if f.endswith('.mp3'):\n f = _mp3_hook(f)\n sr, audio = scipy.io.wavfile.read(f)\n if not audio.dtype is np.float32:\n audio = _normalize_pcm(audio)\n if downmix and len(audio.shape) == 2:\n audio = down_mix(audio)\n return sr, audio", "def tonify(self, tone_generator=None, verbose=False):\n if tone_generator is None:\n tone_generator = ToneGenerator('tonifyoutput.wav')\n tone_generator.file.setnchannels(len(self.sheets))\n # Find the max length (in seconds) of the data sheets\n max_length = 0.0\n for sheet in self.sheets:\n if len(sheet) > max_length:\n max_length = len(sheet)\n nframes = int(max_length * tone_generator.sample_rate)\n tone_generator.file.setnframes(nframes)\n\n tone_strs = []\n for d in self.sheets:\n if verbose:\n print \"File:\", d.data.name\n print \"Frequencies:\", self.freqs[self.sheets.index(d)]\n values = []\n tone_generator.setfreqs(self.freqs[self.sheets.index(d)])\n for i in range(0, len(d.times)):\n duration = d.durations[i]\n calls = d.calls[i]\n if verbose:\n print \"\\ttone: (%d, %d, %d) for %f seconds\" % (calls[0], calls[1],\n calls[2], duration)\n tone = tone_generator.get_tone((calls[0], calls[1], calls[2]), duration)\n values.append(str(tone))\n try:\n delta = float((d.times[i + 1] - d.times[i]).seconds)\n if float(delta) - duration < 0.0:\n silence_duration = 0.0\n else:\n silence_duration = float(delta) - duration\n except IndexError:\n break\n if verbose:\n print \"\\tsilence for\", silence_duration,\"seconds\"\n silence = tone_generator.get_silence(silence_duration)\n values.append(str(silence))\n if len(d) < max_length:\n end_silence = tone_generator.get_silence(max_length - len(d))\n values.append(str(end_silence))\n value_str = ''.join(values)\n tone_strs.append(value_str)\n \n if verbose:\n print \"Writing to file... 
(may take several minutes)\"\n combined = interleave_binarystr(tone_strs)\n tone_generator.file.writeframes(combined)\n if verbose:\n print \"Finished writing.\"\n tone_generator.close()", "def read(self, path):\n pbase = os.path.splitext(path)[0]\n gsid = pbase.split('/')[-2]\n gender, sid = gsid[0], gsid[1:]\n assert sid in self._spkr_table\n phoneseq = phnread(pbase+'.PHN')\n wrdseq = phnread(pbase+'.WRD')\n transcrpt = txtread(pbase+'.TXT')\n sample = TIMITSpeech(\n *audioread(path), speaker=sid, gender=gender,\n transcript=transcrpt, phonemeseq=phoneseq,\n wordseq=wrdseq\n )\n #sample.phonemeseq = [\n # (t, PHONETABLE[p]) for t, p in sample.phonemeseq]\n return sample", "def raw_to_wav(data, path, rate=44100):\n wavfile.write(path, rate, data)", "def _process_utterance(pml_dir, wav_dir, index, wav_path, pml_path, hparams):\n try:\n # Load the audio as numpy array\n wav = audio.load_wav(wav_path)\n except FileNotFoundError: # catch missing wav exception\n print('file {} present in csv metadata is not present in wav folder. skipping!'.format(\n wav_path))\n return None\n\n # rescale wav\n if hparams.rescale:\n wav = wav / np.abs(wav).max() * hparams.rescaling_max\n\n # Assert all audio is in [-1, 1]\n if (wav > 1.).any() or (wav < -1.).any():\n raise RuntimeError('wav has invalid value: {}'.format(wav_path))\n\n # Mu-law quantize\n if is_mulaw_quantize(hparams.input_type):\n # [0, quantize_channels)\n out = mulaw_quantize(wav, hparams.quantize_channels)\n\n constant_values = mulaw_quantize(0, hparams.quantize_channels)\n out_dtype = np.int16\n\n elif is_mulaw(hparams.input_type):\n # [-1, 1]\n out = mulaw(wav, hparams.quantize_channels)\n constant_values = mulaw(0., hparams.quantize_channels)\n out_dtype = np.float32\n\n else:\n # [-1, 1]\n out = wav\n constant_values = 0.\n out_dtype = np.float32\n\n # Get the PML features from the cmp file\n pml_cmp = np.fromfile(pml_path, dtype=np.float32)\n pml_features = pml_cmp.reshape((-1, hparams.pml_dimension))\n pml_frames = pml_features.shape[0]\n\n if pml_frames > hparams.max_pml_frames and hparams.clip_pmls_length:\n return None\n\n # Find parameters\n n_fft = (hparams.num_freq - 1) * 2\n\n if hparams.use_lws:\n # Ensure time resolution adjustement between audio and mel-spectrogram\n l, r = audio.pad_lr(wav, n_fft, audio.get_hop_size(hparams))\n\n # Zero pad audio signal\n out = np.pad(out, (l, r), mode='constant', constant_values=constant_values)\n else:\n # Ensure time resolution adjustement between audio and mel-spectrogram\n l_pad, r_pad = audio.librosa_pad_lr(wav, n_fft, audio.get_hop_size(hparams))\n\n # Reflect pad audio signal (Just like it's done in Librosa to avoid frame inconsistency)\n out = np.pad(out, (l_pad, r_pad), mode='constant', constant_values=constant_values)\n\n # print(len(out), pml_frames, audio.get_hop_size(hparams), pml_frames * audio.get_hop_size(hparams))\n assert len(out) >= pml_frames * audio.get_hop_size(hparams)\n\n # time resolution adjustment\n # ensure length of raw audio is multiple of hop size so that we can use\n # transposed convolution to upsample\n out = out[:pml_frames * audio.get_hop_size(hparams)]\n assert len(out) % audio.get_hop_size(hparams) == 0\n time_steps = len(out)\n\n # Write the spectrogram and audio to disk\n audio_filename = os.path.join(wav_dir, 'audio-{}.npy'.format(index))\n pml_filename = os.path.join(pml_dir, 'pml-{}.npy'.format(index))\n np.save(audio_filename, out.astype(out_dtype), allow_pickle=False)\n np.save(pml_filename, pml_features, allow_pickle=False)\n\n # global 
condition features\n if hparams.gin_channels > 0:\n raise RuntimeError('When activating global conditions, please set your speaker_id rules in line 129 of '\n 'datasets/wavenet_preprocessor.py to use them during training')\n else:\n speaker_id = '<no_g>'\n\n # Return a tuple describing this training example\n return audio_filename, pml_path, pml_filename, speaker_id, time_steps, pml_frames", "def get_wav_duration(wav_bytes: bytes) -> float:\n with io.BytesIO(wav_bytes) as wav_buffer:\n wav_file: wave.Wave_read = wave.open(wav_buffer, \"rb\")\n with wav_file:\n frames = wav_file.getnframes()\n rate = wav_file.getframerate()\n return frames / float(rate)", "def read_wav(filename, offset=0, nframes=None, dtype=torch.double):\n\n if nframes is None: # Load whole file\n fs, x = wavfile.read(filename, mmap=False)\n x = torch.tensor(x, dtype=dtype)\n x.unsqueeze_(dim=0)\n\n else: # Load a part\n with wave.open(filename) as f:\n fs = f.getframerate()\n f.setpos(offset)\n buff = f.readframes(nframes)\n x = torch.tensor(np.frombuffer(buff, np.int16), dtype=dtype)\n x.unsqueeze_(dim=0)\n x -= x.mean()\n\n return x.to(DEVICE), fs", "def readAudioFile(path):\n\n extension = os.path.splitext(path)[1]\n\n try:\n # Commented below, as we don't need this\n # #if extension.lower() == '.wav':\n # #[Fs, x] = wavfile.read(path)\n # if extension.lower() == '.aif' or extension.lower() == '.aiff':\n # s = aifc.open(path, 'r')\n # nframes = s.getnframes()\n # strsig = s.readframes(nframes)\n # x = numpy.fromstring(strsig, numpy.short).byteswap()\n # Fs = s.getframerate()\n if extension.lower() == '.mp3' or extension.lower() == '.wav' or extension.lower() == '.au' or extension.lower() == '.ogg':\n try:\n audiofile = AudioSegment.from_file(path)\n except:\n print(\"Error: file not found or other I/O error. 
\"\n \"(DECODING FAILED)\")\n return -1 ,-1\n\n if audiofile.sample_width == 2:\n data = numpy.fromstring(audiofile._data, numpy.int16)\n elif audiofile.sample_width == 4:\n data = numpy.fromstring(audiofile._data, numpy.int32)\n else:\n return -1, -1\n Fs = audiofile.frame_rate\n x = numpy.array(data[0::audiofile.channels]).T\n else:\n print(\"Error in readAudioFile(): Unknown file type!\")\n return -1, -1\n except IOError:\n print(\"Error: file not found or other I/O error.\")\n return -1, -1\n\n if x.ndim == 2:\n if x.shape[1] == 2:\n x = x.flatten()\n\n return Fs, x", "def get_wav_name(wav_path):\r\n filename = wav_path.rsplit(\"\\\\\")[-1]\r\n id_list = filename.split(\".\")[0]\r\n wav_speaker = id_list.split(\"_\")\r\n return wav_speaker[0], wav_speaker[1]", "def generate_wavplot(song_name):\n\n filepath = features[features.inferred_name.str.title() == song_name].feature_file.values[0]\n rate, wave = wavfile.read(filepath)\n mono = np.mean(wave, axis=1)\n mono.shape\n plt.figure(figsize=(20,6))\n plt.axis('off')\n plt.plot(mono[::mono.shape[0]//6000], color='white')\n plt.tight_layout;\n friendly_song_name = '_'.join(song_name.split()).lower()\n output_filepath = './static/wavplots/' + friendly_song_name + '.png'\n plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0, transparent=True)\n return output_filepath", "def play_audio_file(self, fname=DETECT_DONG):\n ding_wav = wave.open(fname, 'rb')\n ding_data = ding_wav.readframes(ding_wav.getnframes())\n # with no_alsa_error():\n audio = pyaudio.PyAudio()\n stream_out = audio.open(\n format=audio.get_format_from_width(ding_wav.getsampwidth()),\n channels=ding_wav.getnchannels(),\n rate=ding_wav.getframerate(), input=False, output=True)\n stream_out.start_stream()\n stream_out.write(ding_data)\n time.sleep(0.2)\n stream_out.stop_stream()\n stream_out.close()\n audio.terminate()", "def wave(pi, gpio, hz, secs, on=1, offset=0):\n micros_left = int(secs * 1000000)\n transitions = int(2 * hz * secs)\n micros = micros_left / transitions\n\n if (offset < 0) or (offset > micros):\n print(\"Illegal offset {} for hz {}\".format(offset, hz))\n exit()\n\n pi.set_mode(gpio, pigpio.OUTPUT)\n\n wf = [] # Empty waveform.\n\n if offset:\n wf.append(pigpio.pulse(0, 0, offset))\n micros_left -= micros\n last_micros = micros - offset\n transitions -= 1\n\n for t in range(transitions, 0, -1):\n micros = micros_left / t\n if (t & 1) == (on & 1):\n wf.append(pigpio.pulse(0, 1<<gpio, micros))\n else:\n wf.append(pigpio.pulse(1<<gpio, 0, micros))\n micros_left -= micros\n\n if offset:\n if on:\n wf.append(pigpio.pulse(1<<gpio, 0, last_micros))\n else:\n wf.append(pigpio.pulse(0, 1<<gpio, last_micros))\n\n pi.wave_add_generic(wf)\n pi.wave_send_repeat(pi.wave_create())" ]
[ "0.70273054", "0.70135546", "0.68500084", "0.6821027", "0.6812894", "0.6796246", "0.65991676", "0.6327907", "0.6325302", "0.62763965", "0.62052196", "0.61778903", "0.60919714", "0.60821027", "0.60718834", "0.60426664", "0.59239006", "0.59204537", "0.5904547", "0.58931905", "0.58914626", "0.5872094", "0.5866617", "0.58492965", "0.5761596", "0.5707479", "0.5656295", "0.5656023", "0.5653055", "0.564231", "0.56335354", "0.560535", "0.56025934", "0.5597245", "0.55909026", "0.5574474", "0.5539481", "0.5534304", "0.5522624", "0.5520059", "0.55175614", "0.550967", "0.5509119", "0.55017835", "0.54978997", "0.5497711", "0.54865444", "0.54853845", "0.5484697", "0.5473517", "0.5465837", "0.5460034", "0.54588896", "0.545026", "0.5445254", "0.5437466", "0.54361683", "0.5431766", "0.54274046", "0.5426893", "0.5414216", "0.54095185", "0.53995675", "0.53924966", "0.53743005", "0.53598315", "0.53539485", "0.5342214", "0.5326171", "0.53151184", "0.53137136", "0.52978605", "0.52967584", "0.5294765", "0.52905804", "0.5283515", "0.5279952", "0.5263066", "0.52577126", "0.52519506", "0.5243364", "0.52353483", "0.5233773", "0.5228164", "0.522146", "0.5215236", "0.5208882", "0.52087027", "0.52056855", "0.52038515", "0.5203597", "0.5200142", "0.51992565", "0.5196355", "0.5195985", "0.5195538", "0.51953286", "0.5187094", "0.5186792", "0.5157119" ]
0.6584172
7
Checks whether the given ISBN13 code is valid. >>> isISBN13('9789743159664') True >>> isISBN13('9787954527409') False >>> isISBN13('8799743159665') False
def isISBN13(code):\n\n    # helper function for computing ISBN-13 check digit\n    def check_digit(code):\n\n        # compute check digit\n        check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))\n\n        # convert check digit into a single digit\n        return str((10 - check) % 10)\n\n    # check whether given code is a string\n    if not isinstance(code, str):\n        return False\n\n    # check whether given code contains 13 characters\n    if len(code) != 13:\n        return False\n\n    # check whether first twelve characters of given code are digits\n    if not code[:12].isdigit():\n        return False\n\n    # check the check digit\n    return check_digit(code) == code[-1]
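As a quick worked example of the check-digit rule used above (plain standalone Python, independent of the function itself; the literal value is taken from the first doctest), the alternating 1/3 weights can be recomputed by hand:

code = "9789743159664"
# weights alternate 1, 3, 1, 3, ... over the first twelve digits
check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))
print((10 - check) % 10)   # 4, matching the last character, so the code is valid

Here the weighted sum is 146, so the check digit is (10 - 146 % 10) % 10 = 4.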
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True", "def isISBN(code, isbn13=True):\n\n return isISBN13(code) if isbn13 else isISBN10(code)", "def isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False", "def areISBN(codes, isbn13=None):\n\n # initialize list of checks\n checks = []\n\n # construct list of checks\n for code in codes:\n\n if isinstance(code, str):\n\n if isbn13 is None:\n checks.append(isISBN(code, len(code) == 13))\n else:\n checks.append(isISBN(code, isbn13))\n\n else:\n\n checks.append(False)\n\n # return list of checks\n return checks", "def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]", "def isbn_has_valid_check_digit(self, isbn):\n if not self.ISBN_RE.match(isbn):\n raise ValueError(str(isbn) + \" is no valid 13-digit ISBN!\")\n checksum = 0\n for index, digit in enumerate(isbn):\n if index % 2 == 0:\n checksum += int(digit)\n else:\n checksum += 3 * int(digit)\n return checksum % 10 == 0", "def is_valid_isbn(isbn):\n clean = clean_isbn(isbn)\n return clean[-1] == isbn_check_digit(clean[:-1])", "def isISBN10(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((i + 1) * int(code[i]) for i in range(9)) % 11\n\n # convert check digit into its string representation\n return 'X' if check == 10 else str(check)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 10:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:9].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]", "def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num", "def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False", "def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def verify(isbn):\n\n isbn = isbn.replace(\"-\", \"\")\n if not verify_format(isbn):\n return False\n\n isbn_sum = 0\n for digit, i in zip(isbn, range(10, 0, -1)):\n if digit == \"X\":\n isbn_sum += 10 * i\n else:\n isbn_sum += int(digit) * i\n\n 
return isbn_sum % 11 == 0", "def isbn13_convert(isbn13):\r\n if not is_isbn_13(isbn13): return None\r\n return isbn13[3:-1] + isbn_10_check_digit(isbn13[3:-1])", "def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False", "def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn", "def validate_isbn_format(isbn_code: str):\n format_valid = False\n isbn = list(isbn_code)\n msj = ''\n\n if len(isbn) == 13:\n\n isbn_numbers = []\n isbn_separator = []\n index = 0\n isbn_characters = []\n\n for character in isbn:\n\n if character in '0123456789':\n isbn_numbers.append(character)\n\n elif character not in '0123456789':\n isbn_characters.append(character)\n\n if character == '-':\n isbn_separator.append(character)\n\n if index > 0:\n if isbn[index - 1] not in '0123456789':\n msj = 'Se ingresaron dos separadores juntos'\n break\n else:\n msj = 'Se ingresó un caracter inválido'\n break\n\n index += 1\n\n if len(isbn_numbers) < 10:\n msj = 'Faltan dígitos'\n\n if len(isbn_separator) != 3:\n msj = 'No son 4 grupos de números.'\n\n if len(isbn_separator) < 3:\n diff = 3 - len(isbn_separator)\n msj += ' Faltan ' + str(diff) + ' separadores'\n else:\n diff = len(isbn_separator) - 3\n msj += ' Hay ' + str(diff) + ' separador sobrante'\n\n if msj == '':\n format_valid = True\n\n elif len(isbn) < 13:\n msj = 'Faltan caracteres'\n\n else:\n msj = 'Se excede la cantidad de carácteres'\n\n return format_valid, msj", "def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True", "def validate_isbn_math_relation(isbn_code: str):\n isbn_code_valid = False\n isbn_only_numbers = []\n msj = ''\n\n for character in isbn_code:\n if character in '0123456789':\n char_parse_int = int(character)\n isbn_only_numbers.append(char_parse_int)\n else:\n pass\n\n pos = 10\n addition = 0\n for num in isbn_only_numbers:\n mult = pos * num\n addition += mult\n pos -= 1\n\n final_result = addition % 11\n\n if final_result == 0:\n isbn_code_valid = True\n\n if not isbn_code_valid:\n msj = 'No se cumple la relación matemática'\n\n return isbn_code_valid, msj", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def is_code_valid_checksum(processed_code):\n\n if processed_code.isnumeric():\n list_of_digits = [int(digit) for digit in processed_code]\n else:\n converted_digits = convert_code_to_decimal(processed_code)\n list_of_digits = [int(digit) for digit in converted_digits]\n\n return sum(list_of_digits) > 0 and get_calculated_checksum(list_of_digits) % 11 == 0", "def is_valid_issn(issn):\n try:\n return bool(validate_issn(issn))\n except (ValueError, TypeError):\n return False", "def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)", "def 
is_ean13(val):\n if len(val) != 13:\n return False\n sequence = [1, 3]\n try:\n r = sum([int(x) * sequence[i % 2] for i, x in enumerate(val[:-1])])\n ck = (10 - r % 10) % 10\n return ck == int(val[-1])\n except ValueError:\n return False", "def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0", "def validate_identifier(identifier: str) -> bool:\n if identifier[:2] == 'NR':\n return True\n\n if len(identifier) < 9:\n return False\n\n try:\n d = int(identifier[-7:])\n if d == 0:\n return False\n except ValueError:\n return False\n # TODO This is not correct for entity types that are not Coops\n if identifier[:-7] not in ('CP', 'XCP', 'BC'):\n return False\n\n return True", "def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False", "def barcode_is_valid(s):\n return (bool(re.match(r'^[ATGC]*$',s))\n or barcode_is_10xgenomics(s))", "def check_code(item_code):\r\n # RA matches\r\n if re.match(r'^MCRNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAN[0-9]{3,4}(\\.[0-9])?C?(\\.T)?$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAS[0-9]{5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^RNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RU[0-9]{5}(\\.T)?$', item_code):\r\n return True\r\n\r\n # Feature ID (RAN) matches\r\n if re.match(r'^RAN[0-9]{2,5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^(?P<code>RAN[1,2](\\.[0-9]{3,4}))$', item_code):\r\n return True\r\n\r\n return False", "def is_valid(self):\n return phonenumbers.is_valid_number(self)", "def test_and_normalize_isbn(self, isbn):\n ret = {\"valid\": False, \"input_value\": str(isbn)}\n stripped_isbn = isbn.strip()\n unsplit_isbn = stripped_isbn.replace(\"-\", \"\")\n split_on_input = False\n if self.ISBN_SPLIT_RE.match(stripped_isbn):\n if len(stripped_isbn) < 17:\n ret[\"error_type\"] = 1\n return ret\n elif len(stripped_isbn) > 17:\n ret[\"error_type\"] = 2\n return ret\n else:\n split_on_input = True\n if self.ISBN_RE.match(unsplit_isbn):\n split_isbn = self.split_isbn(unsplit_isbn)[\"value\"]\n if split_on_input and split_isbn != stripped_isbn:\n ret[\"error_type\"] = 3\n return ret\n ret[\"normalised\"] = split_isbn\n ret[\"valid\"] = True\n 
return ret\n ret[\"error_type\"] = 0\n return ret", "def is_valid_month_number(month_number: int) -> bool:\n if month_number in range(13):\n return True\n else:\n return False", "def validate(input):\n regex = re.compile(r'(UL)?\\d{1,' + re.escape(str(barcode_digit_length)) + '}$', flags=re.IGNORECASE)\n if regex.match(input):\n is_valid = True\n else:\n is_valid = False\n return is_valid", "def isbn_check_digit(isbn):\n return (11 - (sum(x * y for (x, y) in enumerate(reversed(isbn), start=2))\n % 11)) % 11", "def split_isbn(self, isbn):\n ret_value = {\n 'success': False,\n 'value': None\n }\n split_isbn = \"\"\n remaining_isbn = isbn\n\n if not self.ISBN_RE.match(isbn):\n ret_value['value'] = '\"' + str(isbn) + '\" is no valid 13-digit ISBN!'\n return ret_value\n for ean in self.ean_elements:\n prefix = ean.find(\"Prefix\").text\n if remaining_isbn.startswith(prefix):\n split_isbn += prefix\n remaining_isbn = remaining_isbn[len(prefix):]\n rules = ean.find(\"Rules\")\n length = self._get_range_length_from_rules(remaining_isbn, rules)\n if length == 0:\n msg = ('Invalid ISBN: Remaining fragment \"{}\" for EAN prefix \"{}\" is inside a ' +\n 'range which is not marked for use yet')\n ret_value['value'] = msg.format(remaining_isbn, prefix)\n return ret_value\n group = remaining_isbn[:length]\n split_isbn += \"-\" + group\n remaining_isbn = remaining_isbn[length:]\n break\n else:\n msg = 'ISBN \"{}\" does not seem to have a valid prefix.'\n ret_value['value'] = msg.format(isbn)\n return ret_value\n for group in self.registration_groups:\n prefix = group.find(\"Prefix\").text\n if split_isbn == prefix:\n rules = group.find(\"Rules\")\n length = self._get_range_length_from_rules(remaining_isbn, rules)\n if length == 0:\n msg = ('Invalid ISBN: Remaining fragment \"{}\" for registration group \"{}\" is ' +\n 'inside a range which is not marked for use yet')\n ret_value['value'] = msg.format(remaining_isbn, split_isbn)\n return ret_value\n registrant = remaining_isbn[:length]\n split_isbn += \"-\" + registrant\n remaining_isbn = remaining_isbn[length:]\n check_digit = remaining_isbn[-1:]\n publication_number = remaining_isbn[:-1]\n split_isbn += \"-\" + publication_number + \"-\" + check_digit\n ret_value['success'] = True\n ret_value['value'] = split_isbn\n return ret_value\n else:\n msg = 'ISBN \"{}\" does not seem to have a valid registration group element.'\n ret_value['value'] = msg.format(isbn)\n return ret_value", "def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False", "def is_isni(val):\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 16:\n return False\n try:\n r = 0\n for x in val[:-1]:\n r = (r + int(x)) * 2\n ck = (12 - r % 11) % 11\n return ck == _convert_x_to_10(val[-1])\n except ValueError:\n return False", "def validate_seq(sequence):\n sequence = sequence.strip()\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('^[ACTGNRYSWKMBDHVEFILPQSXZ]*$', re.I)\n if regex.search(sequence) is not None:\n return True\n else:\n return False", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n 
int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def validateFormat(barcode):\r\n validatesymbol = 0\r\n delimitedsymbol = 0\r\n if barcode[0] == '' or barcode[-1] == '':\r\n validatesymbol += 1\r\n for i in range(len(barcode)):\r\n try:\r\n int(barcode[i])\r\n except ValueError:\r\n if barcode[i] == '-':\r\n delimitedsymbol += 1\r\n else:\r\n validatesymbol += 1\r\n if delimitedsymbol == 0 and validatesymbol == 0:\r\n if len(barcode) == 12 or len(barcode) == 13:\r\n pass\r\n else:\r\n validatesymbol += 1\r\n if validatesymbol == 0:\r\n return True\r\n else:\r\n return False", "def is_valid_year_number(year_number: int) -> bool:\n if year_number in range(100):\n return True\n else:\n return False", "def seq_validator(sequence):\n\n # checks for ascii characters that should not appear in a fasta sequence\n seq_val = re.compile(r\"[.-@|\\s| -)|z-~|Z-`|EFIJLOPQX|efijlopqx+,]+\")\n\n if seq_val.search(sequence) is None:\n return True\n\n return False", "def isnumber(n):\r\n N = str(n)\r\n if N.isdigit():\r\n return True\r\n else:\r\n return False", "def isEncAddress(key):\n\tif re.search('^EAddr38[a-km-zA-HJ-NP-Z0-9]{56}$', key):\n\t\tif checkChecksum(key) is False:\n\t\t\treturn True, 'checksum'\n\t\treturn True, 'good'\n\telse:\n\t\treturn False, 'not valid'", "def is_issn(val):\n try:\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 8:\n return False\n r = sum([(8 - i) * (_convert_x_to_10(x)) for i, x in enumerate(val)])\n return not (r % 11)\n except ValueError:\n return False", "def is_valid(postal_code):\n return bool(re.match(UK_POST_CODE_REGEX, postal_code, re.VERBOSE)) if postal_code else False", "def testFormatISBN(self): \n val = format_isbn(\"1234567894123\")\n self.assertEqual(val,\"123-4-567-89412-3\")", "def is_valid_expiration_year(expiration_year: int) -> bool:\n return expiration_year.isnumeric() and 2020 <= int(expiration_year) <= 2030", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def is_valid_issue_year(issue_year: int) -> bool:\n return issue_year.isnumeric() and 2010 <= int(issue_year) <= 2020", "def checkBarcode(barcode):\r\n barcode = barcode.strip()\r\n if validateFormat(barcode) is False:\r\n return 'barcode not valid'\r\n else:\r\n barcode = barcode.replace('-','')\r\n if len(barcode) == 12:\r\n fullbarcode = barcode + str(findlastdigit(barcode))\r\n return fullbarcode\r\n elif len(barcode) == 13:\r\n if findlastdigit(barcode) == int(barcode[-1]):\r\n return 'Valid'\r\n else:\r\n return 'Invalid'", "def check_ean(eancode):\n if not eancode:\n return True\n if len(eancode) <> 13:\n return False\n try:\n int(eancode)\n except:\n return False\n return ean_checksum(eancode) == int(eancode[-1])", "def verify(n):\n\n # Take the sum of all digits.\n sum_of_digits = sum(luhn_digits(n))\n\n # The number is valid iff the sum of digits modulo 10 is equal to 0\n return sum_of_digits % 10 == 0", "def test_book_isbn_length_must_be_ten(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn length must be 10', str(res2))", "def 
is_valid_postal_code(postal_code):\n is_code_valid = False\n postcode_regex = re.compile(r'^\\d{2}-\\d{3}$')\n\n if postcode_regex.search(postal_code) is not None:\n is_code_valid = True\n\n return is_code_valid", "def input_validation(input_: str) -> bool:\n return fullmatch('[1-9]', input_) is not None", "def _is_doi(s: str) -> bool:\n # Thanks to Andrew Gilmartin\n # https://www.crossref.org/blog/dois-and-matching-regular-expressions/\n match = re.match(r\"^10.\\d{4,9}/[-._;()/:A-Z0-9]+$\", s, re.IGNORECASE)\n\n return match is not None and match.group() is not None", "def is_luhn_valid(card_number):\n is_valid = luhn_checksum(card_number) == 0\n return is_valid", "def ni_number_check(number):\n ni_nuber = re.match(r\"^\\s*[a-zA-Z]{2}(?:\\s*\\d\\s*){6}[a-zA-Z]?\\s*$\", number)\n if ni_nuber:\n return True\n return False", "def is_code_contain_multiple_bad_digits(processed_code):\n return True if list(processed_code).count(\"?\") > 1 else False", "def is_valid_hex(hex_code: str) -> bool:\n\n match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', hex_code)\n\n if match:\n return True\n else:\n return False", "def barcode_is_10xgenomics(s):\n return bool(re.match(r'^SI\\-[A-Z0-9]+\\-[A-Z0-9]+$',s))", "def is_valid_doi(doi):\n\n try:\n req = requests.get('http://dx.doi.org/%s' % doi, timeout=2.5)\n except (Timeout, RequestException) as e:\n logger.error('Can not validate doi: ' + str(e))\n raise\n else:\n return req.status_code == 200", "def citationContainsDOI(citation):\n if citation.startswith(\"doi:\"):\n return True\n elif citation.startswith(\"@doi:\"):\n return True\n elif citation.startswith(\"[@doi\"):\n return True\n else:\n return False", "def validate_account_number(num, should_exist=True):\n if len(num) != 8:\n return False\n elif num[0] == '0':\n return False\n else:\n if should_exist:\n return account_number_exists(num)\n else:\n return not account_number_exists(num)", "def is_valid_year_number(year_number: int) -> bool:\n if 0 <= int(year_number) < 100:\n return True\n return False", "def check_ont_address_format(address):\n if len(address) != 34:\n return False\n\n for ch in address:\n if ch not in '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz':\n return False\n\n return True", "def is_valid(self, doi):\n doi = self.normalise(doi, include_prefix=True)\n\n if doi is None or match(\"^doi:10\\\\..+/.+$\", doi) is None:\n return False\n else:\n if not doi in self._data or self._data[doi] is None:\n return self.__doi_exists(doi)\n return self._data[doi].get(\"valid\")", "def esCUITValida(cuit):\n # Convertimos el valor a una cadena\n cuit = str(cuit)\n # Aca removemos guiones, espacios y puntos para poder trabajar\n cuit = cuit.replace(\"-\", \"\") # Borramos los guiones\n cuit = cuit.replace(\" \", \"\") # Borramos los espacios\n cuit = cuit.replace(\".\", \"\") # Borramos los puntos\n # Si no tiene 11 caracteres lo descartamos\n if len(cuit) != 11:\n return False, cuit\n # Solo resta analizar si todos los caracteres son numeros\n if not cuit.isdigit():\n return False, cuit\n # Despues de estas validaciones podemos afirmar\n # que contamos con 11 numeros\n # Aca comienza la magia\n base = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]\n aux = 0\n for i in range(10):\n aux += int(cuit[i]) * base[i]\n aux = 11 - (aux % 11)\n if aux == 11:\n aux = 0\n elif aux == 10:\n aux = 9\n if int(cuit[10]) == aux:\n return True, cuit\n else:\n return False, cuit", "def validate_SSN(SSN_test):\n\n is_valid_SSN = False\n\n # if user breaks format but enters 9 digits, SSN is counted as valid\n if 
len(SSN_test) == 9 and SSN_test.isdigit():\n is_valid_SSN = True\n\n\n # otherwise, if the length is not 11 characters, and there aren't at least 2 dashes, entry immediately fails\n elif len(SSN_test) != 11 or (SSN_test.count(\"-\") != 2):\n pass\n\n # if the dashes are in the wrong place, entry fails\n elif (SSN_test[3] != \"-\") and (SSN_test[6] != \"-\"):\n pass\n\n # dashes are correct, but all other characters must be numbers\n else:\n valid_SSN1 = (SSN_test[0 : 3]).isdigit()\n valid_SSN2 = (SSN_test[4 : 6]).isdigit()\n valid_SSN3 = (SSN_test[7 : ]).isdigit()\n if (valid_SSN1 and valid_SSN2 and valid_SSN3):\n is_valid_SSN = True\n else:\n is_valid_SSN = False\n\n return is_valid_SSN", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def is_valid_bid(klass, bid):\n return bid and re.match(\"^\\d{3,5}$\", bid.strip())", "def is_valid_key(key, crypt_method):\n logger.info(f\"key: {key}, crypt_method: {crypt_method}\")\n if crypt_method == 'C':\n while type(key) is not int or key not in range(0, 95):\n try:\n key = Check.is_integer(key)[1]\n if key not in range(0, 95):\n raise ValueError\n except (TypeError, ValueError):\n print(\"You must enter an integer between 1 and 95!\")\n key = input(\"Enter an encryption key\\n>> \")\n elif crypt_method in ('M', 'P'):\n pass\n else:\n return False\n return True, key", "def isSequenceValid(sequence):\n if not sequence:\n return False\n allowed_chars = set('GCAU')\n return set(sequence).issubset(allowed_chars)", "def istele(number):\n if number[:3] == '140':\n return True\n return False", "def is_valid_control_number(id_code: str) -> bool:\n check_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == 10:\n check_numbers = [3, 4, 5, 6, 7, 8, 9, 1, 2, 3]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == int(id_code[-1]):\n return True\n return False", "def isNumber(string):\r\n for char in string:\r\n charNum = ord(char)\r\n if (charNum < 48 or charNum > 57):\r\n return False\r\n return True", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_sms_valid(text=''):\n try:\n text.decode('ascii')\n except:\n return False\n if len(text) > 160:\n return False\n\n return True", "def is_valid_control_number(id_code: str) -> bool:\n sum = 1 * int(id_code[:1]) + 2 * int(id_code[1:2]) + 3 * int(id_code[2:3]) + 4 * int(id_code[3:4]) + 5 * \\\n int(id_code[4:5]) + 6 * int(id_code[5:6]) + 7 * int(id_code[6:7]) + 8 * int(id_code[7:8]) + 9 *\\\n int(id_code[8:9]) + 1 * int(id_code[9:10])\n control_number = sum % 11\n if int(control_number) == int(id_code[10:11]):\n return True\n elif int(control_number) == 10:\n sum = 3 * int(id_code[:1]) + 4 * int(id_code[1:2]) + 5 * int(id_code[2:3]) + 6 * int(id_code[3:4]) + 7 * \\\n int(id_code[4:5]) + 8 * 
int(id_code[5:6]) + 9 * int(id_code[6:7]) + 1 * int(id_code[7:8]) + 2 * \\\n int(id_code[8:9]) + 3 * int(id_code[9:10])\n control_number = sum % 11\n if control_number == int(id_code[10:11]):\n return True\n elif control_number == 10:\n if int(id_code[10:11]) == 0:\n return True\n else:\n return False\n else:\n return False", "def is_isbn_or_keyword(inputs):\n isbn_or_keyword='keyword'\n if len(inputs)==13 and inputs.isdigit():\n isbn_or_keyword='isbn'\n short_inputs=inputs.strip('-')\n if '-' in inputs and short_inputs.isdigit() and len(short_inputs)==10:\n isbn_or_keyword='isbn'\n return isbn_or_keyword", "def is_mci(code):\n assert isinstance(code, str)\n code_set = ('331.83', '294.9', 'G31.84', 'F09', '33183', '2949', 'G3184')\n return code.startswith(code_set)", "def isbn_convert(isbn10):\r\n if not is_isbn_10(isbn10): return None\r\n return '978' + isbn10[:-1] + isbn_13_check_digit('978' + isbn10[:-1])", "def is_code_has_unknown_digit(processed_code):\n return True if list(processed_code).count(\"?\") == 0 else False", "def test_valid_luhn(self):\n assert luhn_checksum(\"79927398713\") == 0", "def is_valid_year(year_number):\n\n if (type(year_number) == int) and (START_YEAR <= year_number <= FINAL_YEAR):\n return True\n\n return False", "def validate_license(key: str) -> bool:\r\n return bool(\r\n re.match(r'^PB-[A-Z0-9]{8}(?:-[A-Z0-9]{8}){3}$', key)\r\n )", "def valid(formula):\r\n\r\n try:\r\n return not re.search(r'\\b0[0-9]', formula) and eval((formula) is True\r\n #except ArithmeticError:\r\n #return False\r\n except:\r\n return False", "def is_valid_language_code(code):\n try:\n iso639.languages.get(part3=code)\n return True\n except KeyError:\n return False", "def test_issn_incorrect_length(self):\n issns = ['0307-15', '0307-15789', '03071758', '0307175']\n for num in issns:\n with self.subTest(num=num):\n with self.assertRaisesRegex(ValueError, num):\n star_barcode.construct_postscript(\n issn=num,\n bwipp_location=self.bwipp,\n sequence=21,\n week=46,\n header_line=''\n )", "def spki_req_is_valid(spki_req):\n try:\n netscape_spki_from_b64(spki_req)\n return True\n except Exception:\n return False", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES" ]
[ "0.8294502", "0.8156061", "0.7813687", "0.7593365", "0.7539352", "0.7096975", "0.70109516", "0.6998053", "0.6760535", "0.6702344", "0.6442007", "0.64231825", "0.6403078", "0.6313602", "0.6305555", "0.6260889", "0.6107894", "0.5969835", "0.5896946", "0.57390374", "0.56742215", "0.56619984", "0.5472034", "0.5456971", "0.54418916", "0.5413317", "0.5353483", "0.534153", "0.5326616", "0.5303506", "0.5217984", "0.5211934", "0.51902163", "0.51871204", "0.5084005", "0.5074594", "0.50719017", "0.50622916", "0.50617665", "0.5058008", "0.50500935", "0.5020174", "0.50175", "0.50040436", "0.4989212", "0.49833947", "0.49579582", "0.49529052", "0.4929644", "0.49093333", "0.49092564", "0.4906936", "0.4903256", "0.4901409", "0.4896878", "0.48937032", "0.4869668", "0.48673004", "0.48531428", "0.48336205", "0.48329458", "0.4825178", "0.48226863", "0.48027158", "0.47957835", "0.47878042", "0.47827426", "0.4766859", "0.4756467", "0.47523406", "0.4734664", "0.47339183", "0.47305772", "0.47181854", "0.4710365", "0.47096112", "0.46994412", "0.46975347", "0.46925443", "0.46924987", "0.46924987", "0.46924987", "0.46924987", "0.46924987", "0.46924987", "0.46814105", "0.46790826", "0.46769387", "0.46705437", "0.4666508", "0.46633697", "0.4661586", "0.46615326", "0.46581215", "0.4626793", "0.46239927", "0.4621767", "0.4616196", "0.45998105", "0.45958" ]
0.86895674
0
Parse OpenSSL-style foo.0, foo.1, ... subscripted options. Returns a list of values matching the specified option name.
def multiget(self, option, section = None):
    matches = []
    if section is None:
        section = self.default_section
    if self.cfg.has_option(section, option):
        matches.append((-1, self.get(option, section = section)))
    for key, value in self.cfg.items(section):
        s = key.rsplit(".", 1)
        if len(s) == 2 and s[0] == option and s[1].isdigit():
            matches.append((int(s[1]), self.get(option, section = section)))
    matches.sort()
    return [match[1] for match in matches]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_option(self, name):\r\n if not isinstance(name, str):\r\n name = \" \".join(name)\r\n lines = self.sendAndRecv(\"GETCONF %s\\r\\n\" % name)\r\n\r\n r = []\r\n for _,line,_ in lines:\r\n try:\r\n key, val = line.split(\"=\", 1)\r\n r.append((key,val))\r\n except ValueError:\r\n r.append((line, None))\r\n\r\n return r", "def value_options(*args):\n\n @with_pattern(r\"|\".join(args))\n def parse_options(text):\n return text\n\n return parse_options", "def _parse_delimited_options(ttsoptions, _engine):\n options = []\n for prop, val in [s.strip().split('=') for s in ttsoptions.split(',')]:\n prop = prop.strip()\n val = val.strip()\n val = float(val) if val.isdecimal() else val\n options[prop] = val\n\n return options", "def _handle_short_form(element):\n if len(element) <= 1:\n raise CmdLineException(\"Invalid option: '{}'\".format(element))\n tokens = []\n for i in range(1, len(element)):\n if element[i: i + 1] == \"=\":\n if i + 1 < len(element):\n tokens.append(element[i + 1:])\n break\n tokens.append(\"-\" + element[i: i + 1])\n return tokens", "def parse_options(self, options):\n pass", "def parse_options(options, return_list=True):\n\n cmd_options = []\n\n for key, value in options.items():\n\n if value is not None:\n txt = f\"--{key} {value}\"\n else:\n txt = f\"--{key}\"\n\n cmd_options.append(txt)\n\n if return_list:\n return cmd_options\n\n cmd_options = \" \".join(cmd_options)\n\n return cmd_options", "def parse_options(options):\r\n # convert single quotes inside option values to html encoded string\r\n options = re.sub(r\"([a-zA-Z])('|\\\\')([a-zA-Z])\", r\"\\1&#39;\\3\", options)\r\n options = re.sub(r\"\\\\'\", r\"&#39;\", options) # replace already escaped single quotes\r\n # parse the set of possible options\r\n lexer = shlex.shlex(options[1:-1].encode('utf8'))\r\n lexer.quotes = \"'\"\r\n # Allow options to be separated by whitespace as well as commas\r\n lexer.whitespace = \", \"\r\n\r\n # remove quotes\r\n # convert escaped single quotes (html encoded string) back to single quotes\r\n tokens = [x[1:-1].decode('utf8').replace(\"&#39;\", \"'\") for x in lexer]\r\n\r\n # make list of (option_id, option_description), with description=id\r\n return [(t, t) for t in tokens]", "def parse_args(args, optinfos):\n\n for opt_identifier, optinfo in optinfos:\n try:\n options, arguments = getopt.gnu_getopt(args, optinfo)\n return opt_identifier, options, arguments\n except getopt.GetoptError:\n # That version doesn't work, so try the next one\n continue\n \n # If we got this far, they both failed (read: syntax error)\n error(2, \"Syntax Error: Incorrect option passed. 
See the man page for more information.\\nA common cause is using old LPRng syntax.\\nValid options: %s\\n\" % \n (string.replace(re.sub(r'([a-zA-Z])', r'-\\1 ',\n optinfos[SYSTEM_CUPS][1]), ':', '[arg] ')))", "def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args", "def options_by_name(self):\n pass", "def options(argv=[]):\r\n parser = HendrixOptionParser\r\n return vars(parser.parse_args(argv)[0])", "def parse_options():\n\n from optparse import OptionParser\n usage = r\"\"\"%prog [options] <voxel_file>\"\"\"\n p = OptionParser(usage=usage)\n p.add_option('-o', '--output', action='store', dest='output',\n default='plot', help='Path to output SILO or VTK file.')\n p.add_option('-v', '--vtk', action='store_true', dest='vtk',\n default=False, help='Flag to convert to VTK instead of SILO.')\n parsed = p.parse_args()\n if not parsed[1]:\n p.print_help()\n return parsed\n return parsed", "def get_options(self, field):\n base, req_option = field.split(\"-\")\n assert base == \"options\", \"get_options can only be used to fetch options.\"\n option_type = self.option_str_to_int(req_option)\n i = 0\n # First, check if the option is already present in the packet\n for option in self.layer.options:\n # Scapy may try to be helpful and return the string of the option\n next_option = self.option_str_to_int(option[0])\n if option_type == next_option:\n _name, value = self.layer.options[i]\n # Some options (timestamp, checksums, nop) store their value in a\n # tuple.\n if isinstance(value, tuple):\n # Scapy returns values in any of these types\n if value in [None, b'', ()]:\n return ''\n value = value[0]\n if value in [None, b'', ()]:\n return ''\n if req_option == \"md5header\":\n return binascii.hexlify(value).decode(\"utf-8\")\n\n return value\n i += 1\n return ''", "def interpret_options(options):\n # template always has to be index 0\n template = options[0]\n # namespace always has to be index 1. 
Support 'ec2' (human friendly) and\n # 'AWS/EC2' (how CloudWatch natively calls these things)\n namespace = options[1].rsplit('/', 2)[-1].lower()\n next_idx = 2\n # region might be index 2\n region = ''\n if len(options) > 2 and re.match(r'^\\w+\\-[\\w\\-]+\\-\\d+$', options[2]):\n region = options[2]\n next_idx += 1\n else:\n next_idx = 2\n region = region or boto.config.get('Boto', 'ec2_region_name', 'us-east-1')\n\n filter_by = {}\n extras = []\n for arg in options[next_idx:]:\n if arg.startswith('-'):\n # throw these away for now\n extras.append(arg)\n elif '=' in arg:\n key, value = arg.split('=', 2)\n filter_by[key] = value\n else:\n # throw these away for now\n extras.append(arg)\n\n return template, namespace, region, filter_by, extras", "def get_options(options, opt_path):\r\n options_in = open(opt_path, 'r')\r\n # get exceptions\r\n for line_in in options_in:\r\n line = line_in.strip()\r\n if len(line) == 0:\r\n continue\r\n if line.startswith(\"#\"):\r\n continue\r\n if line.startswith(\"[\") and \"pep8\" in line:\r\n continue\r\n option = line\r\n if not line.startswith(\"-\"):\r\n line = \"--\" + line\r\n options.append(line)\r\n\r\n options_in.close()", "def test_parsingValues(self):\n argV = (\"--fooint 912 --foofloat -823.1 \"\n \"--eggint 32 --eggfloat 21\").split()\n self.usage.parseOptions(argV)\n self.failUnlessEqual(self.usage.opts['fooint'], 912)\n self.assert_(isinstance(self.usage.opts['fooint'], int))\n self.failUnlessEqual(self.usage.opts['foofloat'], -823.1)\n self.assert_(isinstance(self.usage.opts['foofloat'], float))\n self.failUnlessEqual(self.usage.opts['eggint'], 32)\n self.assert_(isinstance(self.usage.opts['eggint'], int))\n self.failUnlessEqual(self.usage.opts['eggfloat'], 21.)\n self.assert_(isinstance(self.usage.opts['eggfloat'], float))", "def _parse(self, args):\r\n\r\n ordered = []\r\n opt_full = dict()\r\n opt_abbrev = dict()\r\n\r\n args = args + [''] # Avoid out of range\r\n i = 0\r\n\r\n while i < len(args) - 1:\r\n arg = args[i]\r\n arg_next = args[i+1]\r\n if arg.startswith('--'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_full[arg[2:]] = arg_next\r\n i += 2\r\n elif arg.startswith('-'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_abbrev[arg[1:]] = arg_next\r\n i += 2\r\n else:\r\n ordered.append(arg)\r\n i += 1\r\n \r\n return ordered, opt_full, opt_abbrev", "def ParseOption():\n parser = optparse.OptionParser()\n parser.add_option('--input', dest='input', help='Input file path')\n parser.add_option('--output', dest='output', help='Output file path')\n parser.add_option(\n '--var_name', dest='var_name', help='Var name for the array')\n return parser.parse_args()[0]", "def check(options, rules = rules):\n s = [\"str\", \"unicode\"]\n for key in options:\n if not key.endswith(\" comment\"):\n if key in rules:\n c = rules[key]\n else:\n raise OptionKeyError(key)\n value = options[key]\n if c[0] == \"U\": continue\n elif c[0] == \"POT\":\n if not(((value & (value - 1)) == 0) and value):\n raise OptionPOTError(key)\n elif c[0] == \"R\":\n if value not in list(range(c[1], c[2]+1)):\n raise OptionRangeError(key, c[1], c[2]+1)\n elif c[0] == \"B\":\n if value not in list(range(0, 2)):\n #print (value)\n raise OptionRangeError(key, 0, 2)\n elif c[0] == \"N1+\":\n if value < 1:\n raise OptionRangeError(key, 1, float(\"inf\"))\n elif c[0] == \"N0+\":\n if value < 0:\n raise OptionRangeError(key, 0, float(\"inf\"))\n elif c[0] == 
\"FN0+\":\n if value < 0:\n raise OptionRangeError(key, 0, float(\"inf\"))\n elif c[0] == \"N-1+\":\n if value < -1:\n raise OptionRangeError(key, -1, float(\"inf\"))\n elif c[0] == \"S\":\n if value.__class__.__name__ not in s:\n raise OptionTypeError(key, \"text\")\n elif c[0] == \"Name\":check_name(value,key)\n\n elif c[0] == \"L\":\n if value.__class__.__name__ != \"list\":\n raise OptionTypeError(key, \"list\")\n\n elif c[0] == \"C\":\n if len(value) != 3:\n raise OptionError()\n if sum(value) < 1:\n raise OptionError()\n else:\n raise Exception(\"%s not valid rule type from %s\" % (c[0], key))", "def versatileOptions():\r\n return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))", "def lookup_option_symbols(self, underlying: str) -> List[Symbol]:\n url = \"/v1/markets/options/lookup\"\n params = {\"underlying\": underlying}\n\n data = self.get(url, params)\n res = MarketsAPIResponse(**data)\n return res.symbols", "def getOptionsNames(self) -> List[unicode]:\n ...", "def parseSubscripts(part):\n subs = str(part)\n subs = part.split(\"]\")[:-1]\n return [int(sub[1:]) for sub in subs]", "def parse_opts2(tokens, optpx='--', argparam=False):\n opts0 = []\n args = []\n n = len(optpx)\n\n for token in tokens:\n if token[:2] == optpx:\n opts0.append(token[n:])\n else:\n if argparam:\n token = token.split('=')\n args.append(token)\n\n opts = parse_opts(opts0)\n\n return args, opts", "def parse(self, section):\n # try to find alternatives if they exist\n alternatives = deepcopy(self.alternatives)\n while len(alternatives) != 0 and self.name not in section.dict:\n other_name = alternatives.pop(0)\n if other_name in section.dict:\n section.dict[self.name] = section.dict[other_name]\n del section.dict[other_name]\n break\n if not self.optional:\n assert_exists(self.name, section.dict, section.name)\n if self.name not in section.dict:\n return self.default\n else:\n if self.dtype != list:\n if self.dtype == bool:\n # this is necessary since ``bool(\"False\")`` returns ``True``.\n value = parse_bool(section, self.name)\n else:\n value = self.dtype(section.dict[self.name])\n if not self.validation_func(value):\n raise ValueError('Invalid input for option ' + self.name +\n ' in section ' + section.name)\n return value\n else:\n\n value = parse_list(section.dict[self.name], self.datatype)\n\n # value validation\n if not all_true(self.validation_func, value):\n raise ValueError('Invalid input for option ' + self.name +\n ' in section ' + section.name)\n\n shape = deepcopy(self.shape)\n\n # now we need to get the correct shape\n if shape == -1:\n # we don't care for the shape of this\n if not isinstance(value, list):\n value = [value]\n return value\n\n if isinstance(shape, str):\n # in this case we simply use the shape of the option with this name\n if shape not in section.dict:\n raise ValueError(self.name + ' in ' + section.name + ' has an invalid ' +\\\n 'shape because the options whose shape it should have ' +\\\n 'does not exist. Check your option definitions!')\n shape = get_shape(section.dict[shape])\n if isinstance(shape, int):\n shape = [shape]\n # shape is now a list, but it might still contain strings\n for i in range(len(shape)):\n if isinstance(shape[i], str):\n shape[i] = len(section.dict[shape[i]])\n\n\n\n # shape is now either a 'flat' shape, i.e. something like [2, 3, 2],\n # or an expanded shape, e.g. [2, [3, 3], [[2, 2, 2],[2, 2, 2]]]\n # if it's flat, it might contain dimensions with -1 that cannot be\n # autoexpanded. 
We first need to determine the shape of this dimension.\n if is_flat(shape):\n real_shape = get_shape(value)\n if isinstance(real_shape, (list, tuple)):\n # if it's just a single number we can expand it\n # Here I'm trying to find the flat shape of the value that was\n # given in the configuration file.\n flat_shape_value = try_flattening_shape(real_shape)\n # It might happen that we cannot flatten the shape, in this\n # case there are negative values remaining in flat_shape_value.\n # If there are, this means that there is a dimension\n # containing lists of different lengths.\n # In any case I will try to replace any -1 in ``shape``\n # with the value in ``flat_shape_value``.\n shape = get_positive_shape(shape, flat_shape_value)\n # Now we do a test for equality of the asserted shape and\n # the shape of the value found in the config file. Keep in\n # mind that there might be -1 values left.\n if flat_shape_value != shape[-len(flat_shape_value):]:\n raise ShapeError(self.name, section.name)\n # If there are -1's left we must ensure that the \"depth\" of\n # the given value, i.e. the number of dimensions, is higher\n # than the ``number of dimensions after the value preceding\n # the first -1`` + 1 .\n if any(map(lambda x: x == -1, shape)):\n depth = numdim(value)\n mindepth = len(shape) - shape.index(-1) + 1\n if depth < mindepth:\n raise ValueError('Option ' + self.name + ' in section ' +\n section.name + ' can not be expanded!')\n shape = expand_shape(shape)\n\n # Now we have an expanded shape, so only two tasks remain:\n # * auto-expansion\n # * shape validation\n value = expand_to_shape(shape, value)\n if not compare_shapes(shape, get_shape(value)):\n raise ShapeError(self.name, section.name)\n return value", "def Options():\n p = optparse.OptionParser('split_doc.py [options] input_file out_prefix')\n # Like awk -v\n p.add_option(\n '-v', dest='default_vals', action='append', default=[],\n help=\"If the doc's own metadata doesn't define 'name', set it to this value\")\n p.add_option(\n '-s', '--strict', dest='strict', action='store_true', default=False,\n help=\"Require metadata\")\n return p", "def get_options(self, key):\n if key in self.options.get_option_names():\n return self.options\n\n try:\n scope, scoped_key = key.split('.')\n except ValueError:\n return None\n\n if scope == 'input' and scoped_key in self.input.options.get_option_names():\n return self.input.options\n elif scope == 'output' and scoped_key in self.output.options.get_option_names():\n return self.output.options\n elif scope == 'exploit' and scoped_key in self.exploit.options.get_option_names():\n return self.exploit.options\n else:\n return None", "def extract_opt(options, optname):\n extracted = []\n remaining = []\n for o, v in options:\n if o == optname:\n extracted.append((o, v))\n else:\n remaining.append((o, v))\n return extracted, remaining", "def _check_prefixes(self, docstring: PetscDocStringImpl) -> None:\n for key, opts in sorted(self.items.items()):\n lopts = len(opts)\n assert lopts >= 1, f'number of options {lopts} < 1, key: {key}, items: {self.items}'\n\n if lopts == 1:\n # only 1 option, should start with '.'\n self._check_opt_starts_with(docstring, opts[0], 'Solitary', '.')\n else:\n # more than 1, should be '+', then however many '.', then last is '-'\n self._check_opt_starts_with(docstring, opts[0], 'First multi', '+')\n for opt in opts[1:-1]:\n self._check_opt_starts_with(docstring, opt, 'Multi', '.')\n self._check_opt_starts_with(docstring, opts[-1], 'Last multi', '-')\n return", "def 
_getOptions(self, sectionName):\r\n\r\n if sectionName in self.sections:\r\n attri_list = self.cf.options(sectionName)\r\n return attri_list\r\n else:\r\n return None", "def _parse_option_section(conf, items, copt, opt, _allow_include=0):\n global config_stray_opts, _non_options, _list_options, _path_options\n\n for key, val in items:\n if key == 'include' and _allow_include:\n for inc in val.split(' '):\n _parse_option_section(conf, conf.items(inc), copt, opt, _allow_include=(_allow_include-1))\n\n for key, val in items:\n if key in _non_options:\n continue\n elif key in dir(copt):\n if key in _list_options:\n val = val.split(_list_options[key])\n elif isinstance(getattr(copt, key), list) or \\\n (key in ('modules',)):\n val = val.split(' ')\n elif isinstance(getattr(copt, key), bool):\n val = bool(val.lower() in ('1', 'true', 't', 'yes'))\n\n if not getattr(copt, key):\n setattr(opt, key, val)\n else:\n config_stray_opts.append((key, val))\n pass", "def _collect_options(self, option_index):\n input_option = list()\n if not option_index:\n for k in self._options.keys():\n input_option.append(self._options.get(k))\n else:\n for index in option_index:\n input_option.append(self._options.get(index))\n return input_option", "def getoptions(str,num,num2=None):\n if num2==None:\n num2=num\n op=str.split(',')\n if len(op) >= num and len(op) <= num2:\n for i in range(len(op)):\n op[i]=op[i].strip()\n return op\n else:\n raise OptionError, \"WrongNumber\"", "def processOption (self, line) :\n ll = line.split ('=')\n if len (ll) < 2:\n print \"Cannot parse option \" , line\n sys.exit()\n result = (ll[0].strip() , ll[1].strip())\n return result", "def parse_opts_adapter(tokens, delim, optpx='--', argparam=False):\n if any([t.startswith(optpx) for t in tokens]):\n # new style\n args, opts = parse_opts2(tokens, optpx=optpx, argparam=argparam)\n else:\n # old style\n args = tokens[:delim]\n opts = parse_opts(tokens[delim:])\n return args, opts", "def extract_option(prefix, args):\n if prefix in ('#',):\n unique = False\n else:\n unique = True\n value = [a for a in args if a.startswith(prefix)]\n if len(value) == 1:\n value = value[0]\n args.remove(value)\n value = value[1:]\n if not unique:\n return [value]\n return value\n elif len(value) > 1 and unique:\n print('More than one %s found in args' % prefix)\n sys.exit(1)\n elif len(value) > 1 and not unique:\n for v in value:\n if v in args:\n args.remove(v)\n return [v[1:] for v in value]\n return None", "def parseopts(opts):\n\n for opt, arg in opts:\n\n if opt in [\"--input\"]:\n filetag = arg\n\n return filetag", "def _parse_options(options):\n opts = dict()\n for attr in dir(options):\n if attr.startswith(\"__\"):\n continue\n opts[attr] = getattr(options, attr)\n return opts", "def getList(self,section, option): \n unparsedOption=self.get(section, option)\n if unparsedOption.find(',')>0:\n splittedValue=unparsedOption.split(',')\n strippedValue=[]\n while splittedValue:\n valuePart=splittedValue.pop(0)\n strippedValue.append(valuePart.strip())\n result=strippedValue\n else: result=unparsedOption\n return result", "def options(self, a: str) -> typing.Any:", "def parse_options(arguments):\n parser = optparse.OptionParser(option_list=OPTION_LIST)\n options, values = parser.parse_args(arguments)\n return options", "def parse_opts(opts0):\n opts = {}\n # parse the stuff in \"opts\"\n for opt in opts0:\n parsed = opt.split('=')\n key = parsed[0].strip()\n if len(parsed) > 1:\n # OLD: cmd = parsed[1].strip()\n cmd = '='.join(parsed[1:]).strip()\n else:\n 
cmd = ''\n opts[key] = cmd\n\n return opts", "def str2choice(options: List[str]) -> Callable[[str], str]:\n\n def _parse(string: str) -> str:\n if string not in options:\n raise argparse.ArgumentTypeError(\"Expected one of: \" + \" \".join(options))\n return string\n\n return _parse", "def opts_load(opts):\n attr_words = []\n kv_words = []\n kv_exprs = {}\n for opt in opts:\n if isinstance(opt, basestring): # attr_word\n attr_words.append(opt)\n elif isinstance(opt, list):\n if len(opt) == 1: # attr_word\n attr_words.append(unicode(opt[0]))\n elif len(opt) == 2 and not opt[1]: # attr_word\n attr_words.append(unicode(opt[0]))\n elif (len(opt) == 2 and\n len(opt[0]) == 1 and\n unicode(opt[0]).isalpha() and\n unicode(opt[1]).isdigit()\n ): # kv_word\n kv_words.append(unicode(opt[0]) + unicode(opt[1]))\n else: # kv_expr\n kv_exprs[unicode(opt[0])] = \" \".join(opt[1:])\n return attr_words, kv_words, kv_exprs", "def _optionvarkey(name):\n return \"ragdoll%s\" % (name[0].upper() + name[1:])", "def parseOpts(self):\n\n for opt in self.opts:\n var, val = opt.split('=', 1)\n try:\n val = int(val)\n except ValueError:\n try:\n val = float(val)\n except ValueError:\n # just a string\n pass\n self[var] = val", "def getmulti(self, section, option, nested=False):\n data = self.get(section, option)\n if '\\n' not in data and self.read_keyval(data)[0] is None:\n # oneliner version\n return data.strip().split()\n\n # block version\n if not nested:\n return [element for element in data.strip().split('\\n')]\n\n def walk(data):\n \"\"\"docstring for walk\"\"\"\n response = []\n option_name = None\n option_value = None\n for element in data.split('\\n'):\n if element and element.startswith(' '):\n option_value.append(element)\n continue\n if option_name:\n response.append({option_name: walk(dedent('\\n'.join(option_value)))})\n option_name = None\n option_value = None\n\n n, v = self.read_keyval(element)\n if not n:\n response.append(element)\n option_name = None\n option_value = None\n continue\n elif v:\n response.append({n: v})\n option_name = None\n option_value = None\n continue\n option_name = n\n option_value = []\n\n if option_name:\n response.append({option_name: walk(dedent('\\n'.join(option_value)))})\n\n return response\n return walk(data)", "def encode_options(options: Dict[str, Union[str, float, int]]) -> List[str]:\n d = list()\n rev_dict = {v: k for k, v in type_mappings.items()}\n for k, v in options.items():\n t = type(v)\n if t not in rev_dict:\n raise OptionParsingError(f\"Unknown option type {t}.\")\n arg = f'{k}={v}={rev_dict[t]}'\n d.append(arg)\n return d", "def _parse_option_name(line):\n return line.split('=')[0].strip()", "def myst_options(options):\n num_options = len(options.keys())\n myst_options = []\n if num_options == 0:\n return myst_options\n elif num_options < 2: # TODO parameterise this in conf.py\n for option, option_val in options.items():\n myst_options.append(\":{}: {}\".format(option, option_val).rstrip())\n return myst_options\n else:\n myst_options.append(\"---\")\n for item in sorted(options.keys()):\n myst_options.append(\"{}: {}\".format(item, options[item]))\n myst_options.append(\"---\")\n return myst_options", "def options(self, section):\n try:\n return list(self._dict[section])\n except KeyError as e:\n raise NoSectionError(str(e)) from None", "def get_option_names(self):\n # There are no options until the current exploit is set\n if self.exploit is None:\n return []\n\n option_names = self.options.get_option_names()\n\n if self.input is not None:\n 
option_names += ['input.' + option for option in self.input.options.get_option_names()]\n\n if self.output is not None:\n option_names += ['output.' + option for option in self.output.options.get_option_names()]\n\n if self.exploit is not None:\n option_names += ['exploit.' + option for option in self.exploit.options.get_option_names()]\n\n return option_names", "def parse_option(group_name, option_name, value_str):\n group = get_group(group_name)\n if option_name in group.members:\n return group.members[option_name].parse(value_str)\n else:\n raise UnknownConfigOptionError(groupname + \".\" + \"option_name\")", "def _parse_qselect(qselect_output):\n jobs = qselect_output.splitlines()\n if not jobs or (len(jobs) == 1 and jobs[0] == ''):\n return []\n return [int(job.split('.')[0]) for job in jobs]", "def get_options(self):\r\n return self._option_values", "def _parse_qselect(qselect_output):\n jobs = qselect_output.splitlines()\n if not jobs or (len(jobs) == 1 and jobs[0] is ''):\n return []\n return [int(job.split('.')[0]) for job in jobs]", "def _parse(self, options):\n\n '''Start by considering all registered options, and validating them\n if they are in the incoming options dict'''\n self.results = {}\n wanted = self.wanted.copy()\n for opt in wanted.keys():\n if opt in options:\n self.results[opt] = self._access(wanted, opt, options[opt])\n\n '''As all registered options, in trac.ini, have composite names,\n consisting of a prefix and the option name separated by a dot,\n now find the starting list of prefixes to consider. Either use\n the value of incoming option of the name found in self.config,\n or use the fixed default prefix from self.prefix'''\n if self.config in options:\n parents = self._parents_to_list(options[self.config])\n del options[self.config]\n else:\n parents = [ self.prefix ]\n\n '''Look up these composite options'''\n if len(wanted) > 0:\n self._inherit(options, parents, wanted, {})\n\n '''Set all still unresolved registered options, to their defaults'''\n for opt in wanted.keys():\n self.results[opt] = (\n wanted[opt].default,\n self._is_default,\n wanted[opt]\n )\n\n '''Move over all UNregistered options as they were passed in.'''\n for opt in options.keys():\n if not opt in self.results:\n self.results[opt] = (\n options[opt],\n self._is_extra,\n None\n )", "def parse_options():\n\n parser = optparse.OptionParser(usage=USAGE, version=VERSION)\n\n parser.add_option(\"-f\", \"--file\",\n action=\"store\", default=Utils.getConfig(\"defaultFile\"), dest=\"file\",\n help=\"Read the site name from external file\")\n\n parser.add_option(\"-s\", \"--site-name\",\n action=\"store\", default=\"\", dest=\"sitename\",\n help=\"Get links for specified url only\")\n\n opts, args = parser.parse_args()\n\n return opts, args", "def parse_cmdline(cmdline_args):\n option_parser = optparse.OptionParser(usage='usage: %prog [options] <pattern>',\n description='ts short for TextSearch, Grep like tool', prog='ts', add_help_option=False) # -h is a real option\n\n option_parser.add_option('-h', '--help', action='store_true', dest='help', help='Display this information')\n option_parser.add_option('-e', '--extension', action='store', dest='extension', type='string', default=None,\n help='file extension')\n\n group_searching = optparse.OptionGroup(option_parser, 'Regexp selection and interpretation')\n group_searching.add_option('-i', '--ignore-case', action='store_true', dest='ignore_case', default=False,\n help='Ignore case distinctions in the pattern')\n 
group_searching.add_option('-w', '--word-regexp', action='store_true', dest='word_regexp', default=False,\n help='Force the pattern to match only whole words')\n group_searching.add_option('-l', '--literal', action='store_true', dest='literal', default=False,\n help='Quote all metacharacters; the pattern is literal')\n option_parser.add_option_group(group_searching)\n\n group_miscellaneous = optparse.OptionGroup(option_parser, 'Miscellaneous')\n group_miscellaneous.add_option('--path-only', action='store_true', dest='path_only', default=False,\n help='only print out the matching file')\n group_miscellaneous.add_option('-v', '--invert-match', action='store_true', dest='invert_match', default=False,\n help='Invert the sense of matching, to select non-matching lines.')\n option_parser.add_option_group(group_miscellaneous)\n\n group_output = optparse.OptionGroup(option_parser, 'Output control')\n group_output.add_option('-c', '--count', action='store_true', dest='count', default=False,\n help='Suppress normal output; instead print a count of matching lines for each input file.')\n group_output.add_option('-o', '--only-matching', action='store_true', dest='only_matching', default=False,\n help='Print only the matched (non-empty) parts of a matching line, with each such part on '\n 'a separate output line.')\n option_parser.add_option_group(group_output)\n\n options, args = option_parser.parse_args(cmdline_args)\n return options, args, option_parser", "def options(self):\n if self._ast:\n for option in self._ast[1]:\n yield option", "def getList(self,section,option,sep=\";\"):\n value=ConfigParser.SafeConfigParser.get(self,section,option)\n value=value.strip('\"')\n vallist=value.split(sep)\n return vallist", "def getOptions(self, propertyListName: unicode) -> ghidra.framework.options.Options:\n ...", "def _parser_options():\n #We have two options: get some of the details from the config file,\n import argparse\n from pydft import base\n pdescr = \"Numerical DFT code.\"\n parser = argparse.ArgumentParser(parents=[base.bparser], description=pdescr)\n for arg, options in script_options.items():\n parser.add_argument(arg, **options)\n \n args = base.exhandler(examples, parser)\n if args is None:\n return\n\n return args # pragma: no cover", "def find_opts_linux(soup, header):\n\n # Get the source line of the header\n header_el = soup.find(id=header)\n if header_el is None:\n return set()\n header_source_line = soup.find(id=header).sourceline\n\n # Get the element where the options are described\n opts_el = [pre for pre in soup.find_all('pre') if pre.sourceline == header_source_line][0]\n\n opts_lines = opts_el.text.split('\\n')\n opts_lines = [line.lstrip().split(maxsplit=1)[0] for line in opts_lines if line]\n opts = [line for line in opts_lines if line[0] == '-' and line != '-']\n\n # Remove false positives\n opts = {o for o in opts if not o[-1] in NON_OPTS_CHARS}\n\n return opts", "def parse_options():\n\n parser = optparse.OptionParser()\n\n parser.add_option(\"-q\", \"--quiet\",\n action=\"store_true\", default=False, dest=\"quiet\",\n help=\"Enable quiet mode\")\n\n parser.add_option(\"-l\", \"--links\",\n action=\"store_true\", default=False, dest=\"links\",\n help=\"Get links for specified url only\")\n\n parser.add_option(\"-d\", \"--depth\",\n action=\"store\", type=\"int\", default=1, dest=\"depth\",\n help=\"Maximum depth to traverse\")\n\n parser.add_option(\"-v\", \"--verbose\",\n action=\"store_true\", default=False, dest=\"verbose\",\n help=\"Enable verbose mode\")\n\n opts, args = 
parser.parse_args()\n\n if len(args) < 1:\n parser.print_help()\n raise SystemExit, 1\n\n return opts, args", "def find_option(self, name, namespace=...):\n ...", "def list_opts():\n return [('ironic_lib', utils_opts)]", "def read_array_option(config, section):\n if not config.has_section(section):\n return\n\n return [item for item, _ in config.items(section)]", "def svn_diff_file_options_parse(*args):\n return _diff.svn_diff_file_options_parse(*args)", "def processCmdlineOpts(cmdOpts):\n global opts\n opts = {}\n for i in range(1,len(cmdOpts)):\n if re.match('-i', cmdOpts[i]):\n opts['i'] = cmdOpts[i+1]\n if i not in opts: \n opts['i']='awn.xml'\n return opts", "def parseOptions():\n\n strict = False\n lists = []\n addressbooks = []\n folders = []\n exclusions = []\n\n opts, args = getopt.getopt(sys.argv[1:], \"sl:a:f:e:\", [ \"strict\", \"hand-list\", \"addressbook\", \"folder\", \"exclusions\" ])\n if len(args) != 1 or len(opts) < 1: \n raise getopt.GetoptError(\"Invalid arguments.\")\n\n for opt in opts:\n if opt[0] == \"-s\" or opt[0] == \"--strict\":\n strict = True\n if opt[0] == '-l' or opt[0] == '--hand-list':\n lists.append(opt[1])\n if opt[0] == '-a' or opt[0] == '--addressbook':\n addressbooks.append(opt[1])\n if opt[0] == '-f' or opt[0] == '--folder':\n folders.append(opt[1])\n if opt[0] == '-e' or opt[0] == '--exclusions':\n exclusions.append(opt[1])\n\n return(args[0], strict, lists, addressbooks, folders, exclusions)", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def vararg_callback(option, opt_str, value, parser):\n\tassert value is None\n\tvalue = []\n\n\tdef floatable(str):\n\t\ttry:\n\t\t\tfloat(str)\n\t\t\treturn True\n\t\texcept ValueError:\n\t\t\treturn False\n\n\tfor arg in parser.rargs:\n\t\t# Stop on options like --foo \n\t\tif arg[:2] == \"--\" and len(arg) > 2:\n\t\t\tbreak\n\t\t# Stop on -a, but not on negative numbers\n\t\tif arg[:1] == \"-\" and len(arg) > 1 and not floatable(arg):\n\t\t\tbreak\n\t\tvalue.append(arg)\n\n\tdel parser.rargs[:len(value)]\n\tsetattr(parser.values, option.dest, value)", "def split_package_spec(package_spec):\n match = re.match('^(.*?)\\((.*)\\)$', package_spec)\n if match:\n package_name = match.group(1)\n package_options = match.group(2).split(',')\n else:\n package_name = package_spec\n package_options = []\n return package_name, package_options", "def parse_args(self, w, pos, parsing_state=None):\n\n from .. 
import latexwalker\n\n if parsing_state is None:\n parsing_state = w.make_parsing_state()\n\n argnlist = []\n\n if self.args_math_mode is not None and \\\n len(self.args_math_mode) != len(self.argspec):\n raise ValueError(\"Invalid args_math_mode={!r} for argspec={!r}!\"\n .format(self.args_math_mode, self.argspec))\n\n def get_inner_parsing_state(j):\n if self.args_math_mode is None:\n return parsing_state\n amm = self.args_math_mode[j]\n if amm is None or amm == parsing_state.in_math_mode:\n return parsing_state\n if amm == True:\n return parsing_state.sub_context(in_math_mode=True)\n return parsing_state.sub_context(in_math_mode=False)\n\n p = pos\n\n if self._like_pylatexenc1x_ignore_leading_star:\n # ignore any leading '*' character\n tok = w.get_token(p)\n if tok.tok == 'char' and tok.arg == '*':\n p = tok.pos + tok.len\n\n for j, argt in enumerate(self.argspec):\n if argt == '{':\n (node, np, nl) = w.get_latex_expression(\n p,\n strict_braces=False,\n parsing_state=get_inner_parsing_state(j)\n )\n p = np + nl\n argnlist.append(node)\n\n elif argt == '[':\n\n if self.optional_arg_no_space and w.s[p].isspace():\n # don't try to read optional arg, we don't allow space\n argnlist.append(None)\n continue\n\n optarginfotuple = w.get_latex_maybe_optional_arg(\n p,\n parsing_state=get_inner_parsing_state(j)\n )\n if optarginfotuple is None:\n argnlist.append(None)\n continue\n (node, np, nl) = optarginfotuple\n p = np + nl\n argnlist.append(node)\n\n elif argt == '*':\n # possible star.\n tok = w.get_token(p)\n if tok.tok == 'char' and tok.arg.startswith('*'):\n # has star\n argnlist.append(\n w.make_node(latexwalker.LatexCharsNode,\n parsing_state=get_inner_parsing_state(j),\n chars='*', pos=tok.pos, len=1)\n )\n p = tok.pos + 1\n else:\n argnlist.append(None)\n\n else:\n raise LatexWalkerError(\n \"Unknown macro argument kind for macro: {!r}\".format(argt)\n )\n\n parsed = ParsedMacroArgs(\n argspec=self.argspec,\n argnlist=argnlist,\n )\n\n return (parsed, pos, p-pos)", "def _getoptions():\n parser = OptionParser()\n parser.add_option(\"-f\", \"--dwca_file\", dest=\"dwca_file\",\n help=\"Darwin Core Archive file\",\n default=None)\n return parser.parse_args()[0]", "def parse_params(params):\n def isoption(x):\n return x.startswith('-')\n solo_flags = []\n arg_flags = dict()\n i = 0\n while i < len(params):\n if not isoption(params[i]):\n raise ValueError('\"' + params[i] + '\" does not look like an option.')\n if i == len(params) - 1 or isoption(params[i+1]):\n solo_flags.append(params[i])\n i += 1\n continue\n else:\n arg_flags[params[i]] = process_arg(params[i+1])\n i += 2\n continue\n return solo_flags, arg_flags", "def parse_options():\n\n parser = optparse.OptionParser(description='PySpark WordCount.')\n parser.add_option('-i', '--input', action='store', nargs=1,\n default='s3://dimajix-training/data/alice/',\n help='Input file or directory')\n parser.add_option('-o', '--output', action='store', nargs=1,\n default='alice-counts',\n help='Output file or directory')\n\n (opts, args) = parser.parse_args()\n\n return opts", "def get_options_from_file(path):\n with open(path) as f:\n content = f.read()\n keys = re.findall(r\"%(.+):\", content)\n values = re.findall(r\":\\s*([\\w\\W]+?)\\s*(?:%|$)\", content)\n\n options = dict(zip(keys, values))\n return options", "def getlist(self, option, sep=',', chars=None):\n return [chunk.strip(chars) for chunk in option.split(sep)]", "def parse_kw_args(tagname, bits, args_spec=None, restrict=False):\n\n args = []\n\n if restrict:\n if args_spec is 
None:\n raise ValueError(\"you must pass an args_spec dict if you want to restrict allowed args\")\n allowed = list(args_spec.keys())\n do_validate = True\n else:\n do_validate = args_spec is not None\n\n for bit in bits:\n try:\n name, val = bit.split('=')\n except ValueError:\n raise template.TemplateSyntaxError(\n \"keyword arguments to '%s' tag must have 'key=value' form (got : '%s')\" \\\n % (tagname, bit)\n )\n\n name = str(name)\n if do_validate:\n if restrict:\n if name in allowed:\n # we only want each name once\n del allowed[allowed.index(name)]\n else:\n raise template.TemplateSyntaxError(\n \"keyword arguments to '%s' tag must be one of % (got : '%s')\" \\\n % (tagname, \",\".join(allowed), name)\n )\n\n validate = args_spec[name]\n else:\n validate = args_spec.get(name, None)\n\n if validate is not None:\n if callable(validate):\n try:\n val = validate(val)\n except Exception, e:\n raise template.TemplateSyntaxError(\n \"invalid optional argument '%s' for '%s' tag: '%s' (%s)\" \\\n % (tagname, name, val, e)\n )\n else:\n # assume re\n if re.match(validate, val) is None:\n raise template.TemplateSyntaxError(\n \"invalid optional argument '%s' for '%s' tag: '%s' (doesn't match '%s')\" \\\n % (tagname, name, val, validate)\n )\n\n # should be ok if we managed to get here \n args.append((name, val))\n\n return args", "def parseCmdLine(cmdLine):\n files=[]\n modifiers=[]\n for i in range(len(cmdLine)):\n arg = cmdLine[i]\n if arg[:2] != '--':\n files = cmdLine[i:]\n return (modifiers, files)\n \n arg = arg[2:]\n parts = arg.split('=',1)\n modifiers.append((parts[0], parts[1]))\n return (modifiers, files)", "def parse_options_plotRates(parser):\n parsers = parser.add_subparsers()\n sub_parser = parsers.add_parser(\"plotRates\")\n sub_parser.add_argument(\"-i\", \"--interactive\", dest=\"interactive\", action='store_false', help=\"Draw plots on screen.\")\n sub_parser.add_argument(\"-n\", \"--nevents\", dest=\"nevents\", default=1, type=int, help=\"Total nmumber of events\")\n sub_parser.add_argument(\"-b\", \"--bunches\", dest=\"bunches\", default=0, type=int, help=\"Number of colliding bunches\")\n sub_parser.add_argument(\"--pu\", dest=\"pu\", default=20, type=int, help=\"Average PU. default=20\")\n sub_parser.add_argument(\"--xsect\", dest=\"xsect\", default=80, type=float, help=\"Total cross section in mb. default=80 mb\")\n sub_parser.add_argument(\"--instlumi\", dest=\"instlumi\", default=1.2e34, type=float, help=\"Instantaneous luminosity. 
default=1.2e-34 cm-2s-1\")\n sub_parser.add_argument(\"--scale\", dest=\"scale\", default=1., type=float, help=\"Additional scale factor for rate calculate\")\n sub_parser.add_argument(\"-l\", \"--legacy\", dest=\"legacy\", action='store_true', help=\"Draw plots relative to legacy.\")\n\n opts, unknown = parser.parse_known_args()\n return opts", "def parseArgs(args):\n parsed = []\n for arg in args:\n print arg\n arg = arg.strip()\n interpretation = None\n try:\n interpretation = float(arg)\n if string.find(arg, \".\") == -1:\n interpretation = int(interpretation)\n except:\n # Oh - it was a string.\n interpretation = arg\n pass\n parsed.append(interpretation)\n return parsed", "def parse_option():\n parser = argparse.ArgumentParser(\"zdm H0 I Figures\")\n parser.add_argument(\n \"figure\",\n type=str,\n help=\"function to execute: ('fiducial, 'varyH0', 'H0vsEmax')\",\n )\n # parser.add_argument('--cmap', type=str, help=\"Color map\")\n # parser.add_argument('--distr', type=str, default='normal',\n # help='Distribution to fit [normal, lognorm]')\n args = parser.parse_args()\n\n return args", "def parseOpts():\n global dir_source\n global fileList\n global suffix\n global begin\n global name\n \n shouldExit = False\n \n # check options. If options is None, exit. \n for o, a in opts:\n if o in (\"-h\", \"--help\"): # get help\n getHelp()\n shouldExit = True\n elif o in (\"-v\", \"--version\"): # show version\n showVersion()\n shouldExit = True\n elif o in (\"-s\", \"--suffix\"): # set suffix\n suffix = a\n elif o in (\"-b\", \"--begin\"): # set begin\n begin = int(a)\n elif o in (\"-n\", \"--name\"): # specify a name\n name = a\n \n if shouldExit:\n sys.exit()\n \n # get dir_source\n if args is None or len(args) == 0:\n print \"SRT:no source dictionary.\"\n sys.exit()\n dir_source = args[0]\n try:\n fileList = os.listdir(dir_source)\n fileList.sort()\n except:\n print \"SRT:wrong path\"\n sys.exit()\n else:\n renameFiles()", "def cli_options():\n\n parser = argparse.ArgumentParser(\n description='c[apirca]grep',\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n parser.add_argument('-d', '--def', dest='defs',\n help='Network Definitions directory location. 
\\n',\n default='./def')\n\n # -i and -t can be used together, but not with any other option.\n ip_group = parser.add_argument_group()\n # take 1 or more IPs\n ip_group.add_argument('-i', '--ip', dest='ip', nargs='+', type=is_valid_ip,\n help='Return list of definitions containing the '\n 'IP(s).\\nMultiple IPs permitted.')\n\n ip_group.add_argument('-t', '--token', dest='token',\n help=('See if an IP is contained within the given '\n 'token.\\nMust be used in conjunction with '\n '-i/--ip [addr].'))\n\n exclusive_group = parser.add_mutually_exclusive_group()\n # the rest of the arguments are mutually exclusive with each other,\n # and -i / -t\n exclusive_group.add_argument('-c', '--cmp', dest='cmp', nargs=2,\n metavar=('OBJ', 'OBJ'),\n help=('Compare the two given network '\n 'definition tokens'))\n\n exclusive_group.add_argument('-g', '--gmp', dest='gmp', nargs=2,\n type=is_valid_ip, metavar=('IP', 'IP'),\n help=('Diff the network objects to'\n ' which the given IP(s) belong'))\n\n exclusive_group.add_argument('-o', '--obj', dest='obj', nargs='+',\n help=('Return list of IP(s) contained within '\n 'the given token(s)'))\n\n exclusive_group.add_argument('-s', '--svc', dest='svc', nargs='+',\n help=('Return list of port(s) contained '\n 'within given token(s)'))\n\n exclusive_group.add_argument('-p', '--port', dest='port', nargs=2,\n metavar=('PORT', 'PROTO'),\n help=('Returns a list of tokens containing '\n 'the given port and protocol'))\n\n return parser", "def option(self, spec):\n return spec.options[self.rng.integers(len(spec.options))]", "def get_tag_options(label_matches):\r\n\ttag_options = []\r\n\tfor key in label_matches.keys():\r\n\t\tif key[1] not in tag_options:\r\n\t\t\ttag_options.append(key[1])\r\n\treturn tag_options", "def _ParseFilterOptions(self, options):\n names = [u'date_filters', u'filter_file']\n helpers_manager.ArgumentHelperManager.ParseOptions(\n options, self, names=names)\n\n extensions_string = self.ParseStringOption(options, u'extensions_string')\n self._ParseExtensionsString(extensions_string)\n\n names_string = getattr(options, u'names_string', None)\n self._ParseNamesString(names_string)\n\n signature_identifiers = getattr(options, u'signature_identifiers', None)\n try:\n self._ParseSignatureIdentifiers(\n self._data_location, signature_identifiers)\n except (IOError, ValueError) as exception:\n raise errors.BadConfigOption(exception)\n\n if self._filter_file:\n self.has_filters = True\n else:\n self.has_filters = self._filter_collection.HasFilters()", "def parse_options_header(value):\n def _tokenize(string):\n for match in _option_header_piece_re.finditer(string):\n key, value = match.groups()\n key = unquote_header_value(key)\n if value is not None:\n value = unquote_header_value(value)\n yield key, value\n\n if not value:\n return '', {}\n\n parts = _tokenize(';' + value)\n name = next(parts)[0]\n extra = dict(parts)\n return name, extra", "def parse_options():\n global parser\n parser.add_option(\"-r\", \"--regions\", dest=\"input_brain_regions\",\n help=\"Input file for brain region data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-g\", \"--gray\", dest=\"input_gray_levels\",\n help=\"Input file for gray level data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-n\", \"--nissl\", dest=\"input_nissl\",\n help=\"Input file for nissl data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-o\", \"--output\", dest=\"output_folder_path\",\n help=\"Output folder for extracted data files\",\n action=\"store\", 
type='string')\n\n return parser.parse_args()", "def _ParseSendChangeOptions(bot_spec, options):\n values = [\n ('user', options.user),\n ('name', options.name),\n ]\n # A list of options to copy.\n optional_values = (\n 'email',\n 'revision',\n 'root',\n 'patchlevel',\n 'issue',\n 'patchset',\n 'target',\n 'project',\n )\n for option_name in optional_values:\n value = getattr(options, option_name)\n if value:\n values.append((option_name, value))\n\n # Not putting clobber to optional_names\n # because it used to have lower-case 'true'.\n if options.clobber:\n values.append(('clobber', 'true'))\n\n for bot, tests in bot_spec:\n values.append(('bot', ('%s:%s' % (bot, ','.join(tests)))))\n\n return values", "def user_input_choices(self, msg, *options):\n choices = ['%s %s' % (self.prefix, msg)]\n choices += [\n \"%s. %s\" % (num, opt) for num, opt in enumerate(options, 1)]\n try:\n input_str = int(\n vim.eval('inputlist(%s)' % self.prepare_value(choices)))\n except (KeyboardInterrupt, ValueError):\n input_str = 0\n\n if not input_str:\n self.message('Cancelled!')\n return False\n\n try:\n return options[input_str - 1]\n except (IndexError, ValueError):\n self.error('Invalid option: %s' % input_str)\n return self.user_input_choices(msg, *options)", "def _parse_arg_list(self):\n\t\targ_list = {}\n\t\tfor arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:\n\t\t\targ_list[arg[0][1:]] = arg[1]\n\t\n\t\treturn arg_list", "def _find_options(self):\r\n elements = self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def split_option(option, length):\n length = list(length)\n args = option.split(',')\n if len(args) not in length:\n sys.stderr.write('mpl-graph: Argument expected length {}. '\n 'Actual length of \"{}\" is {}\\n'.format(length, option, len(args)))\n sys.exit(ERR_NUM_OPTIONS)\n return args", "def parse_options() -> Namespace:\n\n opt_parser = OptionParser(\n \"liftoff\",\n [\n \"script\",\n \"config_path\",\n \"procs_no\",\n \"gpus\",\n \"per_gpu\",\n \"no_detach\",\n \"verbose\",\n \"copy_to_clipboard\",\n \"time_limit\", # This should be removed in favour of start_by\n \"start_by\",\n \"end_by\",\n \"optimize\",\n \"args\",\n \"filters\",\n \"results_path\",\n \"name\",\n \"max_runs\",\n \"shuffle\",\n ],\n )\n return opt_parser.parse_args()", "def options(self, section, *args):\n cnt = self._check_args('options', 2, 3, args)\n try:\n return ConfigParser.RawConfigParser.options(self, section)\n except ConfigParser.NoSectionError:\n if cnt == 1:\n return args[0]\n raise", "def get_options(defaults, usage, description='',epilog=''):\n parser=OptionParser(usage=usage,description=description,epilog=epilog)\n parser.add_option(\"-i\",\"--infile\",action=\"store\",dest=\"infile\",type=\"string\",\n default=defaults.get('infiles'),\n help='Name of input file of contigs, in .fasta')\n parser.add_option(\"-k\",\"--kmers\",action=\"store\",dest=\"kmers\",type=\"string\",\n default=defaults.get('kmers'),\n help='Sizes of k-mers to use as features, comma separated list')\n (options,args)=parser.parse_args()\n\n return (options, args)" ]
[ "0.5681802", "0.5620391", "0.5565046", "0.541359", "0.53751975", "0.5340231", "0.5283279", "0.5276728", "0.52554685", "0.5251639", "0.51984483", "0.5196909", "0.519201", "0.5174309", "0.5122386", "0.51024044", "0.51020473", "0.5095427", "0.5034037", "0.5030856", "0.50224656", "0.50154805", "0.4999125", "0.4985558", "0.49723536", "0.49649593", "0.49568874", "0.49501932", "0.49457142", "0.49314326", "0.4909702", "0.49089214", "0.4899176", "0.48945922", "0.48856264", "0.48747867", "0.48660278", "0.48649848", "0.48600608", "0.48480764", "0.4844047", "0.48378232", "0.48315924", "0.48313382", "0.48311242", "0.4830954", "0.48240346", "0.48235592", "0.4821683", "0.48210287", "0.48099124", "0.4808087", "0.4804755", "0.47982782", "0.47952265", "0.47937483", "0.47907114", "0.47824603", "0.4775003", "0.4769538", "0.47612673", "0.47603697", "0.4759387", "0.47589877", "0.4757818", "0.47543478", "0.47529587", "0.47526434", "0.4747803", "0.4744013", "0.47419336", "0.4735502", "0.47326082", "0.4730961", "0.4726055", "0.47010168", "0.46821746", "0.46771702", "0.4671631", "0.46689928", "0.46685436", "0.46616855", "0.46570438", "0.46568704", "0.46459672", "0.46450046", "0.46309984", "0.46299446", "0.46276668", "0.4624354", "0.46225905", "0.46185157", "0.46089193", "0.460565", "0.46038994", "0.4596164", "0.45940652", "0.45884493", "0.45779943", "0.45770994" ]
0.5834238
0
namespaces typecodes representing global elements with literal encoding. typeCode typecode representing an element. namespaceURI namespace literal True/False
def _globalElement(self, typeCode, namespaceURI, literal):
    if literal:
        typeCode.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \
            %{'prefix':self._getPrefix(namespaceURI), 'name':typeCode.oname, 'namespaceURI':namespaceURI}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XmlTypeNamespace(self) -> str:", "def is_namespace_type(self):\n raise exceptions.NotImplementedError()", "def GetNamespaces(self):\n return list(self.type_namespaces_map.values())", "def element_type(self) -> global___Type:", "def patch_well_known_namespaces(etree_module):\n etree_module._namespace_map.update({\n \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\": \"rdf\", \n \"http://purl.org/rss/1.0/\": \"rss\", \n \"http://purl.org/rss/1.0/modules/taxonomy/\": \"taxo\", \n \"http://purl.org/dc/elements/1.1/\": \"dc\", \n \"http://purl.org/rss/1.0/modules/syndication/\": \"syn\", \n \"http://www.w3.org/2003/01/geo/wgs84_pos#\": \"geo\"})", "def _AppIdNamespaceKindForKey(self, key):\n last_path = key.path().element_list()[-1]\n return (datastore_types.EncodeAppIdNamespace(key.app(), key.name_space()),\n last_path.type())", "def hasNamespaceURI(self, *args):\n return _libsbml.XMLToken_hasNamespaceURI(self, *args)", "def GetNamespace(self, namespace_name):\n return self.type_namespaces_map.get(namespace_name, None)", "def visit_Typedef(self, node):\n return str_node(node)", "def test_namespaceFound(self):\n xp = XPathQuery(\"/foo[@xmlns='testns']/bar\")\n self.assertEqual(xp.matches(self.e), 1)", "def namespaces(self) -> NamespacesType:\n return self.schema.namespaces", "def getEnumerationTypeXmlStub (typeName): \n\tsimpleType = createSchemaElement(\"simpleType\")\n\tsimpleType.setAttribute (\"name\",typeName)\n\trestriction = createSchemaElement(\"restriction\")\n\trestriction.setAttribute (\"base\", qp(\"token\"))\n\tsimpleType.appendChild (restriction)\n\treturn simpleType", "def _getTypeClass(self, namespaceURI, localName):\r\n bti = BaseTypeInterpreter()\r\n simpleTypeClass = bti.get_typeclass(localName, namespaceURI)\r\n return simpleTypeClass", "def XmlTypeName(self) -> str:", "def XmlNamespace(self) -> str:", "def getNamespaceURI(self, *args):\n return _libsbml.XMLToken_getNamespaceURI(self, *args)", "def translate_custom_types(self):\n\n\t\t# Preparing variables\n\t\ta_residue_names = self.a_atoms[\"residue_name\"]\t\t# Loads the names of residues\n\t\ta_atom_name = self.a_atoms[\"atom_name\"]\t\t# Loads the names of the atoms\n\t\ta_atom_symbol = self.a_atoms[\"element_symbol\"]\t\t# Loads the elements symbols\n\t\tl_s_custom_types = []\t\t# Contains the list of converted types\n\t\td_translate_custom = {\t\t# Conversion dictionary for custom types\n\t\t\t\"O\": \"OC\",\n\t\t\t\"H\": \"H\",\n\t\t\t\"N\": \"NAM\",\n\t\t\t\"C\": \"XOT\",\n\t\t\t\"CA\": \"XOT\",\n\t\t\t\"CB\": \"XOT\",\n\t\t\t\"OXT\": \"XOT\"\n\t\t}\n\n\t\t# STEP 1 : Converting the atom types ---------------- #\n\t\t# For each element to convert\n\t\tfor i_element in range(len(a_residue_names)):\n\n\t\t\t# If the residue is one of the main amino acids\n\t\t\tif a_residue_names[i_element] in elem_config.RES:\n\n\t\t\t\t# Hydrogen\n\t\t\t\tif a_atom_symbol[i_element] == \"H\":\n\t\t\t\t\ts_custom_type = \"H\"\n\n\t\t\t\t# If the atom is one of the main carbon chain\n\t\t\t\telif a_atom_name[i_element] in d_translate_custom.keys():\n\t\t\t\t\ts_custom_type = d_translate_custom[a_atom_name[i_element]]\n\n\t\t\t\t# Nitrogen in Arginine\n\t\t\t\telif a_residue_names[i_element] == \"ARG\" and a_atom_name[i_element] in elem_config.NARG[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NBAS\"\n\n\t\t\t\t# Carbon SP2 in aromatic ring\n\t\t\t\telif a_residue_names[i_element] in elem_config.CAR.keys() and a_atom_name[i_element] in elem_config.CAR[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = 
\"CAR\"\n\n\t\t\t\t# Oxygen in hydroxyl or phenol\n\t\t\t\telif a_residue_names[i_element] in elem_config.OHY.keys() and a_atom_name[i_element] == elem_config.OHY[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"OH\"\n\n\t\t\t\t# Nitrogen in amide\n\t\t\t\telif a_residue_names[i_element] in elem_config.NAM.keys() and a_atom_name[i_element] == elem_config.NAM[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NAM\"\n\n\t\t\t\t# Nitrogen in Histidine\n\t\t\t\telif a_residue_names[i_element] in elem_config.NHIS.keys() and a_atom_name[i_element] in elem_config.NHIS[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NBAS\"\n\n\t\t\t\t# Central carbon from ARG, GLN, GLU, ASP, ASN\n\t\t\t\telif a_residue_names[i_element] in elem_config.CE.keys() and elem_config.CE[a_residue_names[i_element]] == a_atom_name[i_element]:\n\t\t\t\t\ts_custom_type = \"CAR\"\n\n\t\t\t\t# Oxygen in carbonyl\n\t\t\t\telif a_residue_names[i_element] in elem_config.OC.keys() and a_atom_name[i_element] == elem_config.OC[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"OC\"\n\n\t\t\t\t# Oxygen in carboxylate and oxygen in C-terminal\n\t\t\t\telif a_residue_names[i_element] in elem_config.OOX.keys() and \\\n\t\t\t\t\t\t(a_atom_name[i_element] == elem_config.OOX[a_residue_names[i_element]][0] or\n\t\t\t\t\t\t a_atom_name[i_element] == elem_config.OOX[a_residue_names[i_element]][1]):\n\t\t\t\t\ts_custom_type = \"OOX\"\n\n\t\t\t\t# Nitrogen in Lysine\n\t\t\t\telif a_residue_names[i_element] in elem_config.NLYS.keys() and a_atom_name[i_element] == elem_config.NLYS[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NBAS\"\n\n\t\t\t\t# Unknown element within a amino acid\n\t\t\t\telse:\n\t\t\t\t\ts_custom_type = \"XOT\"\n\t\t\t# End if\n\n\t\t\t# If the element is a metallic atom\n\t\t\telif a_atom_symbol[i_element] in elem_config.METAL:\n\t\t\t\ts_custom_type = \"META\"\n\n\t\t\t# If the element is a halogen\n\t\t\telif a_atom_symbol[i_element] in elem_config.HALO:\n\t\t\t\ts_custom_type = \"HALO\"\n\n\t\t\t# If the element is a water molecule\n\t\t\telif a_residue_names[i_element] == \"HOH\" and a_atom_name[i_element] == \"O\":\n\t\t\t\ts_custom_type = \"OOW\"\n\n\t\t\t# If the element is not known\n\t\t\telse:\n\n\t\t\t\t# If the element can be converted\n\t\t\t\tif a_atom_symbol[i_element] in d_translate_custom.keys():\n\t\t\t\t\ts_custom_type = d_translate_custom[a_atom_symbol[i_element]]\n\n\t\t\t\t# If it cannot\n\t\t\t\telse:\n\t\t\t\t\ts_custom_type = \"HETATM\"\n\t\t\t# End if\n\n\t\t\tl_s_custom_types.append(s_custom_type)\t\t# Saves the new element type\n\t\t# End for\n\t\t# END STEP 1 ---------------------------------------- #\n\n\t\t# STEP 2 : Saving the list of custom types ---------- #\n\t\tself.a_atoms[\"custom_type\"] = l_s_custom_types\t\t# Saves the list of custom types\n\t\t# END STEP 2 ---------------------------------------- #", "def header_hook(header, data):\n\n for e in header.enums:\n e[\"x_namespace\"] = e[\"namespace\"]", "def namespace_for(uri: Union[URIRef, Namespace, str]) -> str:\n uri = str(uri)\n if uri not in namespaces.values():\n namespaces[AnonNS().ns] = uri\n return [k for k, v in namespaces.items() if uri == v][0]", "def hasNamespaceNS(self, *args):\n return _libsbml.XMLToken_hasNamespaceNS(self, *args)", "def _getElement(self, element, literal=False, local=False, namespaceURI=None):\r\n if not element.isElement():\r\n raise TypeError, 'Expecting an ElementDeclaration'\r\n\r\n tc = None\r\n elementName = element.getAttribute('name')\r\n tp = 
element.getTypeDefinition('type')\r\n\r\n typeObj = None\r\n if not (tp or element.content):\r\n nsuriType,localName = element.getAttribute('type')\r\n typeClass = self._getTypeClass(nsuriType,localName)\r\n \r\n typeObj = typeClass(elementName)\r\n elif not tp:\r\n tp = element.content\r\n\r\n if not typeObj:\r\n typeObj = self._getType(tp, elementName, literal, local, namespaceURI)\r\n\r\n minOccurs = int(element.getAttribute('minOccurs'))\r\n typeObj.optional = not minOccurs\r\n\r\n maxOccurs = element.getAttribute('maxOccurs')\r\n typeObj.repeatable = (maxOccurs == 'unbounded') or (int(maxOccurs) > 1)\r\n\r\n return typeObj", "def xmlrpc_namespace():", "def test_xml_to_dict_net_namespace(self):\n xml = \"\"\"\n <a\n xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\"\n >\n <b xmlns=\"something\">b</b>\n <!-- Comment, ignore it -->\n </a>\n \"\"\"\n xmlns = {\n \"_\": utils.NETCONF_NAMESPACE\n }\n result = utils.generate_dict_node(etree.XML(xml), xmlns)\n # check dict\n self.assertEqual(\n {'a': {'_something@b': 'b'}},\n result\n )\n # check xmlns\n self.assertEqual(\n {\n '_': utils.NETCONF_NAMESPACE,\n '_something': 'something'\n }, xmlns\n )", "def __init__ (self, ns_or_tagraw, pred=None, value=None) :\n self.__namespace__ = None\n self.__predicate__ = None\n self.__value__ = None\n self.__ismachinetag__ = False\n self.__isnumeric__ = False\n\n if pred :\n\n re_nspred = re.compile(r\"^([a-z](?:[a-z0-9_]+))$\", re.IGNORECASE)\n\n if re_nspred.match(ns_or_tagraw) and re_nspred.match(pred) and value :\n self.__namespace__ = ns_or_tagraw\n self.__predicate__ = pred\n self.__value__ = value\n else :\n\n re_tag = re.compile(r\"^([a-z](?:[a-z0-9_]+))\\:([a-z](?:[a-z0-9_]+))\\=(.+)$\", re.IGNORECASE)\n m = re_tag.findall(ns_or_tagraw)\n\n if m :\n self.__namespace__ = m[0][0]\n self.__predicate__ = m[0][1]\n self.__value__ = m[0][2]\n\n if self.__namespace__ and self.__predicate__ and self.__value__ :\n self.__ismachinetag__ = True\n\n valtype = type(self.__value__)\n\n if valtype == types.IntType or valtype == types.FloatType :\n self.__isnumeric__ = True\n else :\n re_num = re.compile(r\"^-?\\d+(\\.\\d+)?$\", re.IGNORECASE)\n m = re_num.findall(self.__value__)\n\n if m :\n\n self.__isnumeric__ = True\n self.__value__ = unicode(self.__value__)\n\n if m[0] :\n self.__value_numeric__ = float(self.__value__)\n else :\n self.__value_numeric__ = int(self.__value__)", "def test_getLocalType(self):\n cases = [\n (self.test_eac + \"NE00800.xml\", \"Archival Series\"),\n (self.test_eac + \"NE00916.xml\", \"Archival Collection\"),\n (self.test_eac + \"NE01201.xml\", \"Person\"),\n (self.test_eac + \"NE01000.xml\", \"Glossary Term\"),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source,'http://www.example.com')\n self.assertNotEqual(doc, None)\n result = doc.getLocalType()\n self.assertEqual(result, expected)", "def hasURI(self, *args):\n return _libsbml.XMLNamespaces_hasURI(self, *args)", "def getNamespace(self):\n pass;", "def idl_type(field, namespace):\n\n out = ''\n if field.is_map:\n out = 'map <{0},'.format(idl_type(field.map_key, namespace))\n\n if field.is_array:\n out += 'repeated '\n\n if field.data_type in (schema.Field.DataType.STRUCT,\n schema.Field.DataType.ENUM):\n out += field.metadata.full_name.replace(namespace, '').strip('.')\n else:\n out += field.data_type.value\n\n if field.is_map:\n out += '>'\n\n return out", "def namespaces(self):\n return ()", "def getNamespace(self, parent: ghidra.program.model.symbol.Namespace, namespaceName: unicode) -> 
ghidra.program.model.symbol.Namespace:\n ...", "def test_get_namespaces_names(self):\n pass", "def _getTypeCode(self, parameters, literal=False):\r\n ofwhat = []\r\n for part in parameters:\r\n namespaceURI,localName = part.type\r\n\r\n if part.element_type:\r\n #global element\r\n element = self._wsdl.types[namespaceURI].elements[localName]\r\n tc = self._getElement(element, literal=literal, local=False, namespaceURI=namespaceURI)\r\n else:\r\n #local element\r\n name = part.name\r\n typeClass = self._getTypeClass(namespaceURI, localName)\r\n if not typeClass:\r\n tp = self._wsdl.types[namespaceURI].types[localName]\r\n tc = self._getType(tp, name, literal, local=True, namespaceURI=namespaceURI)\r\n else:\r\n tc = typeClass(name)\r\n ofwhat.append(tc)\r\n return ofwhat", "def _match_entry_type_string(code_entry, string_list):\n entry_type = re.match(r\"<(AST.*):.*\", code_entry.get('type')).group(1)\n return bool(entry_type in string_list)", "def type(self) -> global___Type:", "def _getPrefix(self, namespaceURI):\r\n prefixDict = self._getPrefixDict()\r\n if prefixDict.has_key(namespaceURI):\r\n prefix = prefixDict[namespaceURI]\r\n else:\r\n prefix = 'ns1'\r\n while prefix in prefixDict.values():\r\n prefix = 'ns%d' %int(prefix[-1]) + 1\r\n prefixDict[namespaceURI] = prefix\r\n return prefix", "def test_createElementNS():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS();\n x.createElementNS(\"foo\");\n x.createElementNS(\"foo\", \"bar\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS(\"foo\", \"script\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS(\"foo\", bar);\n \"\"\").failed()\n\n # Test for https://github.com/mozilla/amo-validator/issues/368\n assert not _do_test_raw(\"\"\"\n var x = \"foo\",\n nsXUL = \"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul\";\n\n x.createElementNS(nsXUL, 'panelview')\n \"\"\").failed()\n\n # Creating a <script> element raises a warning of course.\n assert _do_test_raw(\"\"\"\n var x = \"foo\",\n nsXUL = \"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul\";\n\n x.createElementNS(nsXUL, 'script')\n \"\"\").failed()", "def _fixNS(self, namespace):\n if isinstance(namespace, bytes):\n namespace = str(namespace, encoding=\"utf-8\")\n\n if namespace == OPENID_NS:\n if self._openid_ns_uri is None:\n raise UndefinedOpenIDNamespace('OpenID namespace not set')\n else:\n namespace = self._openid_ns_uri\n\n if namespace != BARE_NS and not isinstance(namespace, str):\n raise TypeError(\n \"Namespace must be BARE_NS, OPENID_NS or a string. got %r\" %\n (namespace, ))\n\n if namespace != BARE_NS and ':' not in namespace:\n fmt = 'OpenID 2.0 namespace identifiers SHOULD be URIs. 
Got %r'\n warnings.warn(fmt % (namespace, ), DeprecationWarning)\n\n if namespace == 'sreg':\n fmt = 'Using %r instead of \"sreg\" as namespace'\n warnings.warn(\n fmt % (SREG_URI, ),\n DeprecationWarning, )\n return SREG_URI\n\n return namespace", "def containsUri(self, *args):\n return _libsbml.XMLNamespaces_containsUri(self, *args)", "def _get_element_ns(self, element):\n\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n ns = None\n for key in self._client.wsdl.schema.types.keys():\n if (key[0] == element):\n ns = key[1]\n break\n\n return ns", "def SBMLNamespaces_isSBMLNamespace(*args):\n return _libsbml.SBMLNamespaces_isSBMLNamespace(*args)", "def test_typedef00801m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00801m/typeDef00801m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00801m/typeDef00801m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def hasNS(self, *args):\n return _libsbml.XMLNamespaces_hasNS(self, *args)", "def test_type_attribute(self):\n\n self._create_string()\n self.assertEquals(\"%s:%s\" % (\"xs\",\"string\"), self.string.schema_node.get(\"type\"))", "def get_pyxb_namespaces():\n return pyxb.namespace.utility.AvailableNamespaces()", "def _BuildTypeMaps(self, type_namespaces):\n for type_namespace in type_namespaces:\n self.type_namespaces_map[type_namespace.namespace] = type_namespace\n for entity_type in type_namespace.valid_types_map.values():\n if entity_type.guid:\n if entity_type.guid in self.type_guids_map:\n dup_guid_entry = self.type_guids_map[entity_type.guid]\n dup_guid_type = self.GetEntityType(dup_guid_entry.namespace,\n dup_guid_entry.typename)\n if dup_guid_type is None:\n raise RuntimeError('Duplicate type with guid ' + entity_type.guid\n + ' should always be mapped')\n entity_type.AddFinding(\n findings_lib.DuplicateGuidsError(type_namespace.namespace,\n entity_type, dup_guid_type))\n dup_guid_type.AddFinding(\n findings_lib.DuplicateGuidsError(dup_guid_entry.namespace,\n dup_guid_type, entity_type))\n self.type_guids_map[entity_type.guid] = EntityIdByEntry(\n namespace=type_namespace.namespace, typename=entity_type.typename)", "def test_read_net_namespace(self):\n pass", "def test_typedef00205m_type_def00205m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00205m/typeDef00205m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00205m/typeDef00205m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_typedef00802m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00802m/typeDef00802m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00802m/typeDef00802m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def get_type(self) -> str:\n # Note: this name conflicts with existing python builtins\n return self[\"Sns\"][\"Type\"]", "def pyxb_get_type_name(obj_pyxb):\n return pyxb_get_namespace_name(obj_pyxb).split('}')[-1]", "def getTypeCode(self):\n return _libsbml.ReplacedBy_getTypeCode(self)", "def test_typedef00202m_type_def00202m1_p(mode, save_output, output_format):\n assert_bindings(\n 
schema=\"sunData/ElemDecl/typeDef/typeDef00202m/typeDef00202m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00202m/typeDef00202m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_typedef01201m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef01201m/typeDef01201m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef01201m/typeDef01201m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def astType(cls, source):\n if source == '':\n return cls.BLANK\n if source == \"OPENQASM 2.0;\":\n return cls.DECLARATION_QASM_2_0\n x = QTRegEx.COMMENT.search(source)\n if x:\n return cls.COMMENT\n x = QTRegEx.INCLUDE.search(source)\n if x:\n return cls.INCLUDE\n x = QTRegEx.CTL_2.search(source)\n if x:\n if x.group(1) == 'if':\n return cls.CTL_2\n x = QTRegEx.QREG.search(source)\n if x:\n return cls.QREG\n x = QTRegEx.CREG.search(source)\n if x:\n return cls.CREG\n x = QTRegEx.MEASURE.search(source)\n if x:\n return cls.MEASURE\n x = QTRegEx.BARRIER.search(source)\n if x:\n return cls.BARRIER\n x = QTRegEx.GATE.search(source)\n if x:\n return cls.GATE\n x = QTRegEx.OP.search(source)\n if x:\n return cls.OP\n return cls.UNKNOWN", "def validateOneNamespace(self, doc, elem, prefix, ns, value):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n if elem is None: elem__o = None\n else: elem__o = elem._o\n if ns is None: ns__o = None\n else: ns__o = ns._o\n ret = libxml2mod.xmlValidateOneNamespace(self._o, doc__o, elem__o, prefix, ns__o, value)\n return ret", "def test_get_node_type_name(self):\n pass", "def test_typedef00502m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00502m/typeDef00502m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00502m/typeDef00502m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_typedef00204m_type_def00204m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00204m/typeDef00204m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00204m/typeDef00204m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def SBMLNamespaces_getSupportedNamespaces():\n return _libsbml.SBMLNamespaces_getSupportedNamespaces()", "def __generate_object_term__(self, datatype, value):\n if datatype == NS_MGR.xsd.anyURI.rdflib:\n term = rdflib.URIRef(value)\n elif datatype:\n term = rdflib.Literal(value, datatype=datatype)\n else:\n term = rdflib.Literal(value)\n return term", "def XrefTypeName(typecode):\n assert typecode in _ref_types, \"unknown reference type %d\" % typecode\n return _ref_types[typecode]", "def getTypeCode(self):\n return _libsbml.SBMLDocument_getTypeCode(self)", "def test_typedef00203m_type_def00203m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00203m/typeDef00203m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00203m/typeDef00203m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", 
"def _BuildNamespaceFolderMap(self, type_folders):\n for folder in type_folders:\n self.namespace_folder_map[folder.local_namespace.namespace] = folder", "def setOpenIDNamespace(self, openid_ns_uri, implicit):\n if isinstance(openid_ns_uri, bytes):\n openid_ns_uri = str(openid_ns_uri, encoding=\"utf-8\")\n if openid_ns_uri not in self.allowed_openid_namespaces:\n raise InvalidOpenIDNamespace(openid_ns_uri)\n\n self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit)\n self._openid_ns_uri = openid_ns_uri", "def explore_type(name, datatype, is_child):\n actual_type = datatype.strip_typedefs()\n if is_child:\n print (\"The type of %s is a typedef of type '%s'.\" %\n (name, str(actual_type)))\n else:\n print (\"The type '%s' is a typedef of type '%s'.\" %\n (name, str(actual_type)))\n\n Explorer.explore_type(name, actual_type, is_child)\n return False", "def getTypeCode(self):\n return _libsbml.ReplacedElement_getTypeCode(self)", "def get_type_label(type_url):\n return type_dict[type_url]", "def _cim_scope_code_type():\n return {\n 'name' : 'cim_scope_code_type',\n 'is_open' : False,\n 'doc' : 'This would cover quality issues with the CIM itself',\n 'members' : [\n ('dataset', None),\n ('software', None),\n ('service', None),\n ('model', None),\n ('modelComponent', None),\n ('simulation', None),\n ('experiment', None),\n ('numericalRequirement', None),\n ('ensemble', None),\n ('file', None),\n ],\n }", "def test_typedef00402m_type_def00402m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00402m/typeDef00402m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00402m/typeDef00402m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def _some_namespaces(self):\n n = Namespace(doc='top')\n n.add_option('aaa', '2011-05-04T15:10:00', 'the a',\n short_form='a',\n from_string_converter=dtu.datetime_from_ISO_string\n )\n n.c = Namespace(doc='c space')\n n.c.add_option('fred', 'stupid', 'husband from Flintstones')\n n.c.add_option('wilma', 'waspish', 'wife from Flintstones')\n n.c.e = Namespace(doc='e space')\n n.c.e.add_option('dwight',\n default=97,\n doc='my uncle')\n n.c.add_option('dwight',\n default=98,\n doc='your uncle')\n n.d = Namespace(doc='d space')\n n.d.add_option('fred', 'crabby', 'male neighbor from I Love Lucy')\n n.d.add_option('ethel', 'silly',\n 'female neighbor from I Love Lucy')\n n.x = Namespace(doc='x space')\n n.x.add_option('size', 100, 'how big in tons', short_form='s')\n n.x.add_option('password', 'secret', 'the password')\n return n", "def test_typedef00501m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00501m/typeDef00501m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00501m/typeDef00501m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def make_key(element_name, element_type, namespace):\n # only distinguish 'element' vs other types\n if element_type in ('complexType', 'simpleType'):\n eltype = 'complexType'\n else:\n eltype = element_type\n if eltype not in ('element', 'complexType', 'simpleType'):\n raise RuntimeError(\"Unknown element type %s = %s\" % (element_name, eltype))\n return (element_name, eltype, namespace)", "def test_typedef00901m1_positive(mode, save_output, output_format):\n assert_bindings(\n 
schema=\"sunData/ElemDecl/typeDef/typeDef00901m/typeDef00901m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00901m/typeDef00901m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def _add_type(self, production, index, m_type):\n fully_qualified_name = None\n current_namespace = self._get_current_namespace()\n if current_namespace is not None:\n fully_qualified_name = current_namespace.fully_qualified_name()\n namespace_types = self._get_type_or_namespace_from_fully_qualified_name(fully_qualified_name)\n if m_type.name in namespace_types:\n raise ParseError(self.production_to_coord(production, index),\n \"Name '{0}' already exists\".format(m_type.fully_qualified_name()))\n namespace_types[m_type.name] = m_type", "def test_typedef01202m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef01202m/typeDef01202m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef01202m/typeDef01202m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def isCountyName(elem):\r\n return (elem.attrib['k'] == \"tiger:county\")", "def typeName (self, typecode):\n if typecode == qmf2.SCHEMA_DATA_VOID: return \"void\"\n elif typecode == qmf2.SCHEMA_DATA_BOOL: return \"bool\"\n elif typecode == qmf2.SCHEMA_DATA_INT: return \"int\"\n elif typecode == qmf2.SCHEMA_DATA_FLOAT: return \"float\"\n elif typecode == qmf2.SCHEMA_DATA_STRING: return \"string\"\n elif typecode == qmf2.SCHEMA_DATA_MAP: return \"map\"\n elif typecode == qmf2.SCHEMA_DATA_LIST: return \"list\"\n elif typecode == qmf2.SCHEMA_DATA_UUID: return \"uuid\"\n else:\n raise ValueError (\"Invalid type code: %s\" % str(typecode))", "def element_type(self):\r\n result = conf.lib.clang_getElementType(self)\r\n if result.kind == TypeKind.INVALID:\r\n raise Exception('Element type not available on this type.')\r\n\r\n return result", "def getURI(self):\n return _libsbml.ISBMLExtensionNamespaces_getURI(self)", "def type(self) -> URIType:\n if self.study_instance_uid is None:\n return URIType.SERVICE\n elif self.series_instance_uid is None:\n return URIType.STUDY\n elif self.sop_instance_uid is None:\n return URIType.SERIES\n elif self.frames is None:\n return URIType.INSTANCE\n return URIType.FRAME", "def exportTypes( c ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n xml = \"\"\n cT = sqlQuery ( c, \"select * from CrisisKind;\" )\n oT = sqlQuery ( c, \"select * from OrganizationKind;\" )\n pT = sqlQuery ( c, \"select * from PersonKind;\" ) \n for i in cT:\n xml += openTagAtt (\"CrisisKind\", \"crisisKindIdent\", i[0])\n xml += openCloseTag (\"Name\", i[1])\n xml += openCloseTag (\"Description\", i[2])\n xml += closeTag (\"CrisisKind\") \n for i in oT:\n xml += openTagAtt (\"OrganizationKind\", \"organizationKindIdent\", i[0])\n xml += openCloseTag (\"Name\", i[1])\n xml += openCloseTag (\"Description\", i[2])\n xml += closeTag (\"OrganizationKind\")\n for i in pT:\n xml += openTagAtt (\"PersonKind\", \"personKindIdent\", i[0])\n xml += openCloseTag (\"Name\", i[1])\n xml += openCloseTag (\"Description\", i[2])\n xml += closeTag (\"PersonKind\")\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n return xml", "def page_namespace(tree):\n root_name = ET.QName(tree.getroot().tag)\n if root_name.localname == \"PcGts\":\n return root_name.namespace\n else:\n raise 
ValueError(\"Not a PAGE tree\")", "def SBMLNamespaces_getSBMLNamespaceURI(*args):\n return _libsbml.SBMLNamespaces_getSBMLNamespaceURI(*args)", "def _get_type_mapping():\n return {\n Box.SPACE_NAME: Box,\n Dict.SPACE_NAME: Dict,\n Discrete.SPACE_NAME: Discrete\n }", "def test_typedef00201m_type_def00201m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00201m/typeDef00201m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00201m/typeDef00201m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def dnaxmlroot(dnaxmlformattype):\n if dnaxmlformattype == 'stn':\n formattext = 'Station File'\n elif dnaxmlformattype == 'msr':\n formattext = 'Measurement File'\n else:\n raise ValueError(\"ValueError: dnaxmlformattype must be either 'stn' or 'msr'\")\n NS = 'http://www.w3.org/2001/XMLSchema-instance'\n location_attribute = '{%s}noNamespaceSchemaLocation' % NS\n dnaxmlroot = ET.Element('DnaXmlFormat', attrib={location_attribute: 'DynaML.xsd'})\n dnaxmlroot.set('type', formattext)\n return dnaxmlroot", "def test_typedef00301m_type_def00301m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00301m/typeDef00301m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00301m/typeDef00301m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def _from_c_repr(c_repr):\n # We create a dummy module with a global variable of the requested type,\n # parse that module, and return the type of the global variable.\n # Include stdint.h to recognize the intX_t typedefs.\n module = parse(\"\"\"\n #include <stdint.h>\n\n {} a;\n \"\"\".format(c_repr))\n return module.global_vars['a'].type", "def type(name):", "def test_value():\n uri = 'http://dbpedia.org/resource/California'\n values = [\n Value.from_uri(uri),\n Value.from_namespace_fragment('dbr', 'California'),\n ]\n for val in values:\n assert val.is_uri\n assert not val.is_literal\n assert val.uri == uri\n assert val.namespace == 'dbr'\n assert val.prefix == 'http://dbpedia.org/resource/'\n assert val.fragment == 'California'\n assert str(val) == val.rdf_format == f'<{uri}>'\n try:\n print(val.literal_value)\n assert False, 'should raise ValueError'\n except ValueError:\n pass\n try:\n print(val.lang)\n assert False, 'should raise ValueError'\n except ValueError:\n pass\n try:\n print(val.datatype)\n assert False, 'should raise ValueError'\n except ValueError:\n pass\n literal = '\"xyz\"@en^^<http://example.org/ns/userDatatype>'\n val = Value.from_literal(literal)\n assert val.literal_value == 'xyz'\n assert val.lang == 'en'\n assert val.datatype.rdf_format == '<http://example.org/ns/userDatatype>'\n assert val.rdf_format == literal\n literals = [\n ('false', 'boolean'),\n ('1', 'integer'),\n ('3.14', 'double'),\n ('\"hello\"', None),\n ]\n for literal, literal_type in literals:\n if literal in ('true', 'false'):\n python_literal = literal_eval(literal.title())\n else:\n python_literal = literal_eval(literal)\n print(literal, literal.title(), python_literal)\n for val in [Value.from_literal(literal), Value.from_python_literal(python_literal)]:\n assert val.literal_value == python_literal\n assert val.lang is None\n if literal_type is None:\n assert val.datatype is None\n else:\n assert val.datatype.rdf_format == 
f'<http://www.w3.org/2001/XMLSchema#{literal_type}>'\n assert val.rdf_format == literal", "def test_typedef01301m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef01301m/typeDef01301m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef01301m/typeDef01301m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def explore_expr(expr, value, is_child):\n actual_type = value.type.strip_typedefs()\n print (\"The value of '%s' is of type '%s' \"\n \"which is a typedef of type '%s'\" %\n (expr, str(value.type), str(actual_type)))\n\n Explorer.explore_expr(expr, value.cast(actual_type), is_child)\n return False", "def rdf_type(self):\n return self._rdf_type", "def test_typedef00403m_type_def00403m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00403m/typeDef00403m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00403m/typeDef00403m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_translate_resourcetypes_type_uid(self):\n self.assertEqual(\"nodes/007ff4e5-fe72-4c4b-b858-4c5f37dff946\",\n util.translate_resourcetypes(\"/nodes/007ff4e5-fe72-4c4b-b858-4c5f37dff946/\"))\n self.assertEqual(\"nodes/007ff4e5-fe72-4c4b-b858-4c5f37dff946\",\n util.translate_resourcetypes(\"/nodes/007FF4E5-FE72-4C4B-B858-4C5F37DFF946\"))", "def page_namespace(tree):\n root_name = ET.QName(tree.getroot().tag)\n if root_name.localname == 'PcGts':\n return root_name.namespace\n else:\n raise ValueError('Not a PAGE tree')", "def getURI(self):\n return _libsbml.SBMLNamespaces_getURI(self)", "def test_typedef00401m_type_def00401m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00401m/typeDef00401m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00401m/typeDef00401m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def xsd_type(dtype):\n return XSD_TYPE_MAP.get(dtype,'string')" ]
[ "0.66442066", "0.5593534", "0.5443724", "0.5412149", "0.5365882", "0.5329653", "0.5311929", "0.5237586", "0.5178215", "0.5165827", "0.5055696", "0.5044016", "0.5007665", "0.4926739", "0.48959085", "0.48641986", "0.48631665", "0.4855509", "0.48434836", "0.48183277", "0.4792356", "0.47862798", "0.47404408", "0.47294393", "0.4723793", "0.47111255", "0.4702394", "0.46919808", "0.46903983", "0.46584937", "0.46271244", "0.46186867", "0.4617531", "0.4604043", "0.45993757", "0.45985585", "0.4591038", "0.4586497", "0.45848095", "0.45825526", "0.45807338", "0.4573924", "0.45574927", "0.45526972", "0.45472842", "0.45332465", "0.4530956", "0.45251143", "0.45243555", "0.45225555", "0.45212564", "0.45162475", "0.45096263", "0.44960347", "0.4493138", "0.44918758", "0.44866842", "0.44828385", "0.44737914", "0.4473183", "0.4472023", "0.44692904", "0.44670665", "0.44466957", "0.44392848", "0.44391468", "0.44376433", "0.4437629", "0.44349527", "0.44334894", "0.4433334", "0.44330135", "0.44300586", "0.44294038", "0.44290915", "0.44268504", "0.442076", "0.4419924", "0.4417466", "0.44173992", "0.43981576", "0.4397718", "0.43933913", "0.43869466", "0.4383478", "0.43826568", "0.43791485", "0.4378129", "0.43760198", "0.43731675", "0.43726635", "0.43709844", "0.43701172", "0.43693835", "0.4368849", "0.43682677", "0.43614888", "0.43580577", "0.43551713", "0.43525833" ]
0.6797826
0
Returns True if a customer identifier does not belong to the dataframe used to build the classifier model.
def is_customer_out_sample(self, customerID):
    listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique())
    is_flag = customerID in listCustomer
    return is_flag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_customer(self) -> bool:\n return self.customer_id is not None", "def has_customer(self):\n return self.customer is not None", "def is_customer(self):\n return self.user_type == 'C'", "def is_label_dataframe(label, df):\n\n setdiff = set(label) - set(df.columns.tolist())\n\n if len(setdiff) == 0:\n return True\n else:\n return False", "def is_customer(self):\n return self.rol == ProfileRoles.CUSTOMER", "def check_data(dataframe):\n if dataframe.iloc[0, 0] == 'No data available':\n return False\n else:\n return True", "def is_velas_df(df):\n empty_df = get_empty_df()\n \n if sorted(list(empty_df.columns)) != sorted(list(df.columns)):\n return False\n if empty_df.index.name != df.index.name:\n return False\n return True", "def is_dataset(self):\n return self._dataset is not None", "def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return customer_df", "def test_case_user_not_yet_customer(self):\n pass", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def is_appropriate_data_instance(self, df) -> bool:\n return isinstance(df, pl.DataFrame) or isinstance(df, pl.LazyFrame)", "def customer_wants_condiments(self):\n return True", "def has_data(self):\n return ([0] != self.__contexts) and ([0] != self.__weights)", "def is_specific(self) -> bool:\n return False", "def is_customer_id_exist(customer_id) -> bool:\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT exists(SELECT 1 FROM Customers WHERE id_customer=?)\", (customer_id,))\n return cursor.fetchone()[0] == 1", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def has_cid(self):\n return hasattr(self, 'cid')", "def _isIndexedDataframe(self, dataframe):\n return len(dataframe.index.names) > 1 or not dataframe.index.names[0] is None", "def __is_nan(self):\n return _VirtualBooleanColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"is_nan\",\n operand1=self,\n operand2=None\n )", "def is_new_data(df: pd.DataFrame) -> bool:\n import io\n\n with io.StringIO() as s:\n save_as_data_table(df, s)\n new_data = s.getvalue()\n\n existing_data = read_data_table(as_text=True)\n\n return new_data != existing_data", "def check_sparkdf_not_nulls(sparkdf,columns):\n\n\tfor column in columns:\n\n\t\tempties = sparkdf.select(col(column)).where(col(column).isNull())\n\t\tif len(empties.head(1)) > 0:\n\t\t\tprint(\"Checking DataFrame. I found null values in column\", column)\n\t\t\treturn False\n\t\telse:\n\t\t\tprint(\"Checking DataFrame. 
No null values found in column\", column)\n\n\treturn True", "def is_false(self):\n return _VirtualBooleanColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"not\",\n operand1=self,\n operand2=None\n )", "def is_non_reducing(self):\n return bool(set(self.kind) & set(\"ABC\"))", "def single_records(df,\n key_cols=['report_date', 'plant_id_eia', 'generator_id']):\n len_1 = len(df)\n len_2 = len(df.drop_duplicates(subset=key_cols))\n return bool(len_1 == len_2)", "def to_drop(self):\n return self.id is None", "def __bool__(self):\n return self.fam.c_nonzero(self)", "def has_customers(self, asn):\n for neighbor in nx.all_neighbors(self, asn):\n edge_data = self.get_edge_data(asn, neighbor)\n\n # node is a provider of neighbor\n if edge_data[\"relationship\"] == -1 and edge_data[\"as1\"] == asn:\n return True\n return False", "def bool(self) -> bool:\n if isinstance(self, ps.DataFrame):\n df = self\n elif isinstance(self, ps.Series):\n df = self.to_dataframe()\n return df.head(2)._to_internal_pandas().bool()", "def __is_inf(self):\n return _VirtualBooleanColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"is_inf\",\n operand1=self,\n operand2=None\n )", "def is_valid(self, dataset):\n pass", "def noyable(self):\n return False", "def is_relevant(self):\n return self.metadata.is_relevant", "def have_cdc() -> bool:", "def train_column_is_all_null(self, column):\n return self.df_column_is_all_null(self.train, column)", "def test_import_customer_without_data(self):\n\n customer = self.import_customer.create_customer_object(\"cust002010\", {})\n self.assertIsInstance(customer, Customer)", "def test_not_contains_user(self):\n print('(' + self.test_not_contains_user.__name__+')',\n self.test_not_contains_user.__doc__)\n # non existing doctor, it could be patient as well\n self.assertFalse(self.connection.contains_user(\n NON_EXIST_DOCTOR_USERNAME))", "def test_single_missing_column():\n imp = MissingnessClassifier()\n imp.fit_predict(dfs.df_mis_classifier)\n imp.fit_predict_proba(dfs.df_mis_classifier)", "def _has_data(cls):\n return User.objects.count() > 0", "def is_isolated_cds(gene, gene_info, idx):\n\n if len(gene_info.vertex_succ_list[idx]) > 0:\n return False\n\n return np.sum(gene.splicegraph.edges[:, idx]) == 0", "def is_absent(self, tokenized_record):\n\n return bool(set(tokenized_record).intersection(self.absent_markers))", "def has_stockrecords(self):\n try:\n a=self.stockrecords.pk\n return True\n except:\n return False", "def isUnConditional(self) -> bool:\n ...", "def customers_presence(self):\n return self._customers_presence", "def _exists(self) -> bool:\n client = MlflowClient()\n all_metrics = client._tracking_client.store.get_all_metrics(\n run_uuid=self.run_id\n )\n return any(self._is_dataset_metric(x) for x in all_metrics)", "def has_no_uses(self) -> bool:\n\n return len(self.users_) == 0", "def verify_pandas(self):\n self.check_dataset_duplicate_ids(self.vertices)\n # self.check_dataset_children_ids()\n self.check_dataset_litter_ids()\n self.check_dataset_dates()", "def ref_known_flag(self):\n if CredentialApplication.objects.filter(\n reference_email__iexact=self.reference_email,\n reference_contact_datetime__isnull=False).exclude(\n reference_email=''):\n return True\n elif LegacyCredential.objects.filter(\n reference_email__iexact=self.reference_email).exclude(\n reference_email=''):\n return True\n else:\n return False", "def cols_valid(self,\n df: pd.DataFrame,\n req_cols: set) -> bool:\n missing_cols = req_cols.difference(df.columns)\n\n if len(missing_cols) 
> 0:\n logging.error(f\"{missing_cols} columns required but missing\")\n return False\n\n return True", "def __contains__(self, item):\n return item in self.default_dataset", "def _check_notnull(self):\n candnull = self.df_test_resampled[self.candidate_col_name].isnull().all()\n refnull = self.df_test_resampled[self.reference_col_name].isnull().all()\n if candnull or refnull:\n return 1, 'No data for selected time frame'\n else:\n return 0, 'No error occurred'", "def validate(\n self, feature_set: FeatureSet, dataframe: DataFrame, spark_client: SparkClient\n ) -> Any:", "def _is_dataclass_instance(obj):\n return hasattr(type(obj), '__dataclass_fields__')", "def does_usage_charges_grid_have_no_records(self):\n return self.is_element_present(self.usage_charges_grid_no_record_found_message_locator)", "def is_identity(self):\n\n if self.rows != self.columns:\n return False\n\n for i in range(self.rows):\n row = self.row(i + 1)\n for j in range(self.columns):\n if i == j and row[j] != 1:\n return False\n\n if i != j and row[j] != 0:\n return False\n\n return True;", "def is_allc(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'd':\n return False\n return True", "def is_indicator():\n return True", "def is_id_only(self):\n for key, value in self.items():\n if key not in {'names', 'labels', 'roles'} and value:\n return False\n if self.names or self.labels:\n return True\n return False", "def has_carbon(self):\n return len(self.c_indices) > 0", "def isEmptyLandmarkset(self):\n return self.subsetpointcloud is None", "def _is_categorical(df, field):\n return df[field].dtype.name == 'category'", "def is_dataclass_instance(obj: Any) -> bool:\n return dataclasses.is_dataclass(obj) and not isinstance(obj, type)", "def is_ode_noad_link(self):\n if self.project_name in IDENTIFIERS:\n return True\n else:\n return False", "def is_yummy(self):\n return False", "def is_customers_room(room, customer_id):\n room_last_10 = room.title[-10:]\n customer_id_last10 = customer_id[-10:]\n if room_last_10 == customer_id_last10:\n return True\n logging.log(logging.INFO, \"NO MATCH for %s %s \" % (room_last_10, customer_id_last10))\n return False", "def is_emptiable(self) -> bool:\n raise NotImplementedError()", "def __ne__(self, other):\n if not isinstance(other, BusinessInvoiceAnalysisRequest):\n return True\n\n return self.to_dict() != other.to_dict()", "def __nonzero__(self):\n return True", "def check_if_sectors_are_naics(df_load, crosswalk_list, column_headers):\n\n # create a df of non-sectors to export\n non_sectors_df = []\n # create a df of just the non-sectors column\n non_sectors_list = []\n # loop through the df headers and determine if value\n # is not in crosswalk list\n for c in column_headers:\n # create df where sectors do not exist in master crosswalk\n non_sectors = df_load[~df_load[c].isin(crosswalk_list)]\n # drop rows where c is empty\n non_sectors = non_sectors[non_sectors[c] != '']\n # subset to just the sector column\n if len(non_sectors) != 0:\n sectors = non_sectors[[c]].rename(columns={c: 'NonSectors'})\n non_sectors_df.append(non_sectors)\n non_sectors_list.append(sectors)\n\n if len(non_sectors_df) != 0:\n # concat the df and the df of sectors\n ns_list = pd.concat(non_sectors_list, sort=False, ignore_index=True)\n # print the NonSectors\n non_sectors = ns_list['NonSectors'].drop_duplicates().tolist()\n vLog.debug('There are sectors that are not NAICS 2012 Codes')\n vLog.debug(non_sectors)\n else:\n vLog.debug('All sectors are NAICS 2012 Codes')\n\n return 
non_sectors", "def isInternal(self):\n if self.data.depend_er_job == self.data.depend_on_job:\n return True\n return False", "def is_duplicate(self, **kwargs):\n return len(list(self.c.select(**kwargs))) > 0", "def _isNewTxn(self, identifier, reply, txnId) -> bool:\n return (identifier not in self.processedRequests or\n reply.reqId not in self.processedRequests[identifier]) and \\\n txnId is not None", "def __ne__(self, other):\n if not isinstance(other, CreditSupportAnnex):\n return True\n\n return self.to_dict() != other.to_dict()", "def exists(cls, customer_id):\n customer_id = int(customer_id)\n cust = DB_CUSTOMER_TABLE.get(doc_id=customer_id)\n if not cust:\n raise ValueError(f\"unknown customer '{customer_id}'\")\n return customer_id", "def _check_primary_key(df: \"pd.DataFrame\", primary_key_name: str):\n if primary_key_name in df.columns and primary_key_name == df.index.name:\n raise primary_key.Ambiguous(\n f\"Index {primary_key_name} has the same name as column {primary_key_name}\"\n )\n elif primary_key_name not in df.columns and primary_key_name != df.index.name:\n raise primary_key.NotFound(\n f\"Primary key: {primary_key_name} is not DataFrame index name: {df.index.name} or in\"\n f\" DataFrame column names: {df.columns}\"\n )", "def is_distributed_model(model):\n try:\n get_tenant_field(model)\n return True\n except ValueError:\n return False", "def is_dnf(self):\n return False", "def is_data(i):\n keys = ['_id', '_time']\n return all(i != k for k in keys)", "def check_for_null_values(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.isna().sum())", "def invariant(self):\n\t\treturn ((self.tenant != \"\") and (self.loginUser != \"\"))", "def validateModelCol(self):\n \n ret = False\n \n dc = self.__args['datacolumn'].upper() \n if \"MODEL\" in dc or dc == 'ALL':\n ret = True\n\n return ret", "def invariant(self):\n\t\treturn ((self.name != \"\") and (self.locationId != \"\"))", "def checkfornan(chosen_df):\n if not chosen_df.isnull().values.any():\n raise ValueError('NaN in DataFrame')", "def tie_exists(self):\n return len(self.marks) == 9", "def __eq__(self, other):\n if not isinstance(other, Customer):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, CustomersCustomerDetails):\n return False\n\n return self.__dict__ == other.__dict__", "def is_alld(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'c':\n return False\n return True", "def chcek_exist(df, Point):\n\n exist = (df.OperatingPoint == Point)\n exist = exist.sum()\n if exist == 0:\n return False\n else:\n return True", "def unmatching_driver_id(df):\n\ttemp = df[df['driver_id_bkg'].notnull()]\n\torder_ids = temp[temp['driver_id_bkg'] != temp['driver_id_pnt']]['order_id'].values\n\treturn df[~df['order_id'].isin(order_ids)]", "def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]", "def delete_customer(customer_id):\n del_query = Customer.get(Customer.customer_id == customer_id)\n return bool(del_query.delete_instance())", "def _check_features_df(df, features):\n # check columns\n if not set(features).issubset(df.columns):\n raise ValueError(\"The dataframe does not seem to have the right \"\n \"features. 
{0} instead of {1}\"\n .format(df.columns, features))\n\n return", "def is_internal(self):\n return bool(self.is_reducing() and self.is_non_reducing())", "def is_indeed(self) -> bool:\n return self.mukluk > 5", "def IgnorePersistedDecision(self) -> bool:", "def is_inequality(self): \n return False", "def __bool__(self):\n return self.taxonomy.exists", "def is_person_identifier_used(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT personid FROM person WHERE personid =?\", (person_id,))\n person_identifier = \"\"\n is_used = True\n for row in c:\n person_identifier = row[\"personid\"]\n conn.close()\n if len(person_identifier) == 0:\n is_used = False\n if len(person_identifier) > 0:\n is_used = True\n return is_used\n except:\n return False", "def is_trainable(self):\n return False", "def isDeleted(self):\n return self.air is None" ]
[ "0.661681", "0.6462838", "0.59257627", "0.5741564", "0.5628499", "0.55574435", "0.5537199", "0.5509296", "0.5507439", "0.5461328", "0.5405652", "0.53825015", "0.5370098", "0.5365358", "0.5328702", "0.53143424", "0.529039", "0.52698815", "0.52663475", "0.52634853", "0.52370775", "0.52231854", "0.5220413", "0.51524615", "0.5137677", "0.51331055", "0.5102486", "0.5101493", "0.5096956", "0.5080815", "0.50726473", "0.50654197", "0.50583327", "0.50484157", "0.5031694", "0.50285304", "0.50268906", "0.5021727", "0.50192124", "0.49972594", "0.49842697", "0.49818462", "0.49800837", "0.4974319", "0.49574834", "0.49545977", "0.49542335", "0.49349925", "0.4929173", "0.49284396", "0.49217662", "0.4920269", "0.49192423", "0.49140215", "0.49076545", "0.4907331", "0.49031", "0.48979303", "0.4897661", "0.48917526", "0.4886968", "0.48839128", "0.48584569", "0.48571783", "0.4854011", "0.48536837", "0.48520273", "0.4849794", "0.4846591", "0.4843272", "0.48382357", "0.48360077", "0.48343554", "0.48331633", "0.48315686", "0.48314548", "0.4828786", "0.48276383", "0.4825132", "0.48237842", "0.48231232", "0.48219508", "0.4821141", "0.48177683", "0.48146838", "0.48142496", "0.4813486", "0.48127973", "0.48126787", "0.481166", "0.4810733", "0.48083964", "0.48059034", "0.48043257", "0.4796356", "0.47944203", "0.47888148", "0.47876745", "0.47837427", "0.47829413" ]
0.63775516
2
Returns the RFM score from the dataframe given as a parameter. The RFM score is computed from the local RFM matrix threshold.
def get_rfm(self, df):
    df_tmp, df_RFM, df_RFM_threshold, day_now \
        = p5_util.p5_df_rfm_build(df, df_RFM_threshold=self.df_RFM_quantiles,
                                  day_now=self._day_now)
    RFM = df_RFM.RFM.iloc[0]
    return str(RFM)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_rfm_score(dataframe):\n\n dataframe[\"recency_score\"] = pd.qcut(dataframe['recency'].rank(method=\"first\"), 5, labels=[5, 4, 3, 2, 1])\n dataframe[\"frequency_score\"] = pd.cut(dataframe['frequency'], bins=[0, 4, 8, 13, 17, 20], labels=[1, 2, 3, 4, 5])\n dataframe[\"RFM_SCORE\"] = (dataframe['recency_score'].astype(str) +\n dataframe['frequency_score'].astype(str))\n\n return dataframe", "def score(self, df: pd.DataFrame, label_column: str) -> float:\n assert label_column not in self.feature_columns, 'Label column is in the feature list.'\n assert label_column in df.columns, 'Label column is not in the dataframe.'\n\n rounded_preds = self.predict(df).round()\n return f1_score(df[label_column].values, rounded_preds)", "def PP_SPF_AVG_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG_PFL'])\n Feature_DF.loc[:,'PP_SPF_AVG_PFL_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG_PFL'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_PFL_TRS']]\n\n return Feature_DF", "def compute_FRR(self, genuine_score, thresholds=0.01):\r\n print('Computing FRR')\r\n condition = lambda score, thr: score <= thr\r\n return self._F_performance(genuine_score, thresholds, condition)", "def RC_PFL_JPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_PFL_JPRE']]\n min_value = min(Feature_DF.loc[:,'RC_PFL_JPRE'])\n Feature_DF.loc[:,'RC_PFL_JPRE_TRS'] = Feature_DF.loc[:,'RC_PFL_JPRE'].apply(lambda x : (1+x-min_value)**(6/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_PFL_JPRE_TRS']]\n\n return Feature_DF", "def RC_PFL_HPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_PFL_HPRE']]\n min_value = min(Feature_DF.loc[:,'RC_PFL_HPRE'])\n Feature_DF.loc[:,'RC_PFL_HPRE_TRS'] = Feature_DF.loc[:,'RC_PFL_HPRE'].apply(lambda x : (1+x-min_value)**(7/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_PFL_HPRE_TRS']]\n\n return Feature_DF", "def PP_SPF_KNN_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_KNN_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_KNN_PFL'])\n Feature_DF.loc[:,'PP_SPF_KNN_PFL_TRS'] = Feature_DF.loc[:,'PP_SPF_KNN_PFL'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_KNN_PFL_TRS']]\n\n return Feature_DF", "def PP_PAF_STL_AVG_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_STL_AVG_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_STL_AVG_PFL'])\n Feature_DF.loc[:,'PP_PAF_STL_AVG_PFL_TRS'] = Feature_DF.loc[:,'PP_PAF_STL_AVG_PFL'].apply(lambda x : (1+x-min_value)**(9/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_STL_AVG_PFL_TRS']]\n\n return Feature_DF", "def PP_PAF_FP_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_FP_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_FP_AVG'])\n Feature_DF.loc[:,'PP_PAF_FP_AVG_TRS'] = Feature_DF.loc[:,'PP_PAF_FP_AVG'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_FP_AVG_TRS']]\n\n return Feature_DF", "def PP_SPF_AVG_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG_SUR']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG_SUR'])\n Feature_DF.loc[:,'PP_SPF_AVG_SUR_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG_SUR'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_SUR_TRS']]\n\n return Feature_DF", "def fScore(cMatrix):\n if precision(cMatrix) + recall(cMatrix) == 0:\n return precision(cMatrix)\n else:\n return 2 * 
precision(cMatrix) * recall(cMatrix) / (precision(cMatrix) + recall(cMatrix))", "def PP_SPF_AVGRW_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW_PFL'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_PFL_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW_PFL'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_PFL_TRS']]\n\n return Feature_DF", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def score(df, tmo, label):\n\n\tif str(type(tmo)) != \"<class 'sklearn.ensemble._forest.RandomForestRegressor'>\":\n\t\traise TypeError('Wrong model type!')\n\n\tX_test = df.loc[:, df.columns != label]\n\t\n\t# predict on test data\n\ty_pred = tmo.predict(X_test)\n\tdf['predict'] = y_pred\n\n\treturn df", "def PP_SPF_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG'])\n Feature_DF.loc[:,'PP_SPF_AVG_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_TRS']]\n\n return Feature_DF", "def PP_FH_FP_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_AVG'])\n Feature_DF.loc[:,'PP_FH_FP_AVG_TRS'] = Feature_DF.loc[:,'PP_FH_FP_AVG'].apply(lambda x : (1+x-min_value)**(-1/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_AVG_TRS']]\n\n return Feature_DF", "def PP_SPF_AVG_SIM_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG_SIM_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG_SIM_DIST'])\n Feature_DF.loc[:,'PP_SPF_AVG_SIM_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG_SIM_DIST'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_SIM_DIST_TRS']]\n\n return Feature_DF", "def PP_FH_FP_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_PFL'])\n Feature_DF.loc[:,'PP_FH_FP_PFL_TRS'] = Feature_DF.loc[:,'PP_FH_FP_PFL'].apply(lambda x : (1+x-min_value)**(3/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_PFL_TRS']]\n\n return Feature_DF", "def computeFScores(self, targetLabels, actualLabels):\r\n if self.prMeasures is None:\r\n self.prMeasures = self.computePRMeasures(targetLabels, actualLabels)\r\n if self.prMeasures[0] == 0:\r\n return 0\r\n self.f1score = 2 * self.prMeasures[0] * self.prMeasures[1] / (0.0 + self.prMeasures[0] + self.prMeasures[1])\r\n return self.f1score", "def PP_PAF_BEST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_BEST']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_BEST'])\n Feature_DF.loc[:,'PP_PAF_BEST_TRS'] = Feature_DF.loc[:,'PP_PAF_BEST'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_BEST_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP_PFL'])\n Feature_DF.loc[:,'PP_SPF_TOP_PFL_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP_PFL'].apply(lambda x : (1+x-min_value)**(5/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_PFL_TRS']]\n\n return Feature_DF", "def raw_score(self,X,Y):\n return self.rf.score(X,Y)", "def OD_PR_LPAVG_TRS(Dataframe):\n\n Feature_DF = 
Dataframe.loc[:,['HNAME','OD_PR_LPAVG']]\n min_value = min(Feature_DF.loc[:,'OD_PR_LPAVG'])\n Feature_DF.loc[:,'OD_PR_LPAVG_TRS'] = Feature_DF.loc[:,'OD_PR_LPAVG'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','OD_PR_LPAVG_TRS']]\n\n return Feature_DF", "def RC_SUR_SPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_SUR_SPRE']]\n min_value = min(Feature_DF.loc[:,'RC_SUR_SPRE'])\n Feature_DF.loc[:,'RC_SUR_SPRE_TRS'] = Feature_DF.loc[:,'RC_SUR_SPRE'].apply(lambda x : (1+x-min_value)**(6/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_SUR_SPRE_TRS']]\n\n return Feature_DF", "def RC_SUR_JPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_SUR_JPRE']]\n min_value = min(Feature_DF.loc[:,'RC_SUR_JPRE'])\n Feature_DF.loc[:,'RC_SUR_JPRE_TRS'] = Feature_DF.loc[:,'RC_SUR_JPRE'].apply(lambda x : (1+x-min_value)**(4/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_SUR_JPRE_TRS']]\n\n return Feature_DF", "def PP_SPF_AVG_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG_DIST'])\n Feature_DF.loc[:,'PP_SPF_AVG_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG_DIST'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_DIST_TRS']]\n\n return Feature_DF", "def evaluate(self, predicted_df):\n logging.info(\"Evaluating model: {}\".format(self.model_type))\n y_true = predicted_df[\"user_label\"].as_matrix()\n y_pred = predicted_df[\"label\"].as_matrix()\n\n scores_cols = [col for col in predicted_df.columns if col.startswith(\"scores_\")]\n print(\"scores_cols: {}\".format(scores_cols))\n\n y_pred_scores = predicted_df[scores_cols].copy().fillna(value=0).as_matrix()\n print(\"predicted scores: {}\".format(y_pred_scores))\n y_true_scores = []\n for lab in predicted_df[\"user_label\"]:\n trues = [0 for _ in range(len(scores_cols))]\n if \"scores_\"+lab in scores_cols:\n trues[scores_cols.index(\"scores_\"+lab)] = 1\n y_true_scores.append(trues)\n print(\"true scores: {}\".format(y_true_scores))\n y_true_scores = np.array(y_true_scores)\n\n performance = {\"model\": self.model_type, \"description\": self.description}\n if 'categorical_accuracy' in self.metrics:\n logging.info(\"Calculating categorical accuracy for {}\".format(self))\n performance['categorical_accuracy'] = sklearn.metrics.accuracy_score(y_true,\n y_pred) # np.mean(y_pred == y_true)\n if 'fmeasure' in self.metrics:\n logging.info(\"Calculating fmeasure for {}\".format(self))\n performance['fmeasure'] = sklearn.metrics.f1_score(y_true, y_pred, average=self.metrics_average)\n if 'MRR' in self.metrics:\n logging.info(\"Calculating MRR for {}\".format(self))\n performance['MRR'] = sklearn.metrics.label_ranking_average_precision_score(y_true_scores, y_pred_scores)\n logging.info(\"Calculated performance: {}\".format(performance))\n print(performance)\n return pd.DataFrame(performance, index=[0])", "def get_score(predictions_df: pd.DataFrame,\n real_df: pd.DataFrame,\n k: int = 5) -> float:\n y_pred = predictions_df.genres.apply(lambda x: x.split(\" \"))\n y_real = real_df.genres.apply(lambda x: x.split(\" \"))\n return mapk(y_real, y_pred, k)", "def PP_BL_AVGF_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_BL_AVGF']]\n min_value = min(Feature_DF.loc[:,'PP_BL_AVGF'])\n Feature_DF.loc[:,'PP_BL_AVGF_TRS'] = Feature_DF.loc[:,'PP_BL_AVGF'].apply(lambda x : (1+x-min_value)**(7/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_BL_AVGF_TRS']]\n\n return Feature_DF", "def 
PP_PAF_AP_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_AP_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_AP_AVG'])\n Feature_DF.loc[:,'PP_PAF_AP_AVG_TRS'] = Feature_DF.loc[:,'PP_PAF_AP_AVG'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_AP_AVG_TRS']]\n\n return Feature_DF", "def RC_DIST_SPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_DIST_SPRE']]\n min_value = min(Feature_DF.loc[:,'RC_DIST_SPRE'])\n Feature_DF.loc[:,'RC_DIST_SPRE_TRS'] = Feature_DF.loc[:,'RC_DIST_SPRE'].apply(lambda x : (1+x-min_value)**(2/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_DIST_SPRE_TRS']]\n\n return Feature_DF", "def RC_SUR_HPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_SUR_HPRE']]\n min_value = min(Feature_DF.loc[:,'RC_SUR_HPRE'])\n Feature_DF.loc[:,'RC_SUR_HPRE_TRS'] = Feature_DF.loc[:,'RC_SUR_HPRE'].apply(lambda x : (1+x-min_value)**(5/6))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_SUR_HPRE_TRS']]\n\n return Feature_DF", "def PP_PAF_SP_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_SP_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_SP_AVG'])\n Feature_DF.loc[:,'PP_PAF_SP_AVG_TRS'] = Feature_DF.loc[:,'PP_PAF_SP_AVG'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_SP_AVG_TRS']]\n\n return Feature_DF", "def PP_SPF_AVG_GO_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG_GO']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG_GO'])\n Feature_DF.loc[:,'PP_SPF_AVG_GO_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG_GO'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_GO_TRS']]\n\n return Feature_DF", "def run_score(df, tmo_path, score_path, label):\n\n\ttry:\n\t\t# load the model\n\t\twith open(tmo_path, 'rb') as f:\n\t\t\ttmo = pickle.load(f)\n\texcept OSError:\n\t\tlogger.error(\"Cannot open %s\", tmo_path)\n\texcept Exception as e:\n\t\tlogger.error(e)\n\n\tlogger.info(\"Scoring the trained model...\")\n\n\tdata = score(df, tmo, label)\n\n\t# write score results\n\tdata.to_csv(score_path, index=False)\n\tlogger.info('Model scoring results saved to %s', score_path)", "def RC_GO_JPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_GO_JPRE']]\n min_value = min(Feature_DF.loc[:,'RC_GO_JPRE'])\n Feature_DF.loc[:,'RC_GO_JPRE_TRS'] = Feature_DF.loc[:,'RC_GO_JPRE'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_GO_JPRE_TRS']]\n\n return Feature_DF", "def PP_BL_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_BL_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_BL_AVG'])\n Feature_DF.loc[:,'PP_BL_AVG_TRS'] = Feature_DF.loc[:,'PP_BL_AVG'].apply(lambda x : (1+x-min_value)**(-3/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_BL_AVG_TRS']]\n\n return Feature_DF", "def get_r_score(self):\n return self.r_score", "def PP_PAF_STL_B_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_STL_B_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_STL_B_PFL'])\n Feature_DF.loc[:,'PP_PAF_STL_B_PFL_TRS'] = Feature_DF.loc[:,'PP_PAF_STL_B_PFL'].apply(lambda x : (1+x-min_value)**(9/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_STL_B_PFL_TRS']]\n\n return Feature_DF", "def RC_LOC_JPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_LOC_JPRE']]\n min_value = min(Feature_DF.loc[:,'RC_LOC_JPRE'])\n Feature_DF.loc[:,'RC_LOC_JPRE_TRS'] = Feature_DF.loc[:,'RC_LOC_JPRE'].apply(lambda x : (1+x-min_value)**(5/6))\n 
Feature_DF = Feature_DF.loc[:,['HNAME','RC_LOC_JPRE_TRS']]\n\n return Feature_DF", "def RC_LOC_SPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_LOC_SPRE']]\n min_value = min(Feature_DF.loc[:,'RC_LOC_SPRE'])\n Feature_DF.loc[:,'RC_LOC_SPRE_TRS'] = Feature_DF.loc[:,'RC_LOC_SPRE'].apply(lambda x : (1+x-min_value)**(2/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_LOC_SPRE_TRS']]\n\n return Feature_DF", "def PP_BL_AVGF_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_BL_AVGF_SUR']]\n min_value = min(Feature_DF.loc[:,'PP_BL_AVGF_SUR'])\n Feature_DF.loc[:,'PP_BL_AVGF_SUR_TRS'] = Feature_DF.loc[:,'PP_BL_AVGF_SUR'].apply(lambda x : (1+x-min_value)**(-9/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_BL_AVGF_SUR_TRS']]\n\n return Feature_DF", "def RC_DIST_JPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_DIST_JPRE']]\n min_value = min(Feature_DF.loc[:,'RC_DIST_JPRE'])\n Feature_DF.loc[:,'RC_DIST_JPRE_TRS'] = Feature_DF.loc[:,'RC_DIST_JPRE'].apply(lambda x : (1+x-min_value)**(1/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_DIST_JPRE_TRS']]\n\n return Feature_DF", "def PP_PAF_FP_AVGRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_FP_AVGRW']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_FP_AVGRW'])\n Feature_DF.loc[:,'PP_PAF_FP_AVGRW_TRS'] = Feature_DF.loc[:,'PP_PAF_FP_AVGRW'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_FP_AVGRW_TRS']]\n\n return Feature_DF", "def RC_DIST_HPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_DIST_HPRE']]\n min_value = min(Feature_DF.loc[:,'RC_DIST_HPRE'])\n Feature_DF.loc[:,'RC_DIST_HPRE_TRS'] = Feature_DF.loc[:,'RC_DIST_HPRE'].apply(lambda x : (1+x-min_value)**(9/10))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_DIST_HPRE_TRS']]\n\n return Feature_DF", "def RC_GO_SPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_GO_SPRE']]\n min_value = min(Feature_DF.loc[:,'RC_GO_SPRE'])\n Feature_DF.loc[:,'RC_GO_SPRE_TRS'] = Feature_DF.loc[:,'RC_GO_SPRE'].apply(lambda x : (1+x-min_value)**(9/10))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_GO_SPRE_TRS']]\n\n return Feature_DF", "def PredictRunwayRMSE(DF, predictor, input_func, name=None):\r\n if name:\r\n print 'Predicted win for %s:' % name\r\n weighted1 = 0\r\n weighted2 = 0\r\n total_weight = 0\r\n for df in DF:\r\n series_in = input_func(df)\r\n assert all(len(s) == len(series_in[0]) for s in series_in)\r\n X, _ = Transform(series_in, [])\r\n df['prediction'] = predictor.predict(X)\r\n assert len(df.prediction) == len(series_in[0]), (len(df.prediction), len(series_in[0]))\r\n\r\n filter_runway = (df.actual_runway_arrival < df.actual_gate_arrival)\r\n golden_runway = df.actual_runway_arrival[filter_runway]\r\n r1 = util.RMSE(golden_runway, df.last_era_update[filter_runway])\r\n r2 = util.RMSE(golden_runway, (df.last_era_update + df.prediction)[filter_runway])\r\n w = len(df.last_era_update[filter_runway])\r\n weighted1 += r1 * w\r\n weighted2 += r2 * w\r\n total_weight += w\r\n #print 'Runway: %.2f' % (r1 - r2)\r\n\r\n weighted_score = ((weighted1 - weighted2) / total_weight)\r\n print 'Weighted: %.4f' % weighted_score\r\n return weighted_score", "def PP_FH_FP_BIN_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_BIN']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_BIN'])\n Feature_DF.loc[:,'PP_FH_FP_BIN_TRS'] = Feature_DF.loc[:,'PP_FH_FP_BIN'].apply(lambda x : (1+x-min_value)**(3/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_BIN_TRS']]\n\n return 
Feature_DF", "def PP_FH_FP_AVGRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_AVGRW']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_AVGRW'])\n Feature_DF.loc[:,'PP_FH_FP_AVGRW_TRS'] = Feature_DF.loc[:,'PP_FH_FP_AVGRW'].apply(lambda x : (1+x-min_value)**(9/10))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_AVGRW_TRS']]\n\n return Feature_DF", "def RC_SUR_AVG_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_SUR_AVG_SUR']]\n Feature_DF.loc[:,'RC_SUR_AVG_SUR_TRS'] = Feature_DF.loc[:,'RC_SUR_AVG_SUR'].pow(4)\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_SUR_AVG_SUR_TRS']]\n\n return Feature_DF", "def RC_GO_AVG_GO_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_GO_AVG_GO']]\n min_value = min(Feature_DF.loc[:,'RC_GO_AVG_GO'])\n Feature_DF.loc[:,'RC_GO_AVG_GO_TRS'] = Feature_DF.loc[:,'RC_GO_AVG_GO'].apply(lambda x : (1+x-min_value)**(5/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_GO_AVG_GO_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP_SUR']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP_SUR'])\n Feature_DF.loc[:,'PP_SPF_TOP_SUR_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP_SUR'].apply(lambda x : (1+x-min_value)**(9/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_SUR_TRS']]\n\n return Feature_DF", "def JS_S_FPRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','JS_S_FPRW']]\n min_value = min(Feature_DF.loc[:,'JS_S_FPRW'])\n Feature_DF.loc[:,'JS_S_FPRW_TRS'] = Feature_DF.loc[:,'JS_S_FPRW'].apply(lambda x : (1+x-min_value)**(3/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','JS_S_FPRW_TRS']]\n\n return Feature_DF", "def PP_SPF_AVGRW_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW_SUR']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW_SUR'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_SUR_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW_SUR'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_SUR_TRS']]\n\n return Feature_DF", "def JS_J_FPRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','JS_J_FPRW']]\n min_value = min(Feature_DF.loc[:,'JS_J_FPRW'])\n Feature_DF.loc[:,'JS_J_FPRW_TRS'] = Feature_DF.loc[:,'JS_J_FPRW'].apply(lambda x : (1+x-min_value)**(5/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','JS_J_FPRW_TRS']]\n\n return Feature_DF", "def PP_FH_FP_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_SUR']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_SUR'])\n Feature_DF.loc[:,'PP_FH_FP_SUR_TRS'] = Feature_DF.loc[:,'PP_FH_FP_SUR'].apply(lambda x : (1+x-min_value)**(-2/9))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_SUR_TRS']]\n\n return Feature_DF", "def fit_rf_model(df, X_train, y_train, X_test, y_test, mask_test):\n print (\"**** RANDOM FOREST Grid Search ****\")\n random_forest_grid = {'max_depth': [3, None],\n 'max_features': ['sqrt', 'log2', round(X_train.shape[1]/3), None],\n 'min_samples_split': [2, 4],\n 'min_samples_leaf': [1, 2, 4],\n 'bootstrap': [True, False],\n 'n_estimators': [100,300,500],\n 'random_state': [10]}\n\n rf_gridsearch = GridSearchCV(RandomForestRegressor(),\n random_forest_grid,\n n_jobs=-1,\n verbose=True,\n scoring='neg_mean_squared_error')\n rf_gridsearch.fit(X_train, y_train)\n print(\"Best Parameters:\", rf_gridsearch.best_params_)\n print(' ')\n\n best_rf_model = rf_gridsearch.best_estimator_\n\n feature_importance = {}\n for label, importance in zip(X_train.columns, best_rf_model.feature_importances_):\n feature_importance[label] = 
importance\n print(\"Sorted Feature Importance:\")\n sorted_feature_imp = sorted(feature_importance.items(), key=lambda x: (-x[1]))\n for e in sorted_feature_imp:\n print(e)\n\n y_pred_test = best_rf_model.predict(X_test)\n df_test = pd.concat([df[mask_test][['player','wkts','year1_wkts_pm']].reset_index(),\n pd.DataFrame(y_pred_test).reset_index()],axis=1,)\n df_test = df_test.drop('index',axis=1)\n df_test.columns = ['player','wkts','wkts_baseline','wkts_exp']\n\n df_by_player = df_test.groupby('player').sum()\n\n print(' ')\n print('Explained Variance (RF model): ' + str(explained_variance_score(df_by_player.wkts,df_by_player.wkts_exp)))\n print('Explained Variance (Baseline): ' + str(explained_variance_score(df_by_player.wkts,df_by_player.wkts_baseline)))\n print('----')\n print('Mean Squared Error (RF model): ' + str(mean_squared_error(df_by_player.wkts,df_by_player.wkts_exp)))\n print('Mean Squared Error (Baseline): ' + str(mean_squared_error(df_by_player.wkts,df_by_player.wkts_baseline)))\n print('----')\n print(' ')", "def PP_PAF_EDL_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_EDL_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_EDL_PFL'])\n Feature_DF.loc[:,'PP_PAF_EDL_PFL_TRS'] = Feature_DF.loc[:,'PP_PAF_EDL_PFL'].apply(lambda x : (1+x-min_value)**(-7/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_EDL_PFL_TRS']]\n\n return Feature_DF", "def PP_SPF_L2_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_L2']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_L2'])\n Feature_DF.loc[:,'PP_SPF_L2_TRS'] = Feature_DF.loc[:,'PP_SPF_L2'].apply(lambda x : (1+x-min_value)**(7/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_L2_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP'])\n Feature_DF.loc[:,'PP_SPF_TOP_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_TRS']]\n\n return Feature_DF", "def RC_PP_JPRE_JPFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_PP_JPRE_JPFL']]\n Feature_DF.loc[:,'RC_PP_JPRE_JPFL_TRS'] = Feature_DF.loc[:,'RC_PP_JPRE_JPFL'].apply(lambda x : (1+x)**(-1/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_PP_JPRE_JPFL_TRS']]\n\n return Feature_DF", "def evaluate(self):\n self.df['Score'] = self.df[self.review_col].apply(self.analyzer)\n\n return self.df", "def PP_SPF_AVGRW_SIM_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW_SIM_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW_SIM_DIST'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_SIM_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW_SIM_DIST'].apply(lambda x : (1+x-min_value)**(7/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_SIM_DIST_TRS']]\n\n return Feature_DF", "def performance_matrix(df):\r\n total = df.count()\r\n nP = df.filter((F.col('prediction') == 1)).count()\r\n nN = df.filter((F.col('prediction') == 0)).count()\r\n TP = df.filter((F.col('prediction') == 1) & (F.col('label') == 1)).count()\r\n FP = df.filter((F.col('prediction') == 1) & (F.col('label') == 0)).count()\r\n FN = df.filter((F.col('prediction') == 0) & (F.col('label') == 1)).count()\r\n TN = df.filter((F.col('prediction') == 0) & (F.col('label') == 0)).count()\r\n \r\n print('num positive: {}'.format(nP))\r\n print('num negative: {}'.format(nN))\r\n print(\"True Positives:\", TP)\r\n print(\"True Negatives:\", TN)\r\n print(\"False Positives:\", FP)\r\n 
print(\"False Negatives:\", FN)\r\n print('accuracy: {}'.format((TP + TN) / total))\r\n \r\n if TP == 0:\r\n print(\"Precision: 0\")\r\n print(\"Recall: 0\")\r\n \r\n else:\r\n print('recall: {}'.format(TP / (TP + FN)))\r\n print('precision: {}'.format(TP / (TP + FP)))", "def evalBaseline(self, df = None):\n \n if (df is None):\n self.r_b = self.df.merge(self.df_user[[\"user ind\", \"b_u\"]], on = \"user ind\")\n self.r_b = self.r_b.merge(self.df_item[[\"item ind\", \"b_i\"]], on = \"item ind\")\n self.r_b[\"baseline\"] = self.r_mean + self.r_b[\"b_u\"] + self.r_b[\"b_i\"]\n \n \n return self.r_b[[\"user id\", \"item id\", \"baseline\"]]\n \n else:\n df = df.merge(self.df_user, on = \"user id\").merge(self.df_item, on = \"item id\")\n df[\"baseline\"] = self.r_mean + df[\"b_u\"] + df[\"b_i\"]\n \n # clip the score to the interval [1, 5]\n df[\"baseline\"] = np.minimum(np.maximum(df[\"baseline\"], 1), 5)\n \n return df[[\"user id\", \"item id\", \"baseline\"]]", "def _F_performance(self, score, thresholds, condition):\r\n if type(thresholds) is float:\r\n thresholds = self._compute_thresholds(thresholds)\r\n F = np.zeros(shape=(1, len(thresholds)))\r\n impostors = 0\r\n L = len(score)\r\n for count, thr in enumerate(thresholds):\r\n N = 0\r\n for idx in range(0, L):\r\n N += condition(score[idx], thr)\r\n F[0, count] = N / L\r\n return F[0]", "def RC_DIST_AVG_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_DIST_AVG_DIST']]\n Feature_DF.loc[:,'RC_DIST_AVG_DIST_TRS'] = Feature_DF.loc[:,'RC_DIST_AVG_DIST'].apply(lambda x : (1+x)**(-5/6))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_DIST_AVG_DIST_TRS']]\n\n return Feature_DF", "def scoreRsrc( self, rr ):\r\n result = 0.0\r\n for tt in self.getSched( )[rr.getid( )]:\r\n for se in tt:\r\n result += 1\r\n print( \"INFO: Value for %s: %s \" % ( rr, result ) )\r\n return( result )", "def PP_SPF_D_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_D']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_D'])\n Feature_DF.loc[:,'PP_SPF_D_TRS'] = Feature_DF.loc[:,'PP_SPF_D'].apply(lambda x : (1+x-min_value)**(1/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_D_TRS']]\n\n return Feature_DF", "def PP_SPF_AVGRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_TRS']]\n\n return Feature_DF", "def _chrf_score_compute(total_preds_char_n_grams: Dict[int, Tensor], total_preds_word_n_grams: Dict[int, Tensor], total_target_char_n_grams: Dict[int, Tensor], total_target_word_n_grams: Dict[int, Tensor], total_matching_char_n_grams: Dict[int, Tensor], total_matching_word_n_grams: Dict[int, Tensor], n_order: float, beta: float) ->Tensor:\n chrf_f_score = _calculate_fscore(total_matching_char_n_grams, total_matching_word_n_grams, total_preds_char_n_grams, total_preds_word_n_grams, total_target_char_n_grams, total_target_word_n_grams, n_order, beta)\n return chrf_f_score", "def score_sc1(self, prediction_file):\n fh = TempFile()\n gs1, _ = self.download_gs()\n script = self.classpath + os.sep + \"DREAM_Olfaction_scoring_Q1.pl\"\n cmd = \"perl %s %s %s %s\"\n cmd = cmd % (script, prediction_file, fh.name, gs1)\n shellcmd(cmd)\n df = pd.read_csv(fh.name, sep='\\t', index_col=None).ix[0]\n fh.delete()\n return df\n\n\n # score sub1 = (zint +zple +zdec)/3\n # sigma_int = 0.0787\n # sigma_ple = 0.176\n 
# signa_dec = 0.0042\n\n # final is average of zscores", "def RC_PP_PFLA_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_PP_PFLA']]\n Feature_DF.loc[:,'RC_PP_PFLA_TRS'] = Feature_DF.loc[:,'RC_PP_PFLA'].pow(1/10)\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_PP_PFLA_TRS']]\n\n return Feature_DF", "def PP_PAF_AP_AVGRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_AP_AVGRW']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_AP_AVGRW'])\n Feature_DF.loc[:,'PP_PAF_AP_AVGRW_TRS'] = Feature_DF.loc[:,'PP_PAF_AP_AVGRW'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_AP_AVGRW_TRS']]\n\n return Feature_DF", "def PP_FH_FP_SIM_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_SIM_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_SIM_DIST'])\n Feature_DF.loc[:,'PP_FH_FP_SIM_DIST_TRS'] = Feature_DF.loc[:,'PP_FH_FP_SIM_DIST'].apply(lambda x : (1+x-min_value)**(4/9))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_SIM_DIST_TRS']]\n\n return Feature_DF", "def PP_PAF_BEST_GOPFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_BEST_GOPFL']]\n Feature_DF.loc[:,'PP_PAF_BEST_GOPFL_TRS'] = Feature_DF.loc[:,'PP_PAF_BEST_GOPFL'].pow(4/5)\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_BEST_GOPFL_TRS']]\n\n return Feature_DF", "def PP_PAF_SP_AVGRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_SP_AVGRW']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_SP_AVGRW'])\n Feature_DF.loc[:,'PP_PAF_SP_AVGRW_TRS'] = Feature_DF.loc[:,'PP_PAF_SP_AVGRW'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_SP_AVGRW_TRS']]\n\n return Feature_DF", "def PP_SPF_L1_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_L1']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_L1'])\n Feature_DF.loc[:,'PP_SPF_L1_TRS'] = Feature_DF.loc[:,'PP_SPF_L1'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_L1_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_SIM_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP_SIM_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP_SIM_DIST'])\n Feature_DF.loc[:,'PP_SPF_TOP_SIM_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP_SIM_DIST'].apply(lambda x : (1+x-min_value)**(9/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_SIM_DIST_TRS']]\n\n return Feature_DF", "def score(self, ref_im):\n \n if isinstance(self.res, type(None)):\n raise Exception('Result is not yet aviable.')\n \n shift = register_translation(ref_im, self.res)[0]\n shifted_res = fourier_shift(np.fft.fft2(self.res), shift)\n shifted_res = np.real(np.fft.ifft2(shifted_res))\n \n mse = np.linalg.norm(shifted_res - ref_im)\n drange = np.max(shifted_res) - np.min(shifted_res)\n ssim = compare_ssim(ref_im, shifted_res, data_range=drange)\n \n return mse, ssim", "def get_tuned_f1(results_df):\n df = results_df\n scores = []\n f1s = []\n lambdas = np.arange(.5, 5, .05)\n\n def add_weighted(df, lam):\n \"\"\" Calculates different weighted PMI values after already having mut_inf scores \"\"\"\n df['mut_inf_weighted'] = df.mut_inf + (lam - 1) * (df.head_conditional + df.tail_conditional) / 2.\n\n for lam in lambdas:\n ss = StandardScaler()\n add_weighted(df, lam=lam)\n model = GaussianMixture(2, n_init=1)\n dat = ss.fit_transform(df[['mut_inf_weighted']])\n pred = model.fit_predict(dat)\n score = model.aic(dat)\n f1 = f1_score((model.means_.argmax() == df.label), pred)\n scores.append(score)\n f1s.append(f1)\n\n scores = 
np.array(scores)\n f1s = np.array(f1s)\n lam = lambdas[scores.argmax()]\n\n optimal_lambda = lambdas[scores.argmax()]\n optimal_f1 = f1s[scores.argmax()]\n return optimal_f1, optimal_lambda", "def PP_SPF_AVGRW_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW_DIST'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW_DIST'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_DIST_TRS']]\n\n return Feature_DF", "def PP_SPF_SEC_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_SEC']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_SEC'])\n Feature_DF.loc[:,'PP_SPF_SEC_TRS'] = Feature_DF.loc[:,'PP_SPF_SEC'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_SEC_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_GO_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP_GO']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP_GO'])\n Feature_DF.loc[:,'PP_SPF_TOP_GO_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP_GO'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_GO_TRS']]\n\n return Feature_DF", "def PP_SPF_AVGRW_GO_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW_GO']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW_GO'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_GO_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW_GO'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_GO_TRS']]\n\n return Feature_DF", "def PP_FH_FP_GO_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_GO']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_GO'])\n Feature_DF.loc[:,'PP_FH_FP_GO_TRS'] = Feature_DF.loc[:,'PP_FH_FP_GO'].apply(lambda x : (1+x-min_value)**(-2/9))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_GO_TRS']]\n\n return Feature_DF", "def PP_SPF_D1_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_D1']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_D1'])\n Feature_DF.loc[:,'PP_SPF_D1_TRS'] = Feature_DF.loc[:,'PP_SPF_D1'].apply(lambda x : (1+x-min_value)**(7/6))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_D1_TRS']]\n\n return Feature_DF", "def PP_FH_FP_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_DIST'])\n Feature_DF.loc[:,'PP_FH_FP_DIST_TRS'] = Feature_DF.loc[:,'PP_FH_FP_DIST'].apply(lambda x : (1+x-min_value)**(1/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_DIST_TRS']]\n\n return Feature_DF", "def f1_score(model_id, test_set_id, rubric_id):\n result = {'true_positive': 0, 'false_positive': 0, 'true_negative': 0, 'false_negative': 0}\n # right answers\n answers = db.get_rubric_answers(test_set_id, rubric_id)\n # rubrication results\n rubrication_result = db.get_rubrication_result(model_id, test_set_id, rubric_id)\n\n for key in rubrication_result:\n if rubrication_result[key] == answers[key]:\n if rubrication_result[key] == 1:\n result['true_positive'] += 1\n else:\n result['true_negative'] += 1\n else:\n if rubrication_result[key] == 1:\n result['false_positive'] += 1\n else:\n result['false_negative'] += 1\n if (result['true_positive'] + result['false_positive']) > 0:\n result['precision'] = result['true_positive'] / (result['true_positive'] + result['false_positive'])\n else:\n result['precision'] = 0\n if (result['true_positive'] + result['false_negative']) > 0:\n result['recall'] = 
result['true_positive'] / (result['true_positive'] + result['false_negative'])\n else:\n result['recall'] = 0\n if (result['precision'] + result['recall']) > 0:\n result['f1'] = 2 * result['precision'] * result['recall'] / (result['precision'] + result['recall'])\n else:\n result['f1'] = 0\n return result", "def JS_J_HJ_SPAVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','JS_J_HJ_SPAVG']]\n min_value = min(Feature_DF.loc[:,'JS_J_HJ_SPAVG'])\n Feature_DF.loc[:,'JS_J_HJ_SPAVG_TRS'] = Feature_DF.loc[:,'JS_J_HJ_SPAVG'].apply(lambda x : (1+x-min_value)**(-9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','JS_J_HJ_SPAVG_TRS']]\n\n return Feature_DF", "def relative_quality(self, id, scores, neighbors_list_highdim):\n neighbors_highdim = neighbors_list_highdim[id]\n score = scores[id]\n avg_score = np.mean([scores[i] for i in neighbors_highdim])\n relative_quality_score = score / avg_score\n return relative_quality_score", "def f1_score(y_true, y_pred, threshold, macro = False, eps = 1e-9):\n\n y_pred = torch.ge(y_pred.float(), threshold).float()\n\n y_true = y_true.float()\n\n tp_l = (y_pred * y_true).sum(0).float()\n\n fp_l = (y_pred * (1 - y_true)).sum(0).float()\n\n fn_l = ((1 - y_pred) * y_true).sum(0).float()\n\n precision_label = tp_l.div(tp_l + fp_l + eps)\n\n recall_label = tp_l.div(tp_l + fn_l + eps)\n\n if macro:\n\n f1_macro = torch.mean((precision_label * recall_label).div(precision_label + recall_label + eps) * 2)\n\n return f1_macro.item(), torch.mean(precision_label).item(), torch.mean(recall_label).item()\n\n else: \n\n tp = tp_l.sum()\n\n fp = fp_l.sum()\n\n fn = fn_l.sum()\n\n precision = tp / (tp + fp + eps)\n\n recall = tp / (tp + fn + eps)\n\n f1_micro = (precision * recall).div(precision + recall + eps) * 2\n\n return f1_micro.item(), precision.item(), recall.item()", "def dataset_quality_score(data_matrix, threshold=0.2, good_days=None,\n use_advanced=True):\n if good_days is None:\n if use_advanced:\n good_days = daily_missing_data_advanced(data_matrix, threshold=threshold)\n else:\n good_days = daily_missing_data_simple(data_matrix, threshold=threshold)\n score = np.sum(good_days) / data_matrix.shape[1]\n return score", "def RC_PP_PFLEP_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_PP_PFLEP']]\n Feature_DF.loc[:,'RC_PP_PFLEP_TRS'] = Feature_DF.loc[:,'RC_PP_PFLEP'].pow(6/7)\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_PP_PFLEP_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP_DIST'])\n Feature_DF.loc[:,'PP_SPF_TOP_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP_DIST'].apply(lambda x : (1+x-min_value)**(9/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_DIST_TRS']]\n\n return Feature_DF", "def f1_score(prediction, ground_truth):\n return precision_recall_f1(prediction, ground_truth)[2]", "def CC_REC_NUM_LT3_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','CC_REC_NUM_LT3']]\n min_value = min(Feature_DF.loc[:,'CC_REC_NUM_LT3'])\n Feature_DF.loc[:,'CC_REC_NUM_LT3_TRS'] = Feature_DF.loc[:,'CC_REC_NUM_LT3'].apply(lambda x : (1+x-min_value)**(-3/8))\n Feature_DF = Feature_DF.loc[:,['HNAME','CC_REC_NUM_LT3_TRS']]\n\n return Feature_DF", "def data_transform_rfm(self) :\n \n is_built_step = False\n if self._encoder_rfm is None:\n is_built_step = True \n \n #-------------------------------------------------------------------------\n # RFM feature is built\n 
#-------------------------------------------------------------------------\n ser_invoice_date = self._df_invoice_line.InvoiceDate\n \n self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \\\n = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\\\n , df_RFM_threshold=self.df_RFM_quantiles)\n \n self._df_invoice_line.InvoiceDate = ser_invoice_date\n \n #-------------------------------------------------------------------------\n # RFM score is added to dataframe\n #-------------------------------------------------------------------------\n df_merged = pd.merge(self.df_invoice_line\\\n , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])\n\n self._df_invoice_line \\\n = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\\\n , columns=df_merged.columns)\n \n\n #self._df_invoice_line \\\n #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\\\n #,join='inner')\n \n \n #-------------------------------------------------------------------------\n # RFM encoding\n #-------------------------------------------------------------------------\n self._encoder_rfm, df_RFM_encoded \\\n = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)\n\n #-------------------------------------------------------------------------\n # Encoded RFM features are renamed\n #-------------------------------------------------------------------------\n df_customers_rfm, list_col_unchanged \\\n = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\\\n , 'w_rfm_')\n \n self.strprint(\"df_customers_rfm =\" +str(df_customers_rfm.shape))\n\n #-------------------------------------------------------------------------\n # dataframe with RFM encoded values per customer is dumped\n #-------------------------------------------------------------------------\n if is_built_step is True:\n p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)\n else :\n self._df_customers_rfm = df_customers_rfm.copy()\n return", "def f1_score(self):", "def get_score(self, solution: np.array) -> float:\n pass" ]
[ "0.6958095", "0.61751866", "0.60585284", "0.5860262", "0.58369166", "0.5765395", "0.57476515", "0.5725581", "0.5717504", "0.5708911", "0.5702308", "0.5689124", "0.5686297", "0.5685354", "0.56756556", "0.5671112", "0.5645583", "0.5639483", "0.56138635", "0.56126195", "0.5591871", "0.558586", "0.55645734", "0.5559015", "0.5542081", "0.55401313", "0.55349123", "0.55247056", "0.5522429", "0.55209035", "0.5513474", "0.5512501", "0.55113596", "0.5506145", "0.55003357", "0.54916275", "0.54895705", "0.54887617", "0.5483196", "0.5479682", "0.54772335", "0.5476744", "0.545181", "0.5450318", "0.54459655", "0.5445379", "0.5439702", "0.54340345", "0.54265815", "0.54244334", "0.5421819", "0.54209435", "0.5419816", "0.5410021", "0.5408646", "0.53993857", "0.5393935", "0.5392057", "0.5384322", "0.53817165", "0.53804994", "0.5378055", "0.53694624", "0.5368241", "0.53576493", "0.53504324", "0.53478706", "0.53416604", "0.53337806", "0.533023", "0.53275967", "0.53220594", "0.53214765", "0.5307145", "0.5295316", "0.5294045", "0.5285411", "0.5274034", "0.52721965", "0.52652884", "0.52558047", "0.52520627", "0.5250812", "0.5248488", "0.52436244", "0.52350974", "0.5232461", "0.5231751", "0.5229915", "0.5214895", "0.52136236", "0.52123487", "0.5193365", "0.51923805", "0.5185531", "0.5183491", "0.5181619", "0.5171984", "0.51695067", "0.51690114" ]
0.60182154
3
Returns the data representation of the timer, will be used to send it over the web socket.
def get_list_data(self): key = 'timer' if self.repeated: key += '_repeat' return '%s %s' % (key, self.data.get_list_data())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_timer_data(self):\n return {\"sRef\": self._timer_service_ref_entry.get_text(),\n \"begin\": int(datetime.strptime(self._timer_begins_entry.get_text(), self._TIME_STR).timestamp()),\n \"end\": int(datetime.strptime(self._timer_ends_entry.get_text(), self._TIME_STR).timestamp()),\n \"name\": self._timer_name_entry.get_text(),\n \"description\": self._timer_desc_entry.get_text(),\n \"dirname\": \"\",\n \"eit\": self._timer_event_id_entry.get_text(),\n \"disabled\": int(not self._timer_enabled_switch.get_active()),\n \"justplay\": self._timer_action_combo_box.get_active_id(),\n \"afterevent\": self._timer_after_combo_box.get_active_id(),\n \"repeated\": self.get_repetition_flags()}", "def serialize(self):\n if self._serialized_timer is None:\n self._serialized_timer = self._enclave_wait_timer.serialize()\n\n return self._serialized_timer", "def timer(self):\n\n res = self.read_block(REG_TIMER, 4)\n\n return (res[3] << 24) + (res[2] << 16) + (res[1] << 8) + (res[0] << 0)", "def get_time(self, async = False):\n\n\t\tself._send_message(\"TIME\", \"\\x00\")\n\n\t\tif not async:\n\t\t\treturn EndpointSync(self, \"TIME\").get_data()", "def getTime(sock):\n MagicNo = 0x497E .to_bytes(2, \"big\")\n PacketType = 0x0002 .to_bytes(2, \"big\")\n if sock is s_english:\n LanguageCode = 0x0001\n flag = \"english\"\n elif sock is s_maori:\n LanguageCode = 0x0002\n flag = \"maori\"\n elif sock is s_german:\n LanguageCode = 0x0003\n flag = \"german\"\n date = datetime.datetime.today()\n LanguageCode = LanguageCode.to_bytes(2, \"big\")\n year = date.year.to_bytes(2, \"big\")\n month = (date.month).to_bytes(1, \"big\")\n day = date.day.to_bytes(1, \"big\")\n hour = date.hour.to_bytes(1, \"big\")\n minute = date.minute.to_bytes(1, \"big\")\n if flag == \"english\":\n text = f\"The current time is {date.hour}:{date.minute}\"\n elif flag == \"maori\":\n text = f\"Ko te wa o tenei wa {date.hour}:{date.minute}\"\n else:\n text = f\"Die Uhrzeit ist {date.hour}:{date.minute}\"\n\n lengthNow = len(text)\n length = lengthNow.to_bytes(1, \"big\")\n\n bytelist = [\n MagicNo,\n PacketType,\n LanguageCode,\n year,\n month,\n day,\n hour,\n minute,\n length,\n ]\n\n out = bytearray()\n\n for byteset in bytelist:\n out += byteset\n\n out.extend(text.encode(\"utf-8\"))\n\n return out", "def time(self):\n return self.raw[\"logTime\"]", "def _get_time(self):\n # get the current time in UTC (make sure we are timezone aware)\n now_utc = datetime.datetime.now(pytz.UTC)\n \n # convert to our local timezone\n timenow = now_utc.astimezone(self.timezone)\n \n # save the data to our data\n self.data['year'][0] = timenow.year\n self.data['month'][0] = timenow.month\n self.data['day'][0] = timenow.day\n self.data['hour'][0] = timenow.hour\n self.data['minute'][0] = timenow.minute\n self.data['second'][0] = timenow.second\n \n return", "def time(self):\n return self.time_array", "def on_timer(context, data_type, data):\n pass", "def heartbeat():\n return jsonify(int(time.time()))", "def gettime(self):\n return self.t", "def time(self) -> int:\n return self.raw[\"time\"]", "def getTime(self):\n return self.time", "def _send_data(self, data, time):\n pass", "def time(self):\n return signal_base_get_time(self.obj)", "def data(self) -> datetime:\n return self._data", "def recv_ts(self) -> int:\n pass", "def read_clock():\n return json.loads(_pump_output(\"read_clock\"))", "def output(self):\n return {\n \"time\": self.time,\n \"dmx\": self.dmx\n }", "def getTime(self, request, context):\n\t\t\n date = re.split(\"\\s\", 
datetime.utcnow().strftime(\"%Y %m %d %H %M %S\"))\n\n return droneconnect_pb2.Time(year = int(date[0]), month = int(date[1]), day = int(date[2]), hour = int(date[3]), minute = int(date[4]), second = int(date[5]))", "def getTimestamp(self):\r\n\t\treturn self.data['timestamp']", "def time(self):\n raise \"use method time of class ReactorNet\"\n #return _cantera.reactor_time(self.__reactor_id)", "def time(self):\n\t\treturn self._time", "def get_time(self):\n return self._ticks", "def time(self):\n return _cantera.reactornet_time(self.__reactornet_id)", "def get_time(self):\n return \"%02u:%02u:%02u (%d)\" % self.rtc.datetime()[4:8]", "def timer(self):\n\n # Start timer, if it hasn't been started already\n if self.start is None:\n self.start = datetime.datetime.now()\n return 0\n # End the timer, calculate elapsed time, format it, and return it\n else:\n self.end = datetime.datetime.now()\n elapsed = self.end - self.start\n minutes, seconds = divmod(elapsed.total_seconds(), 60)\n minutes = int(minutes)\n seconds = int(seconds)\n if seconds < 10:\n seconds = '0' + str(seconds)\n result = str(minutes) + ':' + str(seconds)\n return result", "def sendTime(self):\n timestamp = datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\")\n self.send(timestamp)", "def __create_msg(self, ping):\n now = rospy.get_rostime()\n output = {\n \"info\": {},\n \"timestamp\": int(now.secs * 1e3 + now.nsecs * 1e-6),\n \"data\": ping.T.tolist()\n }\n return json.dumps(output)", "def makeIdleData():\n return simplejson.dumps([\"idle\",[]])", "def servertime(self):\r\n return servertime.Servertime(self)", "def get_timed(self):\n ret = self.send(\"?T\", recv=True)\n ret = int(ret, 10)\n # FIXME: range?\n assert 1 <= ret <= 9999\n return ret", "def get_time(self) -> float:\n raise NotImplementedError()", "def getdata(self):\n return self.cwt", "def encode(self):\r\n tint = long(self.time)\r\n tfrac = long((self.time - tint)*1000000)\r\n return struct.pack(Format.Event, tsec, tfrac, self.eventType,\r\n self.eventCode, self.eventValue)", "def getRxTime(self):\n return self.rx_time", "def data(self):\n\t\tself.dworker()\n\t\treturn self.d", "def time(self):\n return self._time", "def get_time(self):\n return self.time", "def request_realtime_info(self):\n self.socket_datastream.sendto(b\"!r\", self.ip_port_arduino_datastream)\n self.socket_datastream.sendto(b\"!s\", self.ip_port_arduino_datastream)", "def output(self):\n time = \"%04d-%02d-%02d %02d:%02d:%02d\" % (self.year, self.month, \\\n self.date, self.hours, self.minutes, self.seconds)\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"type\": 4,\n \"enabled\": 1,\n \"abstime\": time\n }", "def _time(self):\n return self.r.eval(self.LUA_TIME, 1, 1)", "def time(self):\n return parse_time(self['timestamp'])", "def timers(self):\n return self['timers']", "def timers(self):\n return self.client.call('GET', self.name + '/timers')", "def timestamp(self):\n return self._data.get('timestamp')", "def json(self):\n beat = self.beat + 1.4 # replace with hjd\n w, h = self.getWidth(), self.getHeight()\n \n return {\n \"_time\": beat,\n \"_duration\": self.dur,\n #\"_lineIndex\": 0,\n #\"_type\": 0,\n #\"_width\": 0,\n \"_customData\": {\n # to undo the local rotation z transform we have to take trig parts of it and multiply them by the dimensions of the wall, then add them to the position\n \"_position\": [self.l + math.cos(math.radians(self.lrot[2] - 90)) * h / 2, self.d + math.sin(math.radians(self.lrot[2]-90)) * h / 2 + h / 2],\n \"_scale\": [w, h],\n 
\"_rotation\": self.rot,\n \"_localRotation\": self.lrot\n }\n }", "def get_data(self): # TODO: add smooth possibility\n return self.data", "def _send_time(self):\n if 'time' not in self.loopback_guard:\n content = {'time': self.time.isoformat()}\n self.send_action('set_time', content)", "def _get_meas_times_web_service(self, last_meas_time):\n subst = ''\n if self._segment and self._segment_value:\n if self._segment['partition_value_type'] == 'int':\n subst = self._segment_value['value_int']\n elif self._segment['partition_value_type'] == 'varchar':\n subst = self._segment_value['value_varchar']\n data_fetch_command_bind_parameter = self._segment['data_fetch_command_bind_parameter']\n else:\n data_fetch_command_bind_parameter = ''\n subst = ''\n\n #meas_times = self._outer_conn.query(last_meas_time, data_fetch_command_bind_parameter, subst, 'get_meas_times', None)\n ret_data = self._outer_conn.query(last_meas_time, data_fetch_command_bind_parameter, subst)\n self._web_service_data = dict()\n meas_times = {'header':'meas_time', 'data': list()}\n for meas_time, meas_data in ret_data.iteritems():\n meas_times['data'].append([meas_time])\n self._web_service_data[meas_time] = meas_data \n \n return meas_times", "def time_encoded(self):\n # type: () -> int\n return self._time_encoded", "def get_timestamp(self, data):\n timestamp = data['timestamp']\n return timestamp", "def getTime(self) -> float:\n return self.t", "def time(self):\r\n raise NotImplementedError", "def get_operation_times(self):\n self.write(\"TIMERS?\")\n timers = {}\n timers['psu'] = int(re.search(r\"\\d+\", self.read()).group())\n timers['laser'] = int(re.search(r\"\\d+\", self.read()).group())\n timers['laser_above_1A'] = int(re.search(r\"\\d+\", self.read()).group())\n self.read() # an empty line is at the end.\n return timers", "def elapsed(self):\n done, data1 = self._request('GS')\n if done:\n if data1[0] != '3':\n raise NotCharging\n done, data2 = self._request('GU')\n if done:\n return {\n 'seconds': int(data1[1]),\n 'Wh': float(data2[0])/3600\n }\n raise EvseError", "def t(self):\n return self._data_writer.get_current_run_time_ms()", "def get_time() -> int:\n return store.time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def recording_data(self):\n return self._get('recording/data')", "def time(self):\n raise NotImplementedError()", "def get_time(self):\n return self.widget().time()", "def get_io_time(self):\n return self._io_time", "def realtime(self):\r\n return resource.RealTime(self)", "def get_time(self):\n return self._total_time", "def get_timer_details(id):\n\twith postgres, postgres.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n\t\tcur.execute(\"select * from mustard.timers where id=%s\", (id,))\n\t\treturn cur.fetchone()", "def read_raw(self):\n beats = self.microblaze.read_mailbox(0x4)\n interval_ms = self.microblaze.read_mailbox(0x8 + (beats % 4)*4)\n return beats, interval_ms", "def data(self) -> RawData:\r\n\r\n return self.__data", "def ts(self):\n return self._ts", "def into_data(self) -> Dict[str, Any]:\n data = dict(producer=self.producer)\n if self.mtime_ns > 0:\n data[\"mtime\"] = str(_datetime_from_nanoseconds(self.mtime_ns))\n return data", "def get_time(self):\n return ''", "def total_timer(msg):\n start = timer()\n yield\n t = timer() - start\n _TOTAL_TIMER_DATA[msg].feed(t)", "def data(self) -> dict:\n return self._event.get('data')", "def time_return(self):\n return self.time", "def 
get_time(self):\n return self.__time", "def time(self):\n return self._begin", "def json(self):\n\t\treturn datetime.now()", "def getDate(sock):\n months = {\n \"english\": [\n \"January\",\n \"February\",\n \"March\",\n \"April\",\n \"May\",\n \"June\",\n \"July\",\n \"August\",\n \"September\",\n \"October\",\n \"November\",\n \"December\",\n ],\n \"maori\": [\n \"Kohitatea\",\n \"Hui-tanguru\",\n \"Poutu ̄-te-rangi\",\n \"Paenga-whawha\",\n \"Haratua\",\n \"Pipiri\",\n \"Hongongoi\",\n \"Here-turi-koka\",\n \"Mahuru\",\n \"Whiringa-a-nuku\",\n \"Whiringa-a-rangi\",\n \"Hakihea\",\n ],\n \"german\": [\n \"Januar\",\n \"Februar\",\n \"Marz\",\n \"April\",\n \"Mai\",\n \"Juni\",\n \"Juli\",\n \"August\",\n \"September\",\n \"Oktober\",\n \"November\",\n \"Dezember\",\n ],\n }\n\n MagicNo = 0x497E .to_bytes(2, \"big\")\n PacketType = 0x0002 .to_bytes(2, \"big\")\n if sock is s_english:\n LanguageCode = 0x0001\n flag = \"english\"\n elif sock is s_maori:\n LanguageCode = 0x0002\n flag = \"maori\"\n elif sock is s_german:\n LanguageCode = 0x0003\n flag = \"german\"\n date = datetime.datetime.today()\n LanguageCode = LanguageCode.to_bytes(2, \"big\")\n year = date.year.to_bytes(2, \"big\")\n language_months = months[flag]\n chosen_month = language_months[(date.month - 1)]\n month = date.month.to_bytes(1, \"big\")\n day = date.day.to_bytes(1, \"big\")\n hour = date.hour.to_bytes(1, \"big\")\n minute = date.minute.to_bytes(1, \"big\")\n if flag == \"english\":\n text = \"Today's date is {} {}, {}\".format(chosen_month, date.day, date.year)\n elif flag == \"maori\":\n text = \"Ko te ra o tenei ra ko {} {}, {}\".format(\n chosen_month, date.day, date.year\n )\n else:\n text = \"Heute ist der {} {}, {}\".format(chosen_month, date.day, date.year)\n\n lengthNow = len(text)\n length = lengthNow.to_bytes(1, \"big\")\n\n bytelist = [\n MagicNo,\n PacketType,\n LanguageCode,\n year,\n month,\n day,\n hour,\n minute,\n length,\n ]\n\n out = bytearray()\n\n for byteset in bytelist:\n out += byteset\n\n out.extend(text.encode(\"utf-8\"))\n\n return out", "def get_measurements(self):\r\n self.msg_send_upr.data[0] = b\"\\xff\"[0]\r\n self.send_and_flush(self.msg_send_upr)", "def data(self):\n return self._data", "def get_time_info(self):\n\n raise NotImplementedError", "def encode(self, rosMsg):\r\n try:\r\n return (rosMsg.to_sec(), {})\r\n except AttributeError:\r\n raise TypeError('Received object is not a Duration instance.')", "def time(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"time\"),\n )", "def text(message):\n room = session.get('room')\n emit('timerupdate', {'msg': message}, room=room)", "def __str__(self):\n return_text = \"Time-Triggered Frame information =>\\n\"\n return_text += \" Sender id : \" + str(self.__sender_id) + \"\\n\"\n return_text += \" Receivers ids : \" + str(self.__receivers_id) + \"\\n\"\n return_text += \" Path : \" + str(self.__paths) + \"\\n\"\n return_text += \" End_to_End : \" + str(self.__end_to_end_delay) + \" nanoseconds\\n\"\n return_text += \" Period : \" + str(self.__period) + \" nanoseconds\\n\"\n return_text += \" Starting : \" + str(self.__starting_time) + \" nanoseconds\\n\"\n return_text += \" Deadline : \" + str(self.__deadline) + \" nanoseconds\\n\"\n return_text += \" Size : \" + str(self.__size) + \" bytes\"\n return return_text", "def get_data(self):\n data = {\n \"ts\": self.drone.pos[0][0],\n \"drone\": self.drone,\n \"subject\": self.subject,\n \"peds\": self.peds, # can be None\n \"objs\": self.objs # can be None\n }\n 
self.empty_bag()\n return data", "def recv_time(self) -> float:\n return ntp_to_system_time(self.recv_timestamp)", "def net_delay_data(self):\n return self._net_delay_data", "def rcvStrTimeOut(self, num=1, tou=0.1):\r\n\t\treturn self.rcvDataTimeOut(num, tou)", "def time(self):\n # type: () -> int\n return self._time", "def get_time(self) -> int:\n t = str(self.eval(\"pyb.RTC().datetime()\").encode(\"utf-8\"))[1:-1].split(\", \")\n return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])", "def get_time(self):\n return self._current_time_sec", "def time(self):\n self.convert_window(\"Time\", \"seconds\", [\"centuries\", \"days\", \"decades\", \"femtoseconds\", \"fortnights\", \"hours\", \"microseconds\", \"millenia\", \"milliseconds\", \"minutes\", \"months(Common)\", \"months(Synodic)\", \"nanoseconds\", \"picoseconds\", \"quarters(Common)\", \"seconds\", \"shakes\", \"weeks\", \"years(Average Gregorian)\", \"years(Common)\", \"years(Julian)\", \"years(Leap)\", \"years(Tropical)\"])", "def get_rtt_message(self):\n return self.messages[\"rtt\"].get()", "def getTimeframedData(self, website, timeframe, currentTime=time.time()):\n timeList = list(website.log.keys())\n # inside the dic from most recent to most ancient\n # reverse order\n # list of time of requests\n inFrame = []\n # getting the times within the timeframe\n for listind in range(len(timeList)):\n if (currentTime-timeList[len(timeList)-1-listind] <= timeframe):\n inFrame.append(timeList[len(timeList)-1-listind])\n # Indicators\n # Max\n maxTime = self.computeMaxResponseTime(website, inFrame)\n # Avg\n avgTime = self.computeAvgResponsetime(website, inFrame)\n # Availability\n availability = self.computeAvailability(website, inFrame)\n # Status\n status = self.computeStatus(website, currentTime)\n\n # Alert checking with 120 timeframe\n if (timeframe == 120):\n self.checkForIsDownAlert(website= website, availability= availability)\n self.checkForIsUpAlert(website=website, availability=availability)\n\n\n return {'website': website, 'frame': timeframe,'time': currentTime, 'indicators': {'maxTime': maxTime, 'avgTime': avgTime, 'availability': availability, 'status': status}}", "def gettime(self):\n interval, value = _timerfd._timerfd.gettime(self)\n interval = self._join_time(*interval)\n value = self._join_time(*value)\n return interval, value", "def get_time(self):\n\t\treturn time.time()", "async def send_data(message, nats_handler, shared_storage, logger):\n time = message.data[\"time\"]\n for i in range(shared_storage[\"data_rate\"]):\n time_struct= datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S.%f\").timetuple()\n x = time_struct.tm_mon + time_struct.tm_mday/30 + time_struct.tm_hour/720 + time_struct.tm_min/43200 + time_struct.tm_sec/2592000\n data_value = 4.2*math.sin((270000*(x + 1))*3.14/6) + 13.7\n message = nats_handler.create_message(data_value, MessageSchemas.IOT_DATA_MESSAGE)\n message.message_type = \"temperature\"\n await nats_handler.send_data(\"data.out\", message)\n await asyncio.sleep(0.5/shared_storage[\"data_rate\"])" ]
[ "0.7045231", "0.6424909", "0.63322824", "0.61530703", "0.61145544", "0.6017771", "0.592645", "0.59186465", "0.59073746", "0.5903025", "0.5891447", "0.58581436", "0.5799672", "0.5785056", "0.575003", "0.5743803", "0.5738986", "0.5706574", "0.5649952", "0.5620999", "0.5616731", "0.5606849", "0.5595778", "0.5593622", "0.5584529", "0.5584442", "0.5577718", "0.5561627", "0.5555483", "0.5553853", "0.55471677", "0.5538207", "0.5536148", "0.5513676", "0.55039215", "0.55031854", "0.54957896", "0.5467836", "0.54626304", "0.5455963", "0.54494894", "0.5443651", "0.5437653", "0.54345816", "0.5424659", "0.5422028", "0.54120797", "0.540859", "0.5404225", "0.5385586", "0.5377068", "0.53737056", "0.53676236", "0.5367555", "0.53603905", "0.5357288", "0.5356994", "0.53509796", "0.5350158", "0.5350158", "0.5350158", "0.53497076", "0.53478116", "0.53445387", "0.5342203", "0.5341307", "0.5338737", "0.53176314", "0.5314807", "0.5309566", "0.5296483", "0.5293535", "0.52933407", "0.52891076", "0.5285794", "0.5277746", "0.5263928", "0.5263774", "0.5261562", "0.52610075", "0.52596754", "0.5256898", "0.5252535", "0.52502924", "0.52431315", "0.5241716", "0.5239632", "0.5237435", "0.5234069", "0.52209926", "0.52195686", "0.521871", "0.52177995", "0.5217759", "0.52176595", "0.5213565", "0.5209078", "0.5207727", "0.5201932", "0.5198534" ]
0.63464636
2
Executing every forest in collection, activating their networks. By the way collecting data about best fitness function.
def execute(self): process_list = [] forests_queue = Queue(self.power) iterational = 0 print '| |-starting evaluation, training and validation' for one_forest in self._forests: process_list.append( Process(target=main_async_method, args=(forests_queue, copy(one_forest.to_portal()), iterational, self.settings))) iterational += 1 for proc in process_list: proc.start() for proc in process_list: proc.join() for smth in range(forests_queue.qsize()): tmp = forests_queue.get() self._forests[tmp['place']].fitness = tmp['fitness'] fitness_summ = sum(map(lambda forest: forest.fitness, self._forests)) fss = map(lambda x: x.fitness, self._forests) print 'avg = ', str(sum(fss) / len(fss)), 'max = ', max(fss) self.roulet = map(lambda x: x.fitness / fitness_summ, self._forests)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutate(self):\n for forest in self._forests:\n forest.mutate(self._fullInput)", "def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self.cost_calculator.ensemble_sizes) - 1\n #define cost functions\n cost_func = lambda p: sum(self.cost_calculator.get_cost(p))\n #create model\n self.model = ga(cost_func, bounds, **kwargs)\n \n fitness_list = [];\n \n for i in range(num_iterations):\n #Update\n f = next(self.model)\n #get fitness values\n fitness_list.append(f[0])\n #Output\n print('\\r(%d/%d) '%(i+1,num_iterations), end = '')\n print('top ensemble fitness: %1.1f '%f[0], end = '')\n \n print('\\nDone')\n self.solution = self.cost_calculator.decode_arrangement(self.model.get_solution())", "def main():\n t = []\n for i in range(1, 19):\n t.append(i)\n config = Config()\n config.DEBUG = True\n config['time_list']=t\n config['load_graphs_from_xml']=True\n\n defaults = dict(num_samples=100, max_depth=5, run=0, num_runs=1,num_trees=100, stat='logrank', split_stat='logrank', num_folds=None,exp='flood',\n verbose=True, folds=None, load_graphs_from_xml=True, time_list=t)\n for key, value in defaults.items():\n cur_value = config.get(key, None)\n # print(\"key={0}:cur_value={1}\".format(key,cur_value))\n config[key] = value if cur_value is None else cur_value\n config.DEBUG = True\n #loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n #config.parseOpts()\n print('Start Grow Forest')\n growForest(config)", "def learn(self):\n for a in self.agents:\n a.learn()", "def main():\n create_sets()\n optimal_weights = genetic_algorithm()\n obtain_best_model(optimal_weights)", "def runner(self):\n\n print('[ INFO ]: Initializing the forest fires program runner...')\n\n df, features, predictor = self.preprocess()", "def run(self, verbose=False):\n\n cost = {}; cost[\"best\"] = []; cost[\"mean\"] = []\n for i in range(self.max_iters):\n\n # prints out information at current cycle\n if verbose:\n print(\"Iteration: {}\".format(i),\n \"Fitness: {}\".format(self.forest[0][0]))\n\n # reproduction phase\n self.reproduce()\n\n # seed dispersal phase\n self.seedlings = []\n for tree in self.population:\n self.disperse(tree[1])\n tree[1].year += 1\n\n # selection phase\n self.select()\n\n # decays exploration parameters\n if (self.epsilon > 0):\n self.epsilon -= self.epsilon_decay\n\n # stores statistics and updates counter of iterations\n cost[\"best\"].append(self.population[0][0])\n cost[\"mean\"].append( sum( [ tree[0] for tree in self.population ] )\\\n / len(self.population) )\n self.iteration += 1\n\n return cost", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def apply_neurons(self):\n for neuron in range(self.n_outputs):\n self.uf_activate(neuron)", "def run_all(self):\n # print(\"running all nodes\")\n executed = set()\n node_update_states = {node: node.block_updates for node in 
self.flow_view.node_items}\n\n def traverse_upwards(node):\n # Traverse upwards to the top of data flow graph\n if node in executed:\n return\n for port in node.inputs:\n for connection in port.connections:\n traverse_upwards(connection.out.node)\n # print(\"executing\", node)\n node.update_event()\n executed.add(node)\n\n for node in self.flow_view.node_items:\n node.block_updates = True\n\n for node in self.flow_view.node_items:\n traverse_upwards(node)\n\n for node in self.flow_view.node_items:\n node.block_updates = node_update_states[node]\n # print(\"All nodes executed\")", "def run(self):\n count = self.neuron_count\n for i in range(0, count):\n self.run(i)", "def run_all():\n db = DBInterface()\n year = Config.get_property(\"league_year\")\n session = Session(bind=db.engine)\n\n scraper.scrape_all(db, session, year)\n session.commit()\n\n bets.predict_all(db, session)\n session.commit()\n session.close()", "def run(self):\n for i in range(self.generations):\n log.info(f'Training population in generation {i + 1}...')\n if i == 0:\n self.create_first_generation()\n else:\n self.create_next_generation()\n log.info(f'best individual: {self.best_individual()[1]}')\n log.info(f'best individual score: {self.best_individual()[0]}')", "def train(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net')\n net.train()", "def run_evolutionary_generations(self):\n \n # Evolve the generation.\n for i in range(self.generations):\n logging.info(\"***Doing generation %d of %d***\" %\n (i + 1, self.generations))\n \n self.train_networks(self.networks)\n \n if self.is_classification:\n average_accuracy, highest_accuracy, lowest_accuracy, highest_scoring_network = self.get_accuracy_stats(self.networks) \n \n if highest_scoring_network is not None:\n highest_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_acc%f\" % (i, highest_accuracy)))\n \n logging.info(\"Generation average: %.2f%%\" % (average_accuracy * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_accuracy * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_accuracy * 100))\n logging.info('-'*80)\n else:\n average_loss, highest_loss, lowest_loss, best_scoring_network = self.get_loss_stats(self.networks) \n if best_scoring_network is not None:\n best_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_loss%f\" % (i, lowest_loss)))\n \n logging.info(\"Generation average: %.2f%%\" % (average_loss * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_loss * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_loss * 100))\n logging.info('-'*80)\n # Evolve, except on the last iteration.\n if i != self.generations - 1:\n self.networks = self.optimizer.evolve(self.networks)\n \n self.save_network_objects(self.networks)\n \n if self.is_classification:\n self.networks = sorted(self.networks, key=lambda x: x.accuracy, reverse=True)\n else:\n self.networks = sorted(self.networks, key=lambda x: x.loss, reverse=False)\n \n self.print_networks(self.networks[:5])\n \n self.save_trained_network_models(self.dataset, self.networks[:5])", "def do_make_(self):\n global g_list_of_classifier\n\n for ite_clf in g_list_of_classifier:\n ite_clf.learn()\n return ''", "def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n 
clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + 
str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")", "def get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n tree = Tree(antecessor, idx = _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def set_train(self):\n for m in self.models.values():\n m.train()", "def train(self) -> None:\n for module in self.modules.values():\n module.train()\n return", "def _forest_nodes(self):\n\n self.arbor._grow_tree(self)\n root = self.root\n for link in root._links:\n yield self.arbor._generate_tree_node(self.root, link)", "def run(self):\n for _ in range(self.epoch, conf.FX_MAX_EPOCHS):\n self.train()\n\n with torch.no_grad():\n self.test()\n\n self.epoch += 1\n self.save_ck()\n\n self.show_completion_msg()", "def train(self):\n max_tuple = self.max_gain()\n # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop\n while max_tuple.gain != 0:\n max_tuple.node.split(max_tuple.attribute)\n max_tuple = self.max_gain()", "def forest(self):\n\n forest_parameters = [{'n_estimators': hel.powerlist(10, 2, 4),\n 'min_samples_leaf': list(range(2, 10, 1)),\n 'criterion': ['mae', 'mse'],\n 'random_state': [1], 'n_jobs': [-1]}]\n forest_grid = GridSearchCV(estimator=RandomForestRegressor(),\n param_grid=forest_parameters,\n scoring=self.scorer, cv=5, n_jobs=-1,\n iid=False)\n forest_grid_result = forest_grid.fit(self.X_train, self.y_train)\n best_forest_parameters = forest_grid_result.best_params_\n forest_score = forest_grid_result.best_score_\n print('Best forest params: ' + str(best_forest_parameters))\n print('Forest score: ' + str(forest_score))\n return RandomForestRegressor(\n n_estimators=best_forest_parameters['n_estimators'],\n min_samples_leaf=best_forest_parameters['min_samples_leaf'],\n criterion=best_forest_parameters['criterion'],\n random_state=1, n_jobs=-1)", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def run(self):\n for i in range(self.exploration_steps):\n self.single_step(i)\n if self.save_checkpoints:\n 
self.save_results()\n self.save_results()\n if self.verbose:\n print(\"\\nExploration completed\")\n return", "def explore(self):\n\n i = 0\n while True:\n i += 1\n \n state_counts = {game.__class__.__name__: Counter() for game in self.games} \n\n policies_prime = []\n pi_sum = 0\n v_sum = 0\n counter = 0\n \n # bookkeeping\n log.info(f'Starting Exploration Iteration #{i} ...')\n\n # for task in tasks...\n for _ in range(self.args['taskBatchSize']):\n\n # create deepcopy for training a theta'\n policy_prime = copy.deepcopy(self.nnet)\n \n # sample a game (task)\n game = np.random.choice(self.games, p=self.probs)\n log.info(f'Sampled game {type(game).__name__} ...')\n\n # multiprocess to get our training examples\n iterationTrainExamples = deque([], maxlen=self.args['maxlenOfQueue'])\n iterationTrainExamples = run_apply_async_multiprocessing(self.executeEpisode, [(MCTS(game, self.nnet, self.args), type(game)(), self.args.copy())] * self.args['numEps'], self.args['numWorkers'], desc='Self Play')\n iterationTrainExamples, iter_counters = zip(*iterationTrainExamples)\n\n iterationTrainExamples = list(itertools.chain.from_iterable(iterationTrainExamples))\n state_counts[game.__class__.__name__] += sum(iter_counters, Counter())\n\n # shuffle examples before training\n shuffle(iterationTrainExamples)\n\n # train our network\n pi_v_losses = policy_prime.train(iterationTrainExamples)\n\n policies_prime.append(policy_prime.state_dict())\n\n for pi,v in pi_v_losses:\n pi_sum += pi\n v_sum += v\n counter += 1\n \n # compute average parameters and load into self.nnet\n self.nnet.load_average_params(policies_prime)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n pmcts = MCTS(self.games[0], self.pnet, self.args)\n\n\n # Arena if we choose to run it\n if self.args['arenaComparePerGame'] > 0:\n # ARENA\n nmcts = MCTS(self.games[0], self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena()\n pwins, nwins, draws = arena.playGames(self.pnet, self.nnet, self.args, self.games)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args['updateThreshold']:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='best.pth.tar')\n\n log.info('Iteration Complete. Writing counts to \"%s/%s\"...', *self.args['json_folder_file'])\n # create the json file\n path = os.path.join(self.args['json_folder_file'][0], self.args['json_folder_file'][1])\n with open(path, 'a+') as f:\n if os.stat(path).st_size == 0: ## file just created/empty\n log.info('No counts found. 
Writing to empty file.')\n old_counts = {game.__class__.__name__: Counter() for game in self.games}\n else: ## load the counts from the file\n log.info('Loading counts...')\n f.seek(0)\n str_counts = f.read()\n # print('STRING OF JSON:', type(str_counts), str_counts)\n old_counts = json.loads(str_counts)\n old_counts = {game: Counter(v) for game, v in old_counts.items()}\n master_counts = {game.__class__.__name__: state_counts[game.__class__.__name__]+old_counts[game.__class__.__name__] for game in self.games}\n # countiung logic: turn {gametype -> Counter} into {gametype -> {state -> count}}\n master_counts = {game: dict(counter) for game, counter in master_counts.items()}\n log.info('Writing...')\n f.truncate(0) #clear file\n json.dump(master_counts, f)\n log.info('Counts written to json file \"%s/%s\"...', *self.args['json_folder_file'])", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def run(self, iterations):\n # print(f'Before:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome before: {self.best_genome.array}, fitness={self.best_genome.fitness} ')\n\n mutator = Rand1MutationOperator(self.population, self.bounds, 0.2)\n mixer = ExponentialCrossoverOperator(self.minfun)\n replacer = ElitistReplacementOperator()\n\n for _ in range(iterations):\n candidate_population = Population(None, None, 0)\n for target in self.population.collection:\n # List with genomes who will be the donors\n mutant = mutator.apply(target)\n # Genome modified by replacing a few random positions\n candidate_genome = mixer.apply(target, mutant)\n\n candidate_population.add(candidate_genome)\n\n # Targets are replaced by candidates from the population if candidate has less fitness than target\n self.population = replacer.apply(self.population, candidate_population)\n\n # print(f'After:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome after: {self.best_genome.array}, fitness={self.best_genome.fitness} ')", "def loadall(bot) :\n for feature in features :\n load(bot, feature)", "def fit(self, train_features, train_actuals):\n for name in self.models.keys():\n print('-'*shutil.get_terminal_size().columns)\n print(\"evaluating {}\".format(name).center(columns))\n print('-'*shutil.get_terminal_size().columns)\n estimator = self.models[name]\n est_params = self.params[name]\n gscv = GridSearchCV(estimator, est_params, cv=5, scoring=self.scoring_metric)\n gscv.fit(train_features, train_actuals)\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\n self.single_classifier_best[name] = gscv", "def bulk_train(self):\n logger.info(\"collecting subfolders - relations\")\n relations = self.collect_subfolders(self.input_dir)\n logger.info(\"relations - {}\".format(relations))\n\n execution_times = []\n\n for rel, rel_path in tqdm(relations.items(), desc=\"relations\"):\n logger.info(\"collecting training files from {}\".format(rel_path))\n tr_files = self.collect_files(rel_path, self.regexp_train)\n hyper_params = self.get_hyperparams()\n hyper_params['graph'] = tr_files\n\n output_folder = os.path.join(self.output_dir, rel)\n if not os.path.exists(output_folder):\n logger.info(\"creating {} (did not exist)\".format(output_folder))\n os.makedirs(output_folder)\n\n for params in tqdm(ParameterGrid(hyper_params), desc=\"training embedding\"):\n logger.info(\"hyperparams: {}\".format(params))\n train_file = params['graph']\n model_name = self.compute_model_name(params, output_folder)\n logger.info('training starspace model \"{}\" from 
file \"{}\"'.format(\n model_name, train_file))\n external_output, delta = self.call_starspace(params, train_file, model_name)\n logger.info(\"executed in {:0.2f}s\".format(delta))\n\n logger.info(\"external command output logged in {}\".format(self.external_log))\n if not os.path.exists(self.output_dir):\n logger.info(\"creating {} (did not exist)\".format(self.output_dir))\n os.makedirs(self.output_dir)\n\n with open(self.external_log, 'a') as f:\n f.write(external_output)\n\n execution_times.append(dict({ 'time': delta }, **params))\n \n return execution_times", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()", "def run(self):\n\n rf = RandomForestClassifier(**self._settings['specs'])\n\n # Extract sum stats and model indices from ref table\n indices = toArray(self._refTable, 'idx').flatten()\n sumStat = toArray(self._refTable, 'sumstat')\n\n # Do a 5-fold cross-validation\n accuracies = self._cross_val(sumStat, indices, rf, 5)\n\n # Fit on summary statistics (the more the better)\n rf.fit(sumStat, indices)\n\n # Predict probabilities of models on summary obs\n sumStatTest = np.array(self._pp.scaledSumStatObsData).reshape(1, -1)\n pred = rf.predict_proba(sumStatTest)\n\n return {mod : np.round(pred[0,i],3) for i, mod in enumerate(self._modelNames)}", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net_' + name)\n net.eval()", "def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)", "def train_and_eva():\n for sol in _solvers:\n for sub_u_rate in _sub_u_rates:\n print(\"now processing \" + sol + \" \" + str(sub_u_rate))\n pu_first_stage_training(sol, sub_u_rate)\n first_stage_test(sol, sub_u_rate)\n print(\"\\n\\n\")", "def run(self, generations=1000):\n gcount = 0\n \n while gcount<=generations:\n try:\n print \"Gen: \"+str(gcount),\n self.population = zip (self.population, [self.target]*len(self.population))\n self.population = self.pool.map(f, self.population)\n except:\n pass\n for i in self.population:\n print i[0],i[1]\n self.population = [organism.Organism(x[0], x[1]) for x in self.population]\n self.population.sort()\n print \" Max fitness: \"+str(self.population[::-1][1].fitness)\n try:\n if self.population[0] <= self.ppop[0]:\n self.ppop = self.population[::-1][0:10] # The top ten organisms\n else:\n self.population = self.ppop # We got worse! go back!\n except:\n self.ppop = self.population\n self.population = self.population[::-1][0:10]\n try:\n self.breed()\n except:\n print \"Breeding error\"\n gcount+=1", "def eval_genomes(population, conf):\n for (_, g) in population:\n eval_genome(g, conf)", "def growForest(config, load_exp_file=True):\n\n silent = config.get('silent', False)\n experiment_Path = r\"C:\\Users\\user\\Desktop\\Prediction_model\\experiment\\flood.exp\"\n\n if load_exp_file:\n #loadExperimentFile(config, filename=config.exp)\n loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n\n forests = []\n results = []\n\n\n # do multiple runs if needed. 
note that we start at config.run, not zero\n for run in range(config.num_runs):\n training_graphs, testing_graphs = splitDict(config.graphs, int(len(config.graphs) * .8), random=True)\n\n \"\"\"\n # perform under-sampling if needed\n if hasattr(config, 'underlabel'):\n under_graphs = {}\n skip_count = 0\n for k in training_graphs.keys():\n if training_graphs[k].class_label == config.underlabel and random.random() <= config.underval:\n skip_count += 1\n else:\n under_graphs[k] = training_graphs[k]\n print('Undersampled ' + str(skip_count) + ' graphs')\n training_graphs = under_graphs\n \"\"\"\n # print out some useful info on the class distribution\n counts = defaultdict(int)\n for graph in training_graphs.values():\n counts[graph.class_label] += 1\n print('training:', len(training_graphs), counts)\n\n counts = defaultdict(int)\n for graph in testing_graphs.values():\n counts[graph.class_label] += 1\n print('testing:', len(testing_graphs), counts)\n\n for graph in training_graphs.values():\n counts[graph.class_label] += 1\n print('total:', len(config.graphs), counts)\n\n print('\\nrun:', run)\n config.run = run\n\n srrf = SRRForest(config)\n #srrf.growForest(training_graphs)\n srrf.growForest(config.graphs)\n forests.append(srrf)\n #srrf.training_graph_ids = list(training_graphs.keys())\n #training_labeling = srrf.labelGraphs(training_graphs,config.time_list)\n #outOfBagLabels=srrf.getOutOfBagLabels()\n #print(\"outOfBagLabels\")\n #print(outOfBagLabels)\n #c=srrf.compute_oob_score(training_graphs, outOfBagLabels)\n #print(\"concordance index:\")\n #print(c)\n config.saveTrees(srrf)\n\n #results.append(c)\n\n\n\n\n \"\"\"\n\n df = pd.DataFrame(columns=['lon', 'lat', 'survival_probability', 'time'])\n\n\n srrf.testing_graph_ids = testing_graphs.keys()\n testing_labeling = srrf.labelGraphs(testing_graphs,config.time_list)\n\n\n\n\n\n\n\n for i,h in testing_labeling.items():\n\n lat = i.graph.attributes_by_type.get(('cell', 'lat'))[0].value\n lon = i.graph.attributes_by_type.get(('cell', 'lon'))[0].value\n for t, label in h.items():\n df = df.append(\n {'lon': lon, 'lat': lat, 'survival_probability': label[1], 'time': t},\n ignore_index=True)\n\n sort_by_time = df.sort_values('time')\n print(sort_by_time.head())\n import plotly.express as px\n fig = px.scatter_mapbox(sort_by_time, lat=\"lat\", lon=\"lon\", hover_data=[\"survival_probability\"],\n color=\"survival_probability\", animation_frame=\"time\", animation_group=\"time\",\n color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10, height=500)\n fig.update_layout(mapbox_style=\"open-street-map\")\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n fig.show()\n \"\"\"\n\n\n\n #config.saveTrees((srrf,)) ###config.saveTree is giving us an eror type error: unable to pickle dict keys.\n\n #print('numruns: %s' % (config.num_runs))\n #print(results)\n\n\n #return results", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. 
\"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def __init__(self,\n lower, upper ,\n fun ,\n max_std, min_std ,\n init_numb_trees = 10 ,\n max_numb_trees = 20 ,\n max_seeds = 10 ,\n min_seeds = 1 ,\n epsilon = 0.1 ,\n epsilon_decay = 0.0 ,\n max_iters = 100 ,\n mut_proba = 0.1 ,\n seed = None ,\n ):\n\n # generates a seed for the random number generator\n if (seed == None):\n self.seed = random.randint(0, 1000)\n else:\n self.seed = seed\n random.seed(self.seed)\n\n # assigns properties of FO algorithm\n self.max_number_trees = max_numb_trees\n self.max_seeds = max_seeds\n self.min_seeds = min_seeds\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.max_iters = max_iters\n self.max_std = max_std\n self.min_std = min_std\n self.mut_proba = mut_proba\n\n # assigns fitness function\n self.evaluate = fun\n\n # stores lower and upper bounds\n self.lower = lower\n self.upper = upper\n\n # evaluates dimension of the optimal problem\n assert ( len(lower)==len(upper) ), \\\n \"'lower' and 'upper' must be of the same dimension.\"\n self.dim = len(lower)\n\n # initialises a forest of trees\n self.population = []\n for _ in range(init_numb_trees):\n tree = Tree(lower, upper)\n if (fun != None):\n self.population.append((fun(tree.vector), tree))\n else:\n self.population.append((sys.float_info.max, tree))\n\n # initialises iterations counter\n self.iteration = 1\n\n # creates a seedlings buffer\n self.seedlings = []", "def finetuned():\n launch_training_on_all_splits(experiment='full', splits=NAME_SPLIT, base_model='ft', dropout=0.7304, learning_rate=0.0000976)", "def run_all_tasks(data_dir):\n print(\"Training and testing for all tasks ...\")\n for t in range(20):\n run_task(data_dir, task_id=t + 1)", "def eval_all(folder):\n optimizers = [\n tf.keras.optimizers.Adadelta(learning_rate=0.01),\n tf.keras.optimizers.Adagrad(learning_rate=0.002),\n tf.keras.optimizers.Adam(learning_rate=0.0001),\n tf.keras.optimizers.Adamax(learning_rate=0.0005),\n tf.keras.optimizers.Ftrl(learning_rate=0.002),\n tf.keras.optimizers.Nadam(learning_rate=0.001),\n tf.keras.optimizers.RMSprop(learning_rate=0.0005),\n tf.keras.optimizers.SGD(learning_rate=0.003),\n ]\n\n epochs = [\n 500, 120, 80, 150, 300, 60, 100, 500\n ]\n\n biased_randomized = [\n (models.DefaultModel, False),\n (models.BiasedModel, False),\n (models.NeuralModel, False),\n (models.DefaultModel, True),\n (models.BiasedModel, True),\n (models.NeuralModel, True),\n ]\n\n for optimizer, n_epochs in zip(optimizers, epochs):\n for model, rndmz in biased_randomized:\n eval_optimizer(folder,\n model,\n optimizer,\n n_epochs,\n rndmz)", "def execute(self):\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation\n # main loop\n while not self.environment.end_episode:\n # each agent choose its action\n self.environment.choose_action()\n # next state\n self.environment.calculate_next_state()\n # is the end of the episode\n self.environment.calculate_end_episode()\n # set observations 
for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation", "def _process_datasets_all_frames(self):\n datasets = os.listdir(self.separated_root)\n for dataset in datasets:\n dataset_path = join(self.separated_root, dataset)\n\n for model in self.models:\n\n attacks_list = os.listdir(dataset_path)\n\n for attack in attacks_list:\n attack_path = join(dataset_path, attack)\n\n for prop in self.properties:\n property_alias = prop.get_property_alias()\n\n if os.path.exists(\n join(self.output_features, dataset, attack, property_alias, model.alias)):\n print('%s already extracted features' % dataset)\n continue\n\n path_train = join(attack_path, self.train_alias)\n path_test = join(attack_path, self.test_alias)\n\n X_train, y_train, indexes_train, samples_train = self._get_dataset_contents(path_train,\n property_alias)\n X_test, y_test, indexes_test, samples_test = self._get_dataset_contents(path_test,\n property_alias)\n\n output_features = join(self.output_features, dataset, attack, property_alias, model.alias)\n\n features_train = self._fetch_features(X_train, model, output_features, self.train_alias)\n features_test = self._fetch_features(X_test, model, output_features, self.test_alias)\n\n # saving features\n np.save(join(output_features, (NAME_FEATURES % self.train_alias)), features_train)\n np.save(join(output_features, (NAME_FEATURES % self.test_alias)), features_test)\n\n # saving targets\n np.save(join(output_features, (NAME_TARGETS % self.train_alias)), y_train)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n\n # saving samples names\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.train_alias)), samples_train)\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.test_alias)), samples_test)", "def step(self):\n\t\tnumpy.random.shuffle(self.agents_list)\n\t\tfor agent in self.agents_list:\n\t\t\tagent.produce()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.charge()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.retribute()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.give()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.consume()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.solve_consumption_deficit()\n\t\tfor site in self.sites:\n\t\t\tsite.recovery()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.sprout()", "def calc(self):\n\t\tfor neuron in self.neurons.items():\n\t\t\tneuron.calculate()", "def eval_fairgen():\n print(\"Evaluation: Fairgen\")\n modes = [\n # name, function\n ('dSMC', ana.d_smc),\n ('dAMC', ana.d_amc),\n ('EDF-VD', ana.d_edf_vd),\n ('pSMC', ana.p_smc),\n ('pAMC-BB', ana.p_amc_bb),\n ('pAMC-BB+', ft.partial(ana.p_amc_bb, ignore_hi_mode=True))\n ]\n\n pool = mp.Pool()\n task_sets_list = pickle.load(open(task_sets_path + 'task_sets_fairgen', 'rb'))\n for name, func in modes:\n start = time()\n rates = []\n for task_sets in task_sets_list:\n rates.append(100 * np.average(pool.map(func, task_sets)))\n pickle.dump(rates, open(eval_fairgen_path + name, 'wb'))\n stop = time()\n print('%s: %.3fs' % (name, (stop - start)))", "def _optimize(self) -> None:\n\n for i, agent in enumerate(self.agents):\n states, actions, rewards, next_states, dones = self.memory.sample()\n\n actor_next_state = self._agent_states(i, next_states)\n next_actions = torch.cat(\n [a.actor_target(actor_next_state) 
for a in self.agents], 1\n )\n next_q = agent.critic_target(next_states, next_actions).detach()\n target_q = rewards[:, i].view(-1, 1) + self.gamma * next_q * (\n 1 - dones[:, i].view(-1, 1)\n )\n local_q = agent.critic_local(states, actions)\n\n value_loss = agent.loss_fn(local_q, target_q)\n agent.value_optimizer.zero_grad()\n value_loss.backward()\n agent.value_optimizer.step()\n\n local_actions = []\n for i, a in enumerate(self.agents):\n local_states = self._agent_states(i, states)\n local_actions.append(\n a.actor_local(local_states)\n if a == agent\n else a.actor_local(local_states).detach()\n )\n local_actions = torch.cat(local_actions, 1)\n policy_loss = -agent.critic_local(states, local_actions).mean()\n\n agent.policy_optimizer.zero_grad()\n policy_loss.backward()\n agent.policy_optimizer.step()\n\n self._update_target_model(agent.critic_local, agent.critic_target)\n self._update_target_model(agent.actor_local, agent.actor_target)", "def eval(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net')\n net.eval()", "def run_functions(self):\n for function in self.functions:\n try:\n function()\n except Exception as err:\n logger.exception(\n f\"[red]Failed running and collecting data for function: {function.__name__}[/red]\"\n )\n logger.error(traceback.format_exc())\n logger.error(f\"[red]{err}[/red]\")\n logger.error(\"Continuing..\")", "def run(self):\n self.membershipFunction()\n self.interpretingMF()\n self.rules()\n self.standardComposition_Min()\n self.standardComposition_Max()\n self.defuzzification()", "def eval_genomes(genomes, config_):\n data = next_batch()\n assert data is not None\n inputs, outputs = data\n inputs = preprocessor(inputs)\n for _, genome in tqdm(genomes):\n net = RecurrentNet.create(genome, config_)\n mse = 0\n for single_inputs, output in zip(inputs, outputs):\n net.reset()\n mask, score = gate_activation(net, single_inputs)\n selected_score = score[mask]\n if selected_score.size == 0:\n xo = 0.5\n else:\n xo = np.sum(selected_score) / selected_score.size\n mse += (xo - output.item())**2\n genome.fitness = 1 / (1 + mse)", "def __init__(self, dims, treeCount, incAdd = 1, testDims = 3, dimCount = 4, rotCount = 32):\n # Support structures...\n self.cats = dict() # Dictionary from cat to internal indexing number.\n self.treeCount = treeCount\n self.incAdd = incAdd\n \n # Setup the classification forest...\n self.classify = DF()\n self.classify.setInc(True)\n self.classify.setGoal(Classification(None, 1))\n self.classify.setGen(LinearClassifyGen(0, 1, testDims, dimCount, rotCount))\n \n self.classifyData = MatrixGrow()\n self.classifyTrain = self.treeCount\n \n # Setup the density estimation forest...\n self.density = DF()\n self.density.setInc(True)\n self.density.setGoal(DensityGaussian(dims))\n self.density.setGen(LinearMedianGen(0, testDims, dimCount, rotCount))\n self.density.getPruner().setMinTrain(48)\n \n self.densityData = MatrixGrow()\n self.densityTrain = self.treeCount", "def train_all_curated(self, bench=False):\n train_X, train_y = self.format_input(self.M.curated_genes, self.neg_train_genes)\n self.train(train_X, train_y)\n pkl.dump(self, open(self.save_path + '/nash_model_trained.pkl', 'wb'))\n if bench:\n self.benchmark(train_X, train_y)\n\n # do feature selection on dataset as a whole so it is easier to be scored\n if self.feat_sel:\n self.dataset = pd.DataFrame(self.skb.transform(self.dataset), index=self.dataset.index)", "def infer(self):\r\n for i in range(6):\r\n count_before = 
len(self.graph.nodes)\r\n\r\n self.graph.cleanup().toposort()\r\n try:\r\n for node in self.graph.nodes:\r\n for o in node.outputs:\r\n o.shape = None\r\n model = gs.export_onnx(self.graph)\r\n model = shape_inference.infer_shapes(model)\r\n self.graph = gs.import_onnx(model)\r\n except Exception as e:\r\n log.info(\"Shape inference could not be performed at this time:\\n{}\".format(e))\r\n try:\r\n self.graph.fold_constants(fold_shapes=True)\r\n except TypeError as e:\r\n log.error(\"This version of ONNX GraphSurgeon does not support folding shapes, please upgrade your \"\r\n \"onnx_graphsurgeon module. Error:\\n{}\".format(e))\r\n raise\r\n\r\n count_after = len(self.graph.nodes)\r\n if count_before == count_after:\r\n # No new folding occurred in this iteration, so we can stop for now.\r\n break", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def tuneRandomForest(train_set):\n\n auc_score = make_scorer(roc_auc_score)\n acc = make_scorer(accuracy_score)\n\n train_set = pd.read_csv(train_set, sep=\"\\t\", low_memory=False)\n\n train_output = train_set[\"output\"].values\n train_features = train_set[train_set.columns.drop([\"labels\", \"output\"])].values\n\n #X_train, X_test, y_train, y_test = train_test_split(train_features, train_output, test_size=0.20)\n\n # define parameters to be optimized\n parameters = {\n 'n_estimators': [int(x) for x in range(200, 3000, 300)],\n 'max_features': ['log2', 'sqrt', \"auto\"],\n 'criterion': [\"gini\", \"entropy\"],\n }\n #plotGrid(parameters, script_path + \"/results/GridSearchPlot.png\")\n\n scores = ['precision', 'recall', 'f1', auc_score, acc] # compute efficiency based on scores\n for score in scores:\n print(\"# Tuning hyper-parameters for %s\" % score)\n\n tune_search = GridSearchCV(\n RandomForestClassifier(n_jobs=-1),\n parameters,\n scoring=score\n )\n #tune_search.fit(X_train, y_train)\n tune_search.fit(train_features, train_output)\n print(tune_search.best_params_)\n\n means = tune_search.cv_results_['mean_test_score']\n stds = tune_search.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, tune_search.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\" % (mean, std * 2, params))\n\n #y_true, y_pred = y_test, tune_search.predict(X_test)\n # print(classification_report(y_true, y_pred))\n #print()", "def trainNet():", "def run_algorithm(self):\n print(f\"Checking all possible configurations with {self.algorithm}...\")\n\n if self.algorithm == \"test\" or (self.algorithm == \"greedy\" and\n self.iterations == 1000):\n\n # Test each configuration found with greedy (1000 iterations)\n while True:\n try:\n self.index += 1\n self.batteries = self.load_batteries(self.index)\n\n # Break if all configurations are checked\n except FileNotFoundError:\n break\n self.calculate_cable()\n self.link_houses()\n greedy(self, 1000)\n\n # Load best solution if user wanted to run greedy\n if self.algorithm == \"greedy\":\n self.load()\n self.plot_houses()\n\n # Call correct algorithm\n else:\n self.load()\n if self.algorithm == \"stepdown\":\n stepdown(self)\n elif self.algorithm == \"greedy\":\n greedy(self, self.iterations)\n elif self.algorithm == \"hill\":\n hill_climber(self, self.iterations)\n elif self.algorithm == \"dfs\":\n dfs(self)\n elif self.algorithm == \"random\":\n random_algorithm(self, self.iterations)\n elif self.algorithm == \"bnb\":\n bnb(self)\n\n self.load()\n self.plot_houses()", "def run_isolation_forest(file_path):\n\n features_list = 
['Direction', 'Speed']\n df_train = pd.read_csv(f'{file_path}/without_anom.csv')\n\n df_train = df_train[features_list]\n\n scalar = MaxAbsScaler()\n\n X_train = scalar.fit_transform(df_train)\n\n random_model = MultiOutputRegressor(\n RandomForestRegressor(max_depth=2, max_features=\"sqrt\")\n )\n\n # lab_enc = preprocessing.LabelEncoder()\n # training_scores_encoded = lab_enc.fit_transform(X_train)\n random_model.fit(X_train, X_train)\n pred = random_model.predict(X_train)\n # isolation_model = MultiOutputRegressor(IsolationForest()).fit(X_train)\n # pred = isolation_model.predict(X_train)\n test_path = \"C:\\\\Users\\\\Yehuda Pashay\\\\Desktop\\\\fligth_data\\\\data_set\\\\test\\\\chicago_to_guadalajara\\\\down_attack\"\n df_test = pd.read_csv(f'{test_path}/sensors_8.csv')\n df_test = df_test[features_list]\n\n Y_test = scalar.transform(df_test)\n test_pred = random_model.predict(Y_test)\n a = 4", "def _evaluate_fitness(self, population: Population):\n for n, individual in enumerate(population.individuals):\n\n # Dataset extraction using individual features\n X_data = self._create_dataset(individual, self._X)\n\n # Get scores for each fitness strategy (each objective)\n scores = [fitness_func.eval_fitness(X=X_data, y=self._y, num_feats=len(population.features))\n for fitness_func in self.fitness]\n\n # If the number of features is an objective\n if self.optimize_features:\n scores.append(self.features_function(individual=individual,\n total_feats=len(self._population.features)))\n\n # Create a solution\n individual.fitness = Solution(scores)\n\n return population", "def run_all(operations=ops):\n for operation in operations:\n run(operation)", "def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')", "def execute(self, *f_args):\n selection = self._selections[int(math.floor(f_args[0]))].name()\n representation = self._representations[int(\n math.floor(f_args[1]))].name()\n mutation = self._mutations[int(math.floor(f_args[2]))].name()\n crossover = self._crossovers[int(math.floor(f_args[3]))].name()\n\n population = int(round(f_args[4]))\n selection_crossover = f_args[5]\n selection_mutation = f_args[6]\n generations = int(math.floor(f_args[7]))\n precision = int(round(f_args[8]))\n max_retry = int(round(f_args[9]))\n\n values = {}\n args = collections.namedtuple(\n \"args\",\n [\"precision\", \"threads\", \"dimensions\",\n \"selection\", \"representation\", \"crossover\", \"mutation\",\n \"population\", \"selection_mutation\", 
\"selection_crossover\",\n \"generations\", \"max_retry\"])\n\n for function_cls in self._functions:\n values[function_cls] = {}\n for dimension in range(1, 2):\n # prepare new alg\n alg = basic_ag.BaseAG(\n selection=selection,\n representation=representation,\n mutation=mutation,\n crossover=crossover,\n population=population,\n selection_crossover=selection_crossover,\n selection_mutation=selection_mutation,\n generations=generations,\n dimension=dimension,\n precision=precision)\n\n fabicrated_args = args(\n precision=precision, max_retry=max_retry,\n dimensions=dimension, threads=5,\n selection=selection,\n representation=representation,\n mutation=mutation,\n crossover=crossover,\n population=population,\n selection_crossover=selection_crossover,\n selection_mutation=selection_mutation,\n generations=generations)\n alg.set_args(fabicrated_args)\n\n function_cls.set_args(fabicrated_args)\n function = function_cls(dimension=dimension)\n\n rez = alg(function)\n info = alg.get_info()\n\n values[function_cls][dimension] = (\n rez, function.local_mins, info, fabicrated_args)\n\n return self._get_value(values)", "def eval(self):\n self.train(mode=False)", "def runall():\n sclogic.runall()", "def start_neuroevolution(x, y, x_test, y_test):\n\n connections = [(0, INPUT0, OUTPUT0), (1, INPUT1, OUTPUT0), (2, INPUT0, OUTPUT1), (3, INPUT1, OUTPUT1)]\n genotypes = [{0: True, 1: True, 2: True, 3: True} for d in xrange(5)]\n\n for its in xrange(0,5):\n print \"iteration\", its\n\n fitnesses = []\n # test networks\n for i in xrange(0,len(genotypes)):\n fitnesses.append(eval_fitness(connections, genotypes[i], x, y, x_test, y_test, run_id=str(its) + \"/\" + str(i)))\n\n # get indices of sorted list\n fitnesses_sorted_indices = [i[0] for i in reversed(sorted(enumerate(fitnesses), key=lambda x: x[1]))]\n\n print \"connections:\\n\"\n print connections\n for ra in xrange(0,len(fitnesses_sorted_indices)):\n print fitnesses[fitnesses_sorted_indices[ra]], genotypes[fitnesses_sorted_indices[ra]]\n\n # run evolutions\n # todo: fiddle with parameters, include size of network in fitness?\n new_gen = []\n # copy five best survivors already\n m = 5\n if m > len(fitnesses):\n m = len(fitnesses)\n\n for i in xrange(0,m):\n print \"adding:\", fitnesses[fitnesses_sorted_indices[i]], genotypes[fitnesses_sorted_indices[i]]\n new_gen.append(genotypes[fitnesses_sorted_indices[i]])\n\n for i in xrange(0,len(fitnesses_sorted_indices)):\n fi = fitnesses_sorted_indices[i]\n r = np.random.uniform()\n # select the best for mutation and breeding, kill of worst.\n if r <= 0.2:\n # mutate\n connections, gen = add_connection(connections, genotypes[i])\n new_gen.append(gen)\n r = np.random.uniform()\n if r <= 0.5:\n connections, gen = add_node(connections, genotypes[i])\n new_gen.append(gen)\n\n r = np.random.uniform()\n if r <= 0.1:\n # select random for breeding\n r = np.random.randint(0,len(fitnesses))\n r2 = np.random.randint(0,len(fitnesses) - 1)\n if r2 >= r:\n r2 +=1\n gen = crossover(connections, genotypes[r], fitnesses[r], genotypes[r2], fitnesses[r2])\n new_gen.append(gen)\n new_gen.append(genotypes[fi])\n # stop if we have 5 candidates\n if len(new_gen) > 10:\n break\n genotypes = new_gen", "def __init__(self, n_trees=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, \n max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=-1, random_state=None,\n verbose=0, min_density=None, compute_importances=None): \n self.random_forest = RandomForestClassifier(n_trees, 
criterion, max_depth, min_samples_split, min_samples_leaf, \n max_features, max_leaf_nodes, bootstrap, oob_score, n_jobs, random_state,\n verbose, min_density, compute_importances)", "def start_all_nodes(self):\n for node in self.nodes:\n node.start()", "def main(args, base_dir):\n for i in range(args.n_training):\n # value of the next seed\n seed = args.seed + i\n\n # The time when the current experiment started.\n now = strftime(\"%Y-%m-%d-%H:%M:%S\")\n\n # Create a save directory folder (if it doesn't exist).\n if args.log_dir is not None:\n dir_name = args.log_dir\n else:\n dir_name = os.path.join(base_dir, '{}/{}'.format(\n args.env_name, now))\n ensure_dir(dir_name)\n\n # Get the policy class.\n if args.alg == \"TD3\":\n from hbaselines.multiagent.td3 import MultiFeedForwardPolicy\n elif args.alg == \"SAC\":\n from hbaselines.multiagent.sac import MultiFeedForwardPolicy\n elif args.alg == \"PPO\":\n from hbaselines.multiagent.ppo import MultiFeedForwardPolicy\n elif args.alg == \"TRPO\":\n from hbaselines.multiagent.trpo import MultiFeedForwardPolicy\n else:\n raise ValueError(\"Unknown algorithm: {}\".format(args.alg))\n\n # Get the hyperparameters.\n hp = get_hyperparameters(args, MultiFeedForwardPolicy)\n\n # add the seed for logging purposes\n params_with_extra = hp.copy()\n params_with_extra['seed'] = seed\n params_with_extra['env_name'] = args.env_name\n params_with_extra['policy_name'] = \"MultiFeedForwardPolicy\"\n params_with_extra['algorithm'] = args.alg\n params_with_extra['date/time'] = now\n\n # Add the hyperparameters to the folder.\n with open(os.path.join(dir_name, 'hyperparameters.json'), 'w') as f:\n json.dump(params_with_extra, f, sort_keys=True, indent=4)\n\n run_exp(\n env=args.env_name,\n policy=MultiFeedForwardPolicy,\n hp=hp,\n dir_name=dir_name,\n evaluate=args.evaluate,\n seed=seed,\n eval_interval=args.eval_interval,\n log_interval=args.log_interval,\n save_interval=args.save_interval,\n initial_exploration_steps=args.initial_exploration_steps,\n ckpt_path=args.ckpt_path,\n )", "def do_training(self):\n json_data = request.data\n global g_list_of_classifier\n\n datas = json.loads(json_data.decode('UTF-8')) #datas = liste\n\n for ite_clf in g_list_of_classifier:\n for data in datas:\n ite_clf.add_data(data['score'], data['answer'])\n print(ite_clf.get_info())\n return ''", "def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n log.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n log.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! 
the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')", "def set_eval(self):\n for m in self.models.values():\n m.eval()", "def _next_generation(self, previous_generation):\n self._fullInput, self._fullOutput = previous_generation.get_data()\n self.power = self.settings.population_count\n for forest_iteration in range(self.power):\n first, second = previous_generation.selection()\n print 'selected for crossover ->', first.fitness, second.fitness\n self._forests.append(OneForest(self.settings, first_forest=first, second_forest=second))", "def go(self):\n\n if self.problem_type == 1:\n print magenta(\"|\" * 20 + \" And they're off! 
\" + \"|\" * 20)\n # classification\n c = ClassifyModel(self)\n c.logistic_regression()\n c.knn()\n c.random_forest()\n\n elif self.problem_type == 2:\n # regression\n pass\n\n elif self.problem_type == 3:\n # clustering\n pass\n\n elif self.problem_type == 4:\n # dimensionailty reduction\n pass\n\n elif self.problem_type == 5:\n # recommendation\n pass\n\n else:\n raise ValueError('Problem type not defined properly.')", "def main():\n path_for_data = '/Users/avielshtern/Desktop/semb/iml/IML.HUJI-master/data/kc_house_data (1).csv'\n design_matrix, response_vector = load_data(path_for_data)\n putting_it_all_together_1(design_matrix, response_vector)\n putting_it_all_together_2(design_matrix, response_vector)\n feature_evaluation(design_matrix, response_vector)", "def evaluate(train, train_labels, test, test_labels):\n \n # Use the same model for each training set for now\n model = RandomForestClassifier(n_estimators = 100, \n random_state = 50, n_jobs = -1)\n \n train = train.replace({np.inf: np.nan, -np.inf: np.nan})\n test = test.replace({np.inf: np.nan, -np.inf:np.nan})\n \n feature_names = list(train.columns)\n \n # Impute the missing values\n imputer = Imputer(strategy = 'median', axis = 1)\n train = imputer.fit_transform(train)\n test = imputer.transform(test)\n \n cv_score = 1 * cross_val_score(model, train, train_labels, \n scoring = \"f1\", \n cv = 5)\n \n # Fit on the training data and make predictions\n model.fit(train, train_labels)\n preds = model.predict(test)\n \n # Calculate the performance\n f1 = f1_score(test_labels, preds)\n print('5-fold CV F1: {:.2f} with std: {:.2f}'.format(cv_score.mean(),cv_score.std()))\n print('Test F1: {:.2f}.'.format(f1))\n \n feature_importances = pd.DataFrame({'feature': feature_names, \n 'importance': model.feature_importances_})\n \n return preds, feature_importances", "def run(self, verbose=True, max_total_iterations=50000):\n self.verbose = verbose\n\n # Upper bounds on number of evaluations\n self.max_total_iterations = max_total_iterations\n\n self.initialise_mean_and_count()\n self.directed_edges = []\n self.active_strategy_profiles = []\n self.initialise_queue()\n\n # Forced initial exploration\n self.forced_exploration()\n\n # Keep evaluating nodes until check method declares that we're finished\n iterations = 0\n edges_resolved_this_round = []\n while self.total_interactions < max_total_iterations:\n # Add nodes to queue\n self.add_to_queue(removed=edges_resolved_this_round)\n\n # Evaluate the nodes and log results\n for v, _ in self.evaluate_strategy_profile():\n if verbose:\n print(v)\n\n # Recompute confidence bounds, eliminate, stop etc.\n edges_resolved_this_round = self.check_confidence()\n\n if not self.edges_remaining:\n break\n iterations += 1\n\n # Fill in missing edges if max iters reached without resolving all edges\n self.compute_graph()\n\n # Compute objects to be returned\n if verbose:\n total_steps = self.compute_total_steps()\n print('\\nTotal steps taken = {}'.format(total_steps))\n results = {}\n results['interactions'] = int(np.sum(self.count[0]))\n graph = self._construct_digraph(self.directed_edges)\n results['graph'] = graph\n return results", "def evolve(self, generations=10000):\n\n for gen in range(generations):\n # run the tournament\n self.tournament()\n\n # generate the next generation\n self.p = self.nextGen()", "def run(self):\n # For each microenvironment that the person visits\n while self.routing_node_id != 'end':\n # Get the next node, and this activity class and arguments.\n self.routing_node_id, 
activity_class, kwargs = self.routing.get_next_activity(self.routing_node_id)\n\n # Add this instance to the arguments list\n kwargs['person'] = self\n\n # Create a parametrised instance of the activity\n this_activity_class = activity_class(self.simulation_params, **kwargs)\n \n # set an event flag to mark end of activity and call the activity class\n finished_activity = self.env.event() \n self.env.process(this_activity_class.start(finished_activity))\n yield finished_activity", "def run(self):\n print(' strategies...')\n matrix_file = ''\n matrix_s, matrix_c = None, None\n # run for all but the optimal version\n item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n gt_graph = load_graph(graph)\n for strategy in Strategy.strategies:\n if strategy == 'optimal':\n continue\n print(' ', strategy)\n m_new = self.data_set.matrices[rec_type][graph][strategy][0]\n m_newc = self.data_set.matrices[rec_type][graph][strategy][1]\n debug(' ----', m_new)\n debug(' ----', m_newc)\n if not m_new:\n debug(' ---- not m_new')\n matrix_s, matrix_c, matrix_file = None, None, None\n elif matrix_file != m_new:\n matrix_s = SimilarityMatrix(item2matrix, m_new)\n matrix_c = SimilarityMatrix(item2matrix, m_newc)\n matrix_file = m_new\n debug(' ---- matrix_file != m_new')\n # for miss in self.data_set.missions[rec_type][graph][strategy]:\n for miss in Mission.missions:\n print(' ', miss)\n if 'Information Foraging' in miss or 'Berrypicking' in miss:\n matrix = matrix_c\n else:\n matrix = matrix_s\n for m in self.data_set.missions[rec_type][graph][strategy][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/',\n len(m.targets_original))\n debug(m.targets_original[ti])\n self.navigate(gt_graph, strategy, m, start,\n None, matrix)\n if ti > 0 and len(m.targets_original[ti]) == len(m.targets[0]):\n # print('breaking...')\n m.reset()\n break\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # run the simulations for the optimal solution\n print(' optimal...')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n sp_file = graph.rsplit('.', 1)[0] + '.npy'\n with open(sp_file, 'rb') as infile:\n sp = pickle.load(infile)\n for miss in self.data_set.missions[rec_type][graph]['optimal']:\n for m in self.data_set.missions[rec_type][graph]['optimal'][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/', len(m.targets_original))\n debug(m.targets_original[ti])\n self.optimal_path(m, start, sp)\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # # DEBUG\n # item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n # for rec_type in ['rbar']:\n # for graph in self.data_set.graphs[rec_type]:\n # print(' ', graph)\n # gt_graph = load_graph(graph)\n # sp_file = graph.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file, 'rb') as infile:\n # sp = pickle.load(infile)\n # m_newc = self.data_set.matrices[rec_type][graph]['title'][1]\n # matrix = SimilarityMatrix(item2matrix, m_newc)\n # sc = 'Berrypicking'\n # mc1 = self.data_set.missions[rec_type][graph]['title'][sc]\n # mc2 = self.data_set.missions[rec_type][graph]['optimal'][sc]\n # mc3 = self.data_set.missions[rec_type][graph]['random'][sc]\n # for m1, m2, m3 
in zip(\n # mc1,\n # mc2,\n # mc3\n # ):\n # # evalute with title strategy\n # for ti in xrange(len(m1.targets_original)):\n # start = m1.path[-2] if m1.path else m1.start\n # debug('++++' * 16, 'mission', ti, '/', len(m1.targets_original))\n # # debug(m1.targets_original[ti])\n # self.navigate(gt_graph, 'title', m1, start, None, matrix)\n # # print(m1.path, ti, len(m1.targets_original[ti]), len(m1.targets[0]))\n # if ti > 0 and len(m1.targets_original[ti]) == len(m1.targets[0]):\n # # print('breaking...')\n # m1.reset()\n # break\n # if not (ti + 1) == len(m1.targets_original):\n # m1.path.append(u'*')\n # m1.reset()\n #\n # # evaluate with optimal strategy\n # for ti in xrange(len(m2.targets_original)):\n # start = m2.path[-2] if m2.path else m2.start\n # # debug('++++' * 16, 'mission', ti, '/', len(m2.targets_original))\n # # debug(m2.targets_original[ti])\n # self.optimal_path(m2, start, sp)\n # if not (ti + 1) == len(m2.targets_original):\n # m2.path.append(u'*')\n # m2.reset()\n # # pdb.set_trace()\n #\n # # if len(m1.path) < len(m2.path):\n # # print(len(m1.path), len(m2.path))\n # # pdb.set_trace()\n # # m1.compute_stats()\n # # m2.compute_stats()\n # # if m1.stats[-1] > m2.stats[-1]:\n # # print(m1.stats)\n # # print(m2.stats)\n # # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc1.compute_stats()\n # mc2.compute_stats()\n # print(mc1.stats[-1], mc2.stats[-1])\n # pdb.set_trace()\n\n # fname_5 = u'../data/bookcrossing/graphs/rbar_5.gt'\n # fname_20 = u'../data/bookcrossing/graphs/rbar_20.gt'\n # sp_file_5 = fname_5.rsplit('.', 1)[0] + '.npy'\n # sp_file_20 = fname_20.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file_5, 'rb') as infile:\n # sp_5 = pickle.load(infile)\n # with open(sp_file_20, 'rb') as infile:\n # sp_20 = pickle.load(infile)\n # sc = 'Berrypicking'\n # mc_5 = self.data_set.missions['rbar'][fname_5]['optimal'][sc]\n # mc_52 = self.data_set.missions['rbar'][fname_5]['title'][sc]\n # mc_20 = self.data_set.missions['rbar'][fname_20]['optimal'][sc]\n # mc_202 = self.data_set.missions['rbar'][fname_20]['title'][sc]\n # for m5, m20, m52, m202 in zip(\n # mc_5,\n # mc_20,\n # mc_52,\n # mc_202\n # ):\n # # evaluate 5 with optimal strategy\n # for ti in xrange(len(m5.targets_original)):\n # start = m5.path[-2] if m5.path else m5.start\n # self.optimal_path(m5, start, sp_5)\n # if not (ti + 1) == len(m5.targets_original):\n # m5.path.append(u'*')\n # m5.reset()\n #\n # # evaluate 20 with optimal strategy\n # for ti in xrange(len(m20.targets_original)):\n # start = m20.path[-2] if m20.path else m20.start\n # self.optimal_path(m20, start, sp_20)\n # if not (ti + 1) == len(m20.targets_original):\n # m20.path.append(u'*')\n # m20.reset()\n #\n # # if len(m5.path) < len(m20.path) or \\\n # if m5.path.count('*') > m20.path.count('*'):\n # print(len(m5.path))\n # for part in ' '.join(m5.path[2:]).split('*'):\n # print(' ', part)\n # print(len(m20.path))\n # for part in ' '.join(m20.path[2:]).split('*'):\n # print(' ', part)\n # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc_5.compute_stats()\n # mc_20.compute_stats()\n # print(mc_5.stats[-1], mc_20.stats[-1])\n #\n # for m5, m20 in zip(mc_5.missions, mc_20.missions):\n # if m5.stats[-1] > m20.stats[-1]:\n # print(m5.stats)\n # print(m20.stats)\n # pdb.set_trace()\n # pdb.set_trace()\n\n # write the results to a file\n # self.write_paths()\n self.save()", "def runAllGLMS(self):\n\t\tfor condition in ['WMM']:\n\t\t\tfor run in self.conditionDict[condition]:\n\t\t\t\t\n\t\t\t\t# remove previous feat 
directories\n\t\t\t\ttry:\n\t\t\t\t\tself.logger.debug('rm -rf ' + self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'], extension = '.feat'))\n\t\t\t\t\tos.system('rm -rf ' + self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'], extension = '.feat'))\n\t\t\t\t\tos.system('rm -rf ' + self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'], extension = '.fsf'))\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\t\t\n\t\t\t\t# this is where we start up fsl feat analysis after creating the feat .fsf file and the like\n\t\t\t\tthisFeatFile = '/home/moorselaar/WMM_PRF/analysis/analysis.fsf'\n\t\t\t\tREDict = {\n\t\t\t\t#'---OUTPUT_DIR---':self.runFile(stage = 'processed/mri', run = r, postFix = ['mcf', 'sgtf']),\n\t\t\t\t'---NR_TRS---':str(NiftiImage(self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'])).timepoints),\n\t\t\t\t'---FUNC_FILE---':self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf']), \n\t\t\t\t'---CONFOUND_EV---':self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf'], extension='.par'), \n\t\t\t\t# '---ANAT_FILE---':os.path.join(os.environ['SUBJECTS_DIR'], self.subject.standardFSID, 'mri', 'bet', 'T1_bet' ), \n\t\t\t\t'---STIM_FILE---':self.runFile(stage = 'processed/behavior', run = self.runList[run], postFix = ['stim_all'], extension='.txt'),\n\t\t\t\t'---RESPONSE_FILE---':self.runFile(stage = 'processed/behavior', run = self.runList[run], postFix = ['resp_all'], extension='.txt'),\n\t\t\t\t'---PPU_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['ppu'], extension='.txt'),\n\t\t\t\t'---PPU_R_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['ppu','raw'], extension='.txt'),\n\t\t\t\t'---RESP_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['resp'], extension='.txt'),\n\t\t\t\t'---RESP_R_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['resp','raw'], extension='.txt')\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfeatFileName = self.runFile(stage = 'processed/mri', run = self.runList[run], extension = '.fsf')\n\t\t\t\tfeatOp = FEATOperator(inputObject = thisFeatFile)\n\t\t\t\t# no need to wait for execute because we're running the mappers after this sequence - need (more than) 8 processors for this, though.\n\t\t\t\tif self.runList[run] == [self.runList[i] for i in self.conditionDict['WMM']][-1]:\n\t\t\t\t\tfeatOp.configure( REDict = REDict, featFileName = featFileName, waitForExecute = True )\n\t\t\t\telse:\n\t\t\t\t\tfeatOp.configure( REDict = REDict, featFileName = featFileName, waitForExecute = False )\n\t\t\t\tself.logger.debug('Running feat from ' + thisFeatFile + ' as ' + featFileName)\n\t\t\t\t# run feat\n\t\t\t\tfeatOp.execute()", "def crawler(self):\n\n\t\tfor page in range(self.first_page, self.last_page+1):\n\t\t\tprint(\"\\nCrawling Page \" + str(page))\n\t\t\tpage_url = self.site_url + \"?page=\" + str(page) +\\\n\t\t\t \"&index=prod_all_products_term_optimization\"\n\t\t\t\n\t\t\tself.scrape_features(page_url)", "def launch_evaluations(self):\n self.report('Launching pending evaluations.')\n with self.optimizer() as opt:\n evals = {}\n evaluate_process = load_object(self.inputs.evaluate_process.value)\n for idx, inputs in opt.create_inputs().items():\n self.report('Launching evaluation {}'.format(idx))\n inputs_merged = ChainMap(inputs, 
self.inputs.get('evaluate', {}))\n if is_process_function(evaluate_process):\n _, node = run_get_node(evaluate_process, **inputs_merged)\n else:\n node = self.submit(evaluate_process, **inputs_merged)\n evals[self.eval_key(idx)] = node\n self.indices_to_retrieve.append(idx)\n return self.to_context(**evals)", "def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)", "def learner(self):\n for N in range(self.N_learn):\n trajectories = self.sample_trajectories()\n\n # TODO: Both these methods take the full trajectories at the moment, a speedup could be achieved here\n self.qmodel.train(trajectories)\n self.amodel.train(trajectories)", "def train(self):\n\n if(self.net.killAll):\n self._kill()\n\n empty = False\n state = []\n actions = []\n rewards = []\n while(not empty):\n example = self.globalQueue.get()\n \n for prevState, action, reward in zip(example['prevStates'], example['actions'],example['rewards']):\n state.append(np.array(prevState).reshape(-1,84,84,4))\n actions.append(np.eye(self.actionSpace)[np.array(action)].reshape(-1,self.actionSpace).astype(np.float32))\n rewards.append(np.array(reward).reshape(-1))\n empty = self.globalQueue.empty()\n \n if(len(rewards) != 0 ):\n states = np.array(state).reshape(-1, 84,84,4)\n actions = np.array(actions).reshape(-1,self.actionSpace)\n rewards = np.array(rewards).reshape(-1)\n self.net.train(states, rewards, actions)", "def train_random_forest():\n train_model(RandomForestRegressor(max_depth=4, random_state=42),\n dataset_file_name=RANDOM_FOREST_DEFAULT_DATASET,\n model_file_name=RANDOM_FOREST_DEFAULT_MODEL)", "def apply(self, decision_graph: DecisionGraph) -> None:\n #takes decision graph and applies them to processors\n self.apply_random(False)\n logging.debug(\"application1:\", self.application)\n logging.debug(\"connections1:\", self.transfer)\n\n Procedures.instance.set_application(self.application)\n\n all_tasks = self.task_graph.nodes\n all_connections = [i for sub in self.transfer for i in sub if i]\n for n in decision_graph.DFS():\n #pick tasks for nodes keeping in mind propabilities and graph structure\n node = decision_graph.find_node(n)\n parent = decision_graph.find_parents(n)\n logging.debug(node, parent)\n\n if parent:\n #If node has parent use it's tasks to pick from\n parent = decision_graph.find_node(parent[0])\n picked_tasks = random.sample(parent.tasks, k=round(node.propability * len(parent.tasks)))\n assert len(picked_tasks) <= len(parent.tasks)\n picked_connections = random.sample(parent.connections, k=round(node.propability * len(parent.connections)))\n assert len(picked_connections) <= len(parent.connections)\n else:\n #If node has no parents pick random elements from all tasks according to propability value\n picked_tasks = random.sample(all_tasks, k=round(node.propability * len(all_tasks)))\n picked_connections = random.sample(all_connections, k=round(node.propability * len(all_connections)))\n\n node.tasks = picked_tasks\n node.connections = picked_connections\n\n #pick tasks according to operation (strategy) in this node\n for task in picked_tasks:\n self.move_task(task.label, node.task_strategy(task.label))\n\n #pick connection according to comm (strategy) in this node\n for conn in picked_connections:\n picked = node.comm_strategy(self.transfer)\n conn = self.alter_connection(conn, picked)\n\n logging.debug(\"application2:\", self.application)\n logging.debug(\"connections2:\", 
self.transfer)\n\n self.sort_tasks_with_critical_order()\n return self.simulate()", "def evaluate_ucf50_fusion():\n accs = np.zeros(3)\n ob_suffix = '-max.feat.npy.gz'\n fv_suffix = '_fv.npy.gz'\n ob_root = '/home/syq/research_final/data/features/ob_ucf50_pooled_python/'\n fv_root = '/home/syq/research_final/data/features/fv_ucf50_python/'\n fv_groups, full, sets = utility.split_data(fv_root,\n suffix=fv_suffix,\n useLooCV=False)\n\n ob_groups, _, _ = utility.split_data(ob_root,\n suffix=ob_suffix,\n useLooCV=False)\n weights = [i / 20.0 for i in range(8, 13)]\n acc_to_weights = {}\n\n for weight in weights:\n print \"Weight: %.2f\" % weight\n accs = np.zeros(2)\n for i in xrange(2):\n ts = time.time()\n Dtrain_fv, Dtest_fv, Ytrain, Ytest = utility.load_groups(\n fv_groups, np.setdiff1d(full, sets[i]),\n sets[i], scale=False, verbose=False)\n\n Dtrain_ob, Dtest_ob, Ytrain, Ytest = utility.load_groups(\n ob_groups, np.setdiff1d(full, sets[i]),\n sets[i], scale=False, verbose=False)\n\n fv_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n\n ob_clf = OneVsRestClassifier(estimator=SVC(C=10,\n cache_size=1000,\n kernel='linear',\n probability=True),\n n_jobs=-1)\n\n # Get probabilities for late fusion\n Dtrain_fv = fv_clf.fit(Dtrain_fv, Ytrain).decision_function(Dtrain_fv)\n Dtrain_ob = ob_clf.fit(Dtrain_ob, Ytrain).decision_function(Dtrain_ob)\n Dtest_fv = fv_clf.decision_function(Dtest_fv)\n Dtest_ob = ob_clf.decision_function(Dtest_ob)\n\n # Scale decision values b/w 0 and 1\n Dtrain_fv = preprocessing.normalize(Dtrain_fv)\n Dtrain_ob = preprocessing.normalize(Dtrain_ob)\n Dtest_fv = preprocessing.normalize(Dtest_fv)\n Dtest_ob = preprocessing.normalize(Dtest_ob)\n\n # weighted averaging\n scores_train = (Dtrain_fv * weight) + (Dtrain_ob * (1 - weight))\n latefusion_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=-1)\n latefusion_clf.fit(scores_train, Ytrain)\n\n scores_test = (Dtest_fv * weight) + (Dtest_ob * (1 - weight))\n latefusion_acc = latefusion_clf.score(scores_test, Ytest)\n print 'Fold', i, 'late fusion acc', latefusion_acc\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs[i] = latefusion_acc\n\n print \"Mean accuracy: %.3f\" % accs.mean()\n with open(\"fv_ucf50_accs_5fold_gridsearch.txt\", \"w\") as f:\n for weight, accs in acc_to_weights.items():\n f.write(str(weight) + str(accs) + '\\n')\n return acc_to_weights", "def runner(self):\n\n print('[ INFO ]: Initializing the abalone program runner...')\n\n df, features, predictor, classes = self.preprocess()\n\n df = alg.random_feature_sample(self, df, 0.10)\n\n # Set up the training, testing and validation sets\n split = round(len(df) * 0.10)\n v_set = df[df.index < split]\n t_set = df[df.index >= split]\n\n tree = alg()\n folds_dict = tree.cross_validation(t_set, predictor, type='classification', folds=5)\n\n # Initialize comparion values\n best_fold_tree = None\n best_fold_score = 0\n best_fold_pred_labels = None\n best_fold_df = None\n\n # Loop through each fold in the folds dictionary\n for fold in folds_dict:\n\n test_set = folds_dict[fold]\n train_set = pd.DataFrame()\n for inner_fold in folds_dict:\n if inner_fold != fold:\n train_set = train_set.append(folds_dict[inner_fold], ignore_index=True)\n\n # Build an ID3 tree\n root = tree.build_tree(train_set, features, predictor)\n df, labels, pred_labels, score = tree.test(test_set, features, predictor, root)\n\n # Determine which tree is the best\n if score > best_fold_score:\n best_fold_tree = root\n best_fold_score = score\n 
best_fold_pred_labels = pred_labels\n best_fold_df = df\n\n # Validate results and prune the ID3 tree\n v_tree = alg()\n df, labels, pred_labels, score = v_tree.test(v_set, features, predictor, best_fold_tree)\n prune_root = v_tree.prune(df, predictor, best_fold_tree)\n prune_df, prune_labels, prune_pred_labels, prune_score = v_tree.test(v_set, features, predictor, prune_root)\n\n return best_fold_tree, score, labels, pred_labels, prune_root, prune_score, prune_labels, prune_pred_labels", "def run_experiment(self):\n\n start_time = time.time()\n\n strategy_instance = None\n if (self.strategy == 'ccegp'):\n strategy_instance = CCEGPStrategy(self)\n else:\n print('strategy unknown:', self.strategy)\n sys.exit(1)\n\n # For each run...\n for curr_run in range(1, self.num_runs_per_experiment + 1):\n\n # Update log\n self.curr_run = curr_run\n print('\\nRun', curr_run)\n self.log_file.write('\\nRun ' + str(curr_run) + '\\n')\n\n # Execute one run and get best values.\n attacker_run_high_fitness, attacker_run_best_world_data, attacker_run_best_solution, \\\n defender_run_high_fitness, defender_run_best_solution, attacker_dot, defender_dot \\\n = strategy_instance.execute_one_run()\n\n print('\\nBest attacker tree of run:\\n' + attacker_run_best_solution)\n if (self.print_dots):\n print('\\nBest attacker dot of run:\\n' + str(attacker_dot))\n print('\\nBest defender tree of run:\\n' + defender_run_best_solution)\n if (self.print_dots):\n print('\\nBest defender dot of run:\\n' + str(defender_dot))\n\n # If best of run is best overall, update appropriate values\n if (self.strategy != 'ccegp'):\n if (attacker_run_high_fitness > self.attacker_exp_high_fitness):\n self.attacker_exp_high_fitness = attacker_run_high_fitness\n print('New exp Attacker high fitness: ', self.attacker_exp_high_fitness)\n self.attacker_exp_best_world_data = attacker_run_best_world_data\n self.attacker_exp_best_solution = attacker_run_best_solution\n self.attacker_exp_best_dot = attacker_dot\n # If Competitive Co-evolution, add fitnesses (use Attacker to store most data)\n else:\n if ((attacker_run_high_fitness + defender_run_high_fitness) > self.attacker_exp_high_fitness):\n self.attacker_exp_high_fitness = (attacker_run_high_fitness + defender_run_high_fitness)\n print('New exp Attacker+Defender high fitness: ', self.attacker_exp_high_fitness)\n self.attacker_exp_best_world_data = attacker_run_best_world_data\n self.attacker_exp_best_solution = attacker_run_best_solution\n self.defender_exp_best_solution = defender_run_best_solution\n self.attacker_exp_best_dot = attacker_dot\n self.defender_exp_best_dot = defender_dot\n\n # Dump best world to file\n the_file = open(self.high_score_world_file_path, 'w')\n for line in self.attacker_exp_best_world_data:\n the_file.write(line)\n the_file.close()\n\n # Dump best Attacker solution (text) to file\n the_file = open(self.attacker_solution_file_path, 'w')\n the_file.write(self.attacker_exp_best_solution)\n the_file.close()\n\n # Dump best Defender solution (text) to file\n if (self.strategy == 'ccegp'):\n the_file = open(self.defender_solution_file_path, 'w')\n the_file.write(self.defender_exp_best_solution)\n the_file.close()\n\n # Dump best Attacker solution (dot) to file\n the_file = open(self.attacker_solution_dot_path, 'w')\n the_file.write(str(self.attacker_exp_best_dot))\n the_file.close()\n\n # Dump best Defender solution (dot) to file\n if (self.strategy == 'ccegp'):\n the_file = open(self.defender_solution_dot_path, 'w')\n the_file.write(str(self.defender_exp_best_dot))\n 
the_file.close()\n\n # Dump and display best Attacker solution\n if (self.render_solutions):\n self.attacker_exp_best_dot.render(filename=self.attacker_solution_png_path,\n view=self.attacker_open_png,\n format='png')\n\n # Dump and display best Defender solution\n if (self.render_solutions and self.strategy == 'ccegp'):\n self.defender_exp_best_dot.render(filename=self.defender_solution_png_path,\n view=self.defender_open_png,\n format='png')\n\n # Close out the log file\n if (not(self.log_file is None)):\n self.log_file.close()\n\n print(time.time() - start_time, 'seconds')", "def main():\n args = parameter_parser()\n tab_printer(args)\n trainer = GPNTrainer(args)\n # trainer.fit()\n \"\"\"\n Scoring on the prediction and learning ability.\n \"\"\"\n trainer.score()\n \"\"\"\n Scoring on the subgraph test set.\n \"\"\"\n # trainer.score2()\n \"\"\"\n Scoring on the generalization ability.\n \"\"\"\n # trainer.score3()\n \"\"\"\n Finetuning for downstream tasks.\n \"\"\"\n # model = finetune_GPN(args, trainer.number_of_labels)\n # model.finetune()", "def compute_all(self) -> None:\n self.compute_j_matrix()\n self.compute_outter_distribution()\n self.compute_max_prior()\n self.compute_max_poutter()", "def start(self):\n for trial in self._trials:\n self._run(trial)" ]
[ "0.66012484", "0.62504995", "0.61932045", "0.6135642", "0.6134355", "0.61263996", "0.6049594", "0.5867963", "0.5865974", "0.5832648", "0.581788", "0.5802029", "0.57808244", "0.5779419", "0.5708533", "0.56901664", "0.5634386", "0.56181246", "0.558044", "0.553923", "0.55354095", "0.55076337", "0.5501047", "0.546172", "0.54554665", "0.5449654", "0.54465944", "0.5430582", "0.54302466", "0.5425708", "0.54240423", "0.54049075", "0.53903145", "0.53903145", "0.53879577", "0.5372477", "0.537163", "0.5371445", "0.536546", "0.5364046", "0.53615", "0.5349199", "0.5348576", "0.5347975", "0.5347421", "0.53442204", "0.53416", "0.5340558", "0.533911", "0.5332747", "0.53245175", "0.5317703", "0.53116584", "0.5308252", "0.529818", "0.5285249", "0.52834314", "0.5280737", "0.5258904", "0.525514", "0.5253242", "0.5235794", "0.5225982", "0.52235615", "0.52227396", "0.5217332", "0.52070844", "0.5204109", "0.52016765", "0.5187104", "0.5180868", "0.5170577", "0.5167422", "0.5165838", "0.5165298", "0.5158282", "0.51581365", "0.51535106", "0.51534796", "0.51476747", "0.5146464", "0.51380193", "0.513666", "0.5131762", "0.51260483", "0.5123776", "0.51139426", "0.5107123", "0.510546", "0.5100614", "0.5097922", "0.5097633", "0.50936204", "0.50892085", "0.5084416", "0.5073824", "0.50726", "0.5072061", "0.5070364", "0.5070096" ]
0.7236054
0
Expects a list of signals and a list of bkgs (Dataset objects), and a cut_function and cut_values.
def roccurve(signals, bkgs, cut_function, cut_values):
    eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)
    eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs), cut_function, cut_values)
    return eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg
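A minimal usage sketch for the roccurve document above, under stated assumptions: the Dataset objects, the svjflatanalysis module, and the get_eff helper belong to the snippet's own codebase and are not defined here; the cut_function signature and the leading-jet-pT threshold scan are purely illustrative, not the original analysis selection.

    import numpy as np

    # Hypothetical cut_function: select events whose leading-jet pT exceeds the threshold.
    # The (arrays, cut_value) signature is an assumption; the real contract is set by get_eff.
    def leading_jet_pt_cut(arrays, cut_value):
        return arrays['leading_jet_pt'] > cut_value

    # Illustrative scan of thresholds (GeV); roccurve returns signal and background
    # efficiencies evaluated at each of these working points.
    cut_values = np.linspace(200.0, 800.0, 25)

    # signals and bkgs would be lists of Dataset objects from the surrounding framework:
    # eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(
    #     signals, bkgs, leading_jet_pt_cut, cut_values)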
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cuts(data, args, verbose):\n\n if args['experiment']['cut_finding'] == CutFinding.features:\n\n values = (data.xs == True).T\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.binning:\n\n values, names = binning(xs=data.xs,\n range_answers=args['cut_finding']['range_answers'],\n n_bins=args['cut_finding']['n_bins'])\n return Cuts(values=values, names=names)\n\n if args['experiment']['cut_finding'] == CutFinding.Kernighan_Lin:\n\n values = kernighan_lin(A=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.kmodes:\n\n values = find_kmodes_cuts(xs=data.xs,\n max_nb_clusters=args['cut_finding']['max_nb_clusters'])\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.Fiduccia_Mattheyses:\n\n values = fid_mat(xs=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.linear:\n\n values, equations = linear_cuts(xs=data.xs,\n equations=args['cut_finding']['equations'],\n verbose=verbose)\n\n return Cuts(values=values, equations=equations)\n\n raise ValueError('Wrong name for a cut finding function')", "def get_events_passing_cuts(bolo_name, WIMP_mass, d_cut, analysis_type, MVA_tag, bin_X, min_X, max_X, list_variables, **kwargs): \n\n try:\n kwargs[\"weight_dir\"]\n except KeyError:\n sys.exit()\n\n #Get heat _fraction\n heat_fraction = kwargs[\"classifier_name\"][13:]\n\n #Get scaling dict to set the weights\n d_scaling = BDT_fh.open_MVA_scaling_file(bolo_name, analysis_type, MVA_tag)\n\n d_event_dir = {\"S1Pb\":\"Beta_and_Pb\", \"S2Pb\":\"Beta_and_Pb\", \"S1Beta\":\"Beta_and_Pb\", \"S2Beta\":\"Beta_and_Pb\",\n \"S1Gamma\":\"Gamma\", \"S2Gamma\":\"Gamma\", \"FidGamma\":\"Gamma\", \n \"heatonly_heat_fraction\" + heat_fraction: \"Heatonly\", \"WIMP_mass_\" + str(WIMP_mass): \"WIMP\"}\n key_heat = \"heatonly_heat_fraction\" + heat_fraction\n\n #Load data\n d_test = dp.get_data_array(bolo_name, 1, analysis_type, MVA_tag, d_event_dir.keys(), 1, list_variables, datasplit = 1)\n\n # Get classifier\n model_dir = script_utils.create_directory(\"../../Classifier_files/\" + bolo_name + \"/\" + analysis_type + \"/\"+ kwargs[\"weight_dir\"] + \"/\") \n if kwargs.has_key(\"classifier_name\"):\n modelfile = model_dir + \"xgboost_classifier_mass_\" + str(WIMP_mass) + \"_\" + kwargs[\"classifier_name\"] + \".model\"\n bst = xgb.Booster({'nthread':16}, model_file = modelfile)\n\n #Get predictions on test sample\n d_pred = {}\n d_hist = {}\n d_color = {\"S1Pb\":kOrange-8, \"S2Pb\":kOrange-9, \"S1Beta\":kGreen+2, \"S2Beta\":kGreen-3,\n \"S1Gamma\":kBlue-7, \"S2Gamma\":kBlue, \"FidGamma\":kAzure+10, key_heat: kRed, \"WIMP_mass_\" + str(WIMP_mass):kGray, \"neutron\":kMagenta}\n\n #ROOT out_dir \n root_dir = script_utils.create_directory(\"./ROOT_files/\" + bolo_name + \"/\" + analysis_type + \"/\")\n file_root = TFile(root_dir + bolo_name + \"_sensi_eff_curves_heat_fraction\" + heat_fraction + \"_mass_\" + str(WIMP_mass) + \".root\", \"read\")\n\n #Write events that pass cut to a file \n txt_dir = script_utils.create_directory(\"./Text_files/Simulated_sensitivity/\")\n with open(txt_dir + 
\"/simulated_events_passing_cut_heat_fraction\" + heat_fraction + \"_mass_\" + str(WIMP_mass) + \".txt\", \"w\") as fout:\n\n fout.write(\"heat_fraction,exposure,num_events_passing_cut\\n\")\n\n #Loop over possible exposure values\n for exposure in [10, 50, 100, 500]:\n script_utils.print_utility(\"Getting events passing cut for exposure of \" + str(exposure) + \" mass of \" + str(WIMP_mass))\n for event_type in d_test.keys():\n d_pred[event_type] = bst.predict( xgb.DMatrix(d_test[event_type].iloc[:,:-3].values) )\n d_hist[event_type] = TH1F(\"h\" + event_type + str(exposure), \"h\" + event_type + str(exposure), bin_X, min_X, max_X)\n PyRPl.fill_TH1(d_hist[event_type], d_pred[event_type])\n PyRPl.process_TH1(d_hist[event_type], use_fill_bool = True, color = d_color[event_type] )\n if \"WIMP\" not in event_type:\n d_hist[event_type].Scale(float(d_scaling[\"prop_\" + event_type])*float(d_scaling[\"exp_per_day\"])*exposure/float(d_hist[event_type].Integral()))\n else:\n d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Scale(1./d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral())\n\n list_hist_bckg =[d_hist[\"S1Pb\"], d_hist[\"S2Pb\"], d_hist[\"S1Beta\"], d_hist[\"S2Beta\"], d_hist[\"S1Gamma\"], d_hist[\"S2Gamma\"], d_hist[\"FidGamma\"], d_hist[key_heat]]\n\n hsum_bckg=TH1F(\"hsum_bckg\" + str(exposure),\"hsum_bckg\" + str(exposure), bin_X, min_X, max_X)\n for i in range(1,bin_X+1):\n sumcontent = sum([h.GetBinContent(i) for h in list_hist_bckg])\n hsum_bckg.SetBinContent(i, sumcontent)\n\n fsensi = file_root.Get(\"sensitivity_expo_\" + str(exposure))\n cut_val = fsensi.GetMinimumX(2,10)\n\n #Run Poisson simulations\n list_event_pass_cut=[]\n for nsimu in range(100):\n hdatasimu = TH1F(\"hdatasimu\",\"hdatasimu\", bin_X, min_X, max_X)\n for i in range(1,bin_X+1):\n hdatasimu.SetBinContent(i, np.random.poisson(hsum_bckg.GetBinContent(i)))\n bin_cut = hdatasimu.FindBin(cut_val)\n num_entry_cut = int(hdatasimu.Integral(bin_cut, max_X))\n list_event_pass_cut.append(str(num_entry_cut))\n del hdatasimu\n fout.write(heat_fraction[1:] + \",\" + str(exposure) + \",\" + \",\".join(list_event_pass_cut) + \"\\n\")", "def cut_eval(self, hits, *args):\n end = self.start_offset + self.train_window + self.predict_window\n return self.cut(hits, self.start_offset, end) + args", "def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Get signal efficieny once\n eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)\n # Perform some basic plotting setup\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n # Then efficiencies per bkg category (ttjets, qcd, ...)\n bkg_categories = list(set([ b.get_category() for b in bkgs ]))\n bkg_categories.sort()\n lines = {}\n for bkg_cat in bkg_categories:\n # Get Datasets that have this category\n bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]\n # Compute efficiency in this category\n eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)\n # Draw roccurve for this category\n line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)\n line.set_label(bkg_cat)\n # Save this line in a dict for 
potential outputting/modifying\n lines[bkg_cat] = line\n return ax", "def get_fidcuts():\n return combine_cuts([fid_cuts('muN_pt', 'muN_eta'),\n fid_cuts('muP_pt', 'muP_eta')])", "def bessel_bandpass_filter(data, lowcut, highcut, fs, order=2):\n\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n\n # bessel() and lfilter() are from scipy.signal\n\n b, a = bessel(order, [low, high], btype='band')\n y = lfilter(b, a, data)\n return y", "def cutflow(self, *names):\n for cut in names:\n if not isinstance(cut, str) or cut not in self._names:\n raise ValueError(\n \"All arguments must be strings that refer to the names of existing selections\"\n )\n\n masksonecut, maskscutflow = [], []\n for i, cut in enumerate(names):\n mask1 = self.any(cut)\n mask2 = self.all(*(names[: i + 1]))\n masksonecut.append(mask1)\n maskscutflow.append(mask2)\n\n if not self.delayed_mode:\n nevonecut = [len(self._data)]\n nevcutflow = [len(self._data)]\n nevonecut.extend(numpy.sum(masksonecut, axis=1))\n nevcutflow.extend(numpy.sum(maskscutflow, axis=1))\n\n else:\n nevonecut = [dask_awkward.count(self._data, axis=0)]\n nevcutflow = [dask_awkward.count(self._data, axis=0)]\n nevonecut.extend([dask_awkward.sum(mask1) for mask1 in masksonecut])\n nevcutflow.extend([dask_awkward.sum(mask2) for mask2 in maskscutflow])\n\n return Cutflow(\n names, nevonecut, nevcutflow, masksonecut, maskscutflow, self.delayed_mode\n )", "def apply_cuts(objects):\n #- Check if objects is a filename instead of the actual data\n if isinstance(objects, (str, unicode)):\n objects = io.read_tractor(objects)\n \n #- undo Milky Way extinction\n flux = unextinct_fluxes(objects)\n gflux = flux['GFLUX']\n rflux = flux['RFLUX']\n zflux = flux['ZFLUX']\n w1flux = flux['W1FLUX']\n wflux = flux['WFLUX']\n \n #- DR1 has targets off the edge of the brick; trim to just this brick\n if 'BRICK_PRIMARY' in objects.dtype.names:\n primary = objects['BRICK_PRIMARY']\n else:\n primary = np.ones(len(objects), dtype=bool)\n \n #----- LRG\n lrg = primary.copy()\n lrg &= rflux > 10**((22.5-23.0)/2.5)\n lrg &= zflux > 10**((22.5-20.56)/2.5)\n lrg &= w1flux > 10**((22.5-19.35)/2.5)\n lrg &= zflux > rflux * 10**(1.6/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n lrg &= w1flux * rflux.clip(0)**(1.33-1) > zflux.clip(0)**1.33 * 10**(-0.33/2.5)\n\n #----- ELG\n elg = primary.copy()\n elg &= rflux > 10**((22.5-23.4)/2.5)\n elg &= zflux > rflux * 10**(0.3/2.5)\n elg &= zflux < rflux * 10**(1.5/2.5)\n elg &= rflux**2 < gflux * zflux * 10**(-0.2/2.5)\n elg &= zflux < gflux * 10**(1.2/2.5)\n\n #----- Quasars\n psflike = ((objects['TYPE'] == 'PSF') | (objects['TYPE'] == 'PSF ')) \n qso = primary.copy()\n qso &= psflike\n qso &= rflux > 10**((22.5-23.0)/2.5)\n qso &= rflux < gflux * 10**(1.0/2.5)\n qso &= zflux > rflux * 10**(-0.3/2.5)\n qso &= zflux < rflux * 10**(1.1/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n qso &= wflux * gflux.clip(0)**1.2 > rflux.clip(0)**(1+1.2) * 10**(-0.4/2.5)\n ### qso &= wflux * gflux**1.2 > rflux**(1+1.2) * 10**(2/2.5)\n\n #------ Bright Galaxy Survey\n #- 'PSF' for astropy.io.fits; 'PSF ' for fitsio (sigh)\n bgs = primary.copy()\n bgs &= ~psflike\n bgs &= rflux > 10**((22.5-19.35)/2.5)\n\n #----- Standard stars\n fstd = primary.copy()\n fstd &= psflike\n fracflux = objects['DECAM_FRACFLUX'].T \n signal2noise = objects['DECAM_FLUX'] * np.sqrt(objects['DECAM_FLUX_IVAR'])\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for j in (1,2,4): #- g, r, 
z\n fstd &= fracflux[j] < 0.04\n fstd &= signal2noise[:, j] > 10\n\n #- observed flux; no Milky Way extinction\n obs_rflux = objects['DECAM_FLUX'][:, 2]\n fstd &= obs_rflux < 10**((22.5-16.0)/2.5)\n fstd &= obs_rflux > 10**((22.5-19.0)/2.5)\n #- colors near BD+17; ignore warnings about flux<=0\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n grcolor = 2.5 * np.log10(rflux / gflux)\n rzcolor = 2.5 * np.log10(zflux / rflux)\n fstd &= (grcolor - 0.32)**2 + (rzcolor - 0.13)**2 < 0.06**2\n\n #-----\n #- construct the targetflag bits\n #- Currently our only cuts are DECam based (i.e. South)\n desi_target = lrg * desi_mask.LRG_SOUTH\n desi_target |= elg * desi_mask.ELG_SOUTH\n desi_target |= qso * desi_mask.QSO_SOUTH\n\n desi_target |= lrg * desi_mask.LRG\n desi_target |= elg * desi_mask.ELG\n desi_target |= qso * desi_mask.QSO\n\n desi_target |= fstd * desi_mask.STD_FSTAR\n \n bgs_target = bgs * bgs_mask.BGS_BRIGHT\n bgs_target |= bgs * bgs_mask.BGS_BRIGHT_SOUTH\n\n #- nothing for MWS yet; will be GAIA-based\n mws_target = np.zeros_like(bgs_target)\n\n #- Are any BGS or MWS bit set? Tell desi_target too.\n desi_target |= (bgs_target != 0) * desi_mask.BGS_ANY\n desi_target |= (mws_target != 0) * desi_mask.MWS_ANY\n\n return desi_target, bgs_target, mws_target", "def filt_bp(sig: np.ndarray, Ss: int, Cfs0: int, Cfs1: None,\n order=5) -> np.ndarray:\n nyq = 0.5 * Ss\n normal_cutoff1 = Cfs0 / nyq\n normal_cutoff2 = Cfs1 / nyq\n b, a = butter(order, (normal_cutoff1, normal_cutoff2),\n btype='band',\n analog=False)\n return lfilter(b, a, sig)", "def butter_bp_coe(lowcut, highcut, fs, order=1):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='band')\n return b, a", "def ANN_binned_tagged_jets_hist(datalist, model, discriminant_cuts, CSV_cuts, bins, nbins, mode=\"pT_jet\",Save=False,addFeature=False):\n title = \"binned_tagged_jets_vs_\"+mode\n\tdiscriminant = \"ANN\"\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n print \"working on\",datatitle\n ran = data[4]\n\t\tCSV = data[2]\n\t\tpT = data[1]\n\t\tx_data = data[0]\n AllJetsHistlist.append(rt.TH1D(datatitle+\"_AllJets\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(datatitle+\"_CSV\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(datatitle+\"_Discriminant\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n\t\n\t\tif addFeature == False:\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data))\n\t\telif addFeature == \"pT\":\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[pT/200])\n\t\telif addFeature == \"PV\":\n\t\t\tassert x_data.shape[1] == 21, \"wrong x_data format: PV cannot be found\"\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\t\telse:\n\t\t\tprint \"invalid feature input\"\n\t\t\treturn None\n\t\tbin_numbers = ANN_bin_selection(pT,bins)\n\n\t for i,pT_value in enumerate(pT):\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJetsHistlist[n].Fill(pT_value)\n\t if pred_y[i] >= discriminant_cuts[bin_numbers[i]]: DiscriminantHistlist[n].Fill(pT_value)\n\t if CSV[i] >= CSV_cuts[bin_numbers[i]]: CSVHistlist[n].Fill(pT_value)\n\n canvaslist = []\n 
legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n canvaslist.append(rt.TCanvas(datatitle+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(datatitle+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(mode)\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(title+\"_\"+datatitle+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\",\"recreate\"))\n print \"saved histogram as Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\"\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def targetFromSignals(obars, nbands=3, amount=1, targetprofit=15., stoploss=45.):\n # bandsg, yband, ask, bid, day, amount, targetprofit, stoploss\n bars = obars.copy()\n for j in range(nbands): # for each band traverse it\n ibandsg = bars.columns.get_loc('bandsg'+str(j))\n # being pessimistic ... right\n ybandsell = traverseSellBand(bars.iloc[:, ibandsg].values.astype(int),\n bars.H.values, bars.L.values, bars.date.values,\n amount, targetprofit, stoploss)\n ybandbuy = traverseBuyBand(bars.iloc[:, ibandsg].values.astype(int),\n bars.H.values, bars.L.values, bars.date.values,\n amount, targetprofit, stoploss)\n bars['y'+str(j)] = mergebandsignals(ybandsell, ybandbuy)\n\n return bars", "def runCutVals(df, eVal=0., windowSize = 2):\n\n dfg = df.groupby(['cpd1'])\n\n eMin = round(eVal - windowSize/2, 2)\n eMax = round(eMin + windowSize, 2)\n dFullPeakE, dFullBkgE = 0, 0\n dCutPeakE, dCutBkgE = 0, 0\n dFullPeakN, dFullBkgN = 0, 0\n dCutPeakN, dCutBkgN = 0, 0\n\n for name, g in dfg:\n valsFull = g['trapENFCal1'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values\n\n valsCut = g['trapENFCal1'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>=eMin) & (g['trapENFCal1']<=eMax)].values\n if name in enrDetList:\n dFullPeakE += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakE += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgE += len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgE += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n elif name in natDetList:\n dFullPeakN += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakN += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgN += len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgN += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n\n return dFullPeakE, dCutPeakE, dFullBkgE, dCutBkgE, dFullPeakN, dCutPeakN, dFullBkgN, dCutBkgN", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def __init__(self, *args):\n 
_BRepAlgo.BRepAlgo_Cut_swiginit(self,_BRepAlgo.new_BRepAlgo_Cut(*args))", "def efficient_binned_tagged_jets_hist(datalist,discriminant, discriminant_cuts, CSV_cuts, bins, nbins, Difference=False, mode=\"pT_jet\",Save=False):\n title = \"binned_tagged_jets_vs_\"+mode\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n print \"working on\",data[1]\n ran = data[2]\n AllJetsHistlist.append(rt.TH1D(data[1]+\"_AllJets\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(data[1]+\"_CSV\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(data[1]+\"_Discriminant\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n for particle in data[0]:\n bin_number = FCM.bin_selection(particle,bins)\n if bin_number == -100: continue\n AllJetsHistlist[n].Fill(particle[feature])\n if particle[1] >= CSV_cuts[bin_number]: CSVHistlist[n].Fill(particle[feature])\n if Difference:\n L = particle[8]-particle[5]\n else:\n if particle[17] != 0:\n L = particle[20]/float(particle[17])\n else:\n continue\n if L >= discriminant_cuts[bin_number]: DiscriminantHistlist[n].Fill(particle[feature])\n canvaslist = []\n legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n canvaslist.append(rt.TCanvas(data[1]+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(data[1]+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(\"jet p_{T} (GeV)\")\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(title+\"_\"+data[1]+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+data[1]+discriminant+\".root\",\"recreate\"))\n print \"saved histogram as Thesis_Plots/root_files/\"+title+\"_\"+data[1]+discriminant+\".root\"\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def cut(S, T, graph):\n ###TODO\n pass", "def butter_bandstop_filter(data, lowcut, highcut, fs, order):\n\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n\n i, u = sg.butter(order, (low, high), btype='bandstop')\n y = sg.filtfilt(i, u, data)\n return y", "def get_data_and_cuts(args):\n\n if args['verbose'] >= 2:\n print(\"Load data\\n\", flush=True)\n data = get_dataset(args)\n\n if args['verbose'] >= 2:\n print(\"Find cuts\", flush=True)\n cuts = get_cuts(data, args, verbose=args['verbose'])\n if args['verbose'] >= 2:\n print(f'\\tI found {len(cuts.values)} cuts\\n')\n\n print(\"Compute cost\", flush=True)\n cost_function = get_cost_function(data, args)\n cuts = compute_cost_and_order_cuts(cuts, cost_function)\n\n cuts = pick_cuts_up_to_order(cuts,\n percentile=args['experiment']['percentile_orders'])\n if args['verbose'] >= 2:\n max_considered_order = cuts.costs[-1]\n print(f\"\\tI will stop at order: {max_considered_order}\")\n print(f'\\tI will use {len(cuts.values)} cuts\\n', 
flush=True)\n\n if args['plot']['cuts']:\n if args['verbose'] >= 2:\n print(f\"\\tPlotting cuts\")\n\n plot_cuts(data, cuts,\n nb_cuts_to_plot=args['plot']['nb_cuts'],\n path=args['plot_dir'])\n\n return data, cuts", "def place(self, sig, bg_x, bg_y, cut_1_range, cut_2_range):\n assert bg_x.shape == bg_y.shape\n npts_1, npts_2 = bg_x.shape\n\n c1_bin_bounds = np.linspace(*cut_1_range, num=(npts_1 + 1))\n c1_bin = np.digitize([self._cut_1], c1_bin_bounds) - 1\n\n c2_bin_bounds = np.linspace(*cut_2_range, num=(npts_2 + 1))\n c2_bin = np.digitize([self._cut_2], c2_bin_bounds) - 1\n\n if any(b < 0 for b in [c1_bin, c2_bin]): \n raise ValueError(\"can't put a cut in the underflow bin\")\n \n eff = float(sig[c1_bin, c2_bin] / sig.max())\n\n def get_rej(bkg_array): \n array_val = bkg_array.max() / bkg_array[c1_bin, c2_bin]\n return float(array_val)\n rej_x, rej_y = [get_rej(ar) for ar in [bg_x, bg_y]]\n\n self._xyz = rej_x, rej_y, eff\n self._cut_ranges = (cut_1_range, cut_2_range)", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def cut(\n self,\n bins,\n **kwargs,\n ):\n\n def squeeze_and_cut(df, *args, **kwargs):\n # We need this function to ensure we squeeze our internal\n # representation (a dataframe) to a Series.\n series = df.squeeze(axis=1)\n return pandas.cut(series, *args, **kwargs)\n\n # We use `default_to_pandas` here since the type and number of\n # results can change depending on the input arguments.\n return self.default_to_pandas(squeeze_and_cut, bins, **kwargs)", "def callback_freq_cut(val):\n global plot_mode\n global idx_freq\n last_plot_mode = plot_mode\n plot_mode = 'freq_cut'\n# print( 'scale_freq', scale_freq)\n idx_freq = freq_to_idx( val, scale_freq )\n val_freq = idx_freq * scale_freq\n# print( 'val idx_freq val_freq', val, idx_freq, val_freq )\n update_num_shadow(int(sld['neighbors'].val))\n #plot 121\n lcutfreq.set_ydata( [val_freq, val_freq])\n lcuttime.set_alpha( 0.0 )\n lcutfreq.set_alpha( alpha_hm )\n #plot 122\n if plot_mode == last_plot_mode:\n replot_flags = get_replot_flag( idx_freq )\n replot_shadow( replot_flags )\n update_shadow( ~replot_flags )\n update_light()\n else:\n replot_shadow( [True, True])\n replot_light()\n reform_axis()\n \n fig.canvas.draw_idle()", "def get_buckets(self, first, last, num_buckets, hertz_cutoff=float(5)):\n # Pensar en la posibilidad de no aplicar PCA, permitir utilizar fft sobre una feature diferente, por ejemplo raiz-cuadrada(x2 + y2 + z2)\n if self.pca == True:\n pca = PCA(n_components=1, copy=True, whiten=True)\n numpy_data = array(self.data)\n transformed_dataset = PCA.fit_transform(pca, numpy_data)\n slice=transformed_dataset[first:last]\n else:\n slice = self.data[first:last]\n slice = [column[0] for column in slice]\n \n transformed = fft.fft(slice)\n absolute = [abs(complex) for complex in transformed]\n\n frequencies = self.get_frequencies()\n\n buckets = [0 for i in range(num_buckets)]\n width = hertz_cutoff / num_buckets\n sum_of_buckets = 0.0000001\n for i in range(1, len(absolute)):\n index = int(frequencies[i] / width)\n if index >= num_buckets:\n break\n buckets[index] += absolute[i]\n sum_of_buckets += absolute[i]\n\n #if args.normalize == 't':\n # buckets = map(lambda x: x/sum_of_buckets, buckets)\n\n return buckets", "def reformat_cuts(input_cuts):\n output_cuts = []\n for cut in input_cuts:\n cut = 
list(cut)\n if cut[1]==None:\n cut[1]=float(\"-inf\")\n if cut[2]==None:\n cut[2]=float(\"inf\")\n cut = tuple(cut)\n output_cuts.append(cut)\n return output_cuts", "def rawSignals(obars, window=21, nbands=3, inc=0.5, save=True):\n bars = obars.copy() # avoid warnings\n bars['OHLC'] = np.nan # typical price\n bars.OHLC.values[:] = np.mean(bars.values[:,0:4], axis=1) # 1000x faster\n price = bars.OHLC.values\n for i in range(nbands):\n upband, sma, lwband = ta.BBANDS(price, window*inc)\n if save: # for plotting stuff\n bars['bandlw'+str(i)] = lwband\n bars['bandup'+str(i)] = upband\n bars['bandsg'+str(i)] = 0 # signal for this band\n signals = fastbollingerSignal(price, upband, lwband)\n bars.loc[:, 'bandsg'+str(i)] = signals.astype(int) # signal for this band\n inc += 0.5\n bars.dropna(inplace=True)\n return bars", "def compute_cost_and_order_cuts(cuts, cost_function):\n\n cost_cuts = np.zeros(len(cuts.values), dtype=float)\n for i_cut, cut in enumerate(cuts.values):\n cost_cuts[i_cut] = cost_function(cut)\n idx = np.argsort(cost_cuts)\n\n cuts.values = cuts.values[idx]\n cuts.costs = cost_cuts[idx]\n if cuts.names is not None:\n cuts.names = cuts.names[idx]\n if cuts.equations is not None:\n cuts.equations = cuts.equations[idx]\n\n return cuts", "def get_sensi_eff_curves_various_exp(bolo_name, WIMP_mass, d_cut, analysis_type, MVA_tag, bin_X, min_X, max_X, list_variables, **kwargs): \n\n try:\n kwargs[\"weight_dir\"]\n except KeyError:\n sys.exit()\n\n #Get heat _fraction\n heat_fraction = kwargs[\"classifier_name\"][13:]\n\n #Get scaling dict to set the weights\n d_scaling = BDT_fh.open_MVA_scaling_file(bolo_name, analysis_type, MVA_tag)\n # print d_scaling\n\n d_event_dir = {\"S1Pb\":\"Beta_and_Pb\", \"S2Pb\":\"Beta_and_Pb\", \"S1Beta\":\"Beta_and_Pb\", \"S2Beta\":\"Beta_and_Pb\",\n \"S1Gamma\":\"Gamma\", \"S2Gamma\":\"Gamma\", \"FidGamma\":\"Gamma\", \n \"heatonly_heat_fraction\" + heat_fraction: \"Heatonly\", \"WIMP_mass_\" + str(WIMP_mass): \"WIMP\"}\n key_heat = \"heatonly_heat_fraction\" + heat_fraction\n\n #Load data\n d_test = dp.get_data_array(bolo_name, 1, analysis_type, MVA_tag, d_event_dir.keys(), 1, list_variables, datasplit = 1)\n\n # Get classifier\n model_dir = script_utils.create_directory(\"../../Classifier_files/\" + bolo_name + \"/\" + analysis_type + \"/\"+ kwargs[\"weight_dir\"] + \"/\") \n if kwargs.has_key(\"classifier_name\"):\n modelfile = model_dir + \"xgboost_classifier_mass_\" + str(WIMP_mass) + \"_\" + kwargs[\"classifier_name\"] + \".model\"\n bst = xgb.Booster({'nthread':16}, model_file = modelfile)\n\n #Get predictions on test sample\n d_pred = {}\n d_hist = {}\n d_color = {\"S1Pb\":kOrange-8, \"S2Pb\":kOrange-9, \"S1Beta\":kGreen+2, \"S2Beta\":kGreen-3,\n \"S1Gamma\":kBlue-7, \"S2Gamma\":kBlue, \"FidGamma\":kAzure+10, key_heat: kRed, \"WIMP_mass_\" + str(WIMP_mass):kGray, \"neutron\":kMagenta}\n\n #ROOT out_dir \n root_dir = script_utils.create_directory(\"./ROOT_files/\" + bolo_name + \"/\" + analysis_type + \"/\")\n file_root = TFile(root_dir + bolo_name + \"_sensi_eff_curves_heat_fraction\" + heat_fraction + \"_mass_\" + str(WIMP_mass) + \".root\", \"recreate\")\n\n #Loop over possible exposure values\n # for exposure in [10, 50, 100, 500]:\n for exposure in [66]:\n script_utils.print_utility(\"Getting sensi + eff for exposure of \" + str(exposure) + \" mass of \" + str(WIMP_mass))\n for event_type in d_test.keys():\n d_pred[event_type] = bst.predict( xgb.DMatrix(d_test[event_type].iloc[:,:-3].values) )\n d_hist[event_type] = TH1F(\"h\" + event_type + 
str(exposure), \"h\" + event_type + str(exposure), bin_X, min_X, max_X)\n PyRPl.fill_TH1(d_hist[event_type], d_pred[event_type])\n PyRPl.process_TH1(d_hist[event_type], use_fill_bool = True, color = d_color[event_type] )\n if \"WIMP\" not in event_type:\n d_hist[event_type].Scale(float(d_scaling[\"prop_\" + event_type])*float(d_scaling[\"exp_per_day\"])*exposure/float(d_hist[event_type].Integral()))\n else:\n d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Scale(8000./d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral())\n\n list_hist_bckg =[d_hist[\"S1Pb\"], d_hist[\"S2Pb\"], d_hist[\"S1Beta\"], d_hist[\"S2Beta\"], d_hist[\"S1Gamma\"], d_hist[\"S2Gamma\"], d_hist[\"FidGamma\"], d_hist[key_heat]]\n\n hsum_bckg=TH1F(\"hsum_bckg\" + str(exposure),\"hsum_bckg\" + str(exposure), bin_X, min_X, max_X)\n for i in range(1,bin_X+1):\n sumcontent = sum([h.GetBinContent(i) for h in list_hist_bckg])\n hsum_bckg.SetBinContent(i, sumcontent)\n\n # print hsum_bckg.Integral(hsum_bckg.FindBin(3.5), bin_X)\n # print d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].FindBin(3.5), bin_X)/d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral()\n\n hs=THStack(\"hs\", \"hs\")\n for hist in list_hist_bckg + [d_hist[\"WIMP_mass_\" + str(WIMP_mass)]]:\n hs.Add(hist)\n\n # cc = TCanvas(\"cc\", \"cc\")\n # h1=TH1F(\"h1\",\"h1\", bin_X, min_X, max_X)\n # PyRPl.process_TH1(h1, X_title=\"BDT ouput\", min_Y = 1E-1, max_Y = 20000)\n \n # gPad.SetLogy()\n # h1.Draw()\n # hs.Draw(\"same\")\n # raw_input()\n\n class Sensitivity:\n def __call__( self, x, par ):\n\n bin_number_sig = d_hist[\"WIMP_mass_\" + str(WIMP_mass)].FindBin(x[0])\n bin_number_bckg = hsum_bckg.FindBin(x[0])\n eff_sig = float(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral(bin_number_sig, bin_X))\n exp_bckg = hsum_bckg.Integral(bin_number_bckg, bin_X)\n\n vec_proba = [TMath.PoissonI(i, exp_bckg) for i in range(500)] \n lim_Poisson_bckg = np.sum(np.array([PoissonCL.compute_90CL_limit(i)*vec_proba[i] for i in range(500)]))\n\n if eff_sig<=0:\n return 1E10\n else:\n return lim_Poisson_bckg/eff_sig + par[0]\n\n class Signal_eff:\n def __call__( self, x, par ):\n\n bin_number = d_hist[\"WIMP_mass_\" + str(WIMP_mass)].FindBin(x[0])\n integ = float(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral(bin_number, bin_X))/float(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral())\n return par[0] + integ\n\n h = TH1F(\"h\", \"h\",100, 0, 10)\n PyRPl.process_TH1(h, X_title = \"BDT cut\", Y_title = \"Sensitivity (a.u.)\")\n h.SetMinimum(1)\n h.SetMaximum(1E3)\n # h.Draw()\n\n fopt = TF1(\"sensitivity_expo_\" + str(exposure), Sensitivity(), 0,10, 1)\n fopt.SetParameter(0,0)\n fopt.SetNpx(100)\n # fopt.Draw(\"same\")\n\n fsig_eff = TF1(\"signal_eff_expo_\" + str(exposure), Signal_eff(), 0,10, 1)\n fsig_eff.SetParameter(0,0)\n fsig_eff.SetNpx(500)\n\n min_X = fopt.GetMinimumX(2,10)\n print \"signal eff\", fsig_eff.Eval(min_X)\n print \"bckg_exp\", hsum_bckg.Integral(hsum_bckg.FindBin(min_X), bin_X)\n\n # fopt.Write()\n # fsig_eff.Write()\n\n # gPad.SetLogy()\n # raw_input()\n # del h \n\n # file_root.Close()", "def geneffcut(energy, array, cutvals=hads, bins=BINS):\n binning = np.digitize(energy, bins) - 1\n binning[binning < 0] = 0.\n binning[binning >= len(bins)-1] = 0.\n hadeffcut = np.zeros(len(energy), dtype=bool)\n for i, cutval in enumerate(cutvals):\n binmask = binning == i\n hadeffcut[binmask] = array[binmask] < cutval\n binning = np.digitize(energy, bins) - 1\n binning[binning < 0] = -1\n binning[binning >= len(bins)-1] = -1\n 
hadeffcut[binning == -1] = 0\n\n return hadeffcut", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, axis=0): \n omega = 0.5 * fs\n low = lowcut / omega\n high = highcut / omega\n b, a = signal.butter(order, [low, high], btype='band')\n y = signal.lfilter(b, a, data, axis=0)\n return y", "def cut_train(self, hits, *args):\n n_days = self.predict_window + self.train_window\n # How much free space we have to choose starting day\n free_space = self.inp.data_days - n_days - self.back_offset - self.start_offset\n if self.verbose:\n lower_train_start = self.inp.data_start + pd.Timedelta(self.start_offset, 'D')\n lower_test_end = lower_train_start + pd.Timedelta(n_days, 'D')\n lower_test_start = lower_test_end - pd.Timedelta(self.predict_window, 'D')\n upper_train_start = self.inp.data_start + pd.Timedelta(free_space - 1, 'D')\n upper_test_end = upper_train_start + pd.Timedelta(n_days, 'D')\n upper_test_start = upper_test_end - pd.Timedelta(self.predict_window, 'D')\n print(f\"Free space for training: {free_space} days.\")\n print(f\" Lower train {lower_train_start}, prediction {lower_test_start}..{lower_test_end}\")\n print(f\" Upper train {upper_train_start}, prediction {upper_test_start}..{upper_test_end}\")\n # Random starting point\n offset = tf.random_uniform((), self.start_offset,self.start_offset + free_space+1, dtype=tf.int32, seed=self.rand_seed)\n end = offset + n_days\n # Cut all the things\n return self.cut(hits, offset, end) + args", "def __init__(self, predict_lowerbound: float, first_season: int, aug_num_cuts: int, aug_min_cuts_on: int,\n cdf_cutoff: float):\n super().__init__(CutLayer(MultiplyAggregateLayer(InnerAppearanceLayer(first_season, aug_num_cuts,\n aug_min_cuts_on, cdf_cutoff)), mean, 1.0, predict_lowerbound))", "def efficient_tagged_jets_hist(datalist,discriminant, discriminant_cut, CSV_cut, bins, Difference=False, mode=\"pT_jet\",Save=False):\n title = \"tagged_jets_vs_\"+mode\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n print \"working on\",data[1]\n ran = data[2]\n AllJetsHistlist.append(rt.TH1D(data[1]+\"_AllJets\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(data[1]+\"_CSV\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(data[1]+\"_Discriminant\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n for particle in data[0]:\n AllJetsHistlist[n].Fill(particle[feature])\n if particle[1] >= CSV_cut: CSVHistlist[n].Fill(particle[feature])\n if Difference:\n L = particle[8]-particle[5]\n else:\n if particle[13] != 0:\n L = particle[16]/float(particle[13])\n else:\n continue\n if L >= discriminant_cut: DiscriminantHistlist[n].Fill(particle[feature])\n canvaslist = []\n legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n canvaslist.append(rt.TCanvas(data[1]+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(data[1]+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(mode)\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n 
AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(\"Thesis_Plots/\"+title+\"_\"+data[1]+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+data[1]+discriminant+\".root\",\"recreate\"))\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def group_causality(sig_list, condition, freqs, ROI_labels=None,\n out_path=None, submount=10):\n print 'Running group causality...'\n set_directory(out_path)\n sig_caus = []\n\n for f in sig_list:\n sig_cau = np.load(f)\n print sig_cau.shape[-1]\n sig_caus.append(sig_cau)\n\n sig_caus = np.array(sig_caus)\n sig_group = sig_caus.sum(axis=0)\n plt.close()\n for i in xrange(len(sig_group)):\n fmin, fmax = freqs[i][0], freqs[i][1]\n cau_band = sig_group[i]\n # cau_band[cau_band < submount] = 0\n cau_band[cau_band < submount] = 0\n # fig, ax = pl.subplots()\n cmap = plt.get_cmap('hot', cau_band.max()+1-submount)\n cmap.set_under('gray')\n plt.matshow(cau_band, interpolation='nearest', vmin=submount, cmap=cmap)\n if ROI_labels == None:\n ROI_labels = np.arange(cau_band.shape[0]) + 1\n pl.xticks(np.arange(cau_band.shape[0]), ROI_labels, fontsize=9, rotation='vertical')\n pl.yticks(np.arange(cau_band.shape[0]), ROI_labels, fontsize=9)\n # pl.imshow(cau_band, interpolation='nearest')\n # pl.set_cmap('BlueRedAlpha')\n np.save(out_path + '/%s_%s_%sHz.npy' %\n (condition, str(fmin), str(fmax)), cau_band)\n v = np.arange(submount, cau_band.max()+1, 1)\n\n # cax = ax.scatter(x, y, c=z, s=100, cmap=cmap, vmin=10, vmax=z.max())\n # fig.colorbar(extend='min')\n\n plt.colorbar(ticks=v, extend='min')\n # pl.show()\n plt.savefig(out_path + '/%s_%s_%sHz.png' %\n (condition, str(fmin), str(fmax)), dpi=300)\n plt.close()\n return", "def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n 
plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)", "def cut_sig(self):\n c = TCut(self.cut_both)\n c += TCut(self._return_if('_cut_sig'))\n return c", "def get_pop_list_for_scaling(bolo_name, d_cut, data_dir, tree_name = \"t_merged\"):\n \n file_tree = TFile(data_dir+bolo_name+\"_lowmass_fond.root\")\n tree = file_tree.Get(tree_name)\n\n #Create background hist directory\n pop_path_name = script_utils.create_directory(\"../Analyse_\" + bolo_name + \"/Populations/Pop_for_scaling/\")\n\n #Load the estimator\n d_est = BDT_fh.open_estimator_file(bolo_name)\n d_std_true_events = BDT_fh.open_true_event_FWHM_file(bolo_name)\n\n #Best estimator for heat: coefficients\n coeff_EC1, coeff_EC2 = str(d_est[\"HEAT\"][:5]), str(1- float(d_est[\"HEAT\"][:5]))\n coeff_EIA, coeff_EIB = str(d_est[\"S1\"][:5]), str(1-float(d_est[\"S1\"][:5]))\n coeff_EIC, coeff_EID = str(d_est[\"S2\"][:5]), str(1-float(d_est[\"S2\"][:5]))\n \n sigma_IA = str(d_std_true_events[\"FWIA\"])\n sigma_IC = str(d_std_true_events[\"FWIC\"])\n sigma_IB = str(d_std_true_events[\"FWIB\"])\n sigma_ID = str(d_std_true_events[\"FWID\"])\n\n #Load standard cuts\n TCut_path_name = script_utils.create_directory(\"../Cut_files/\") \n TCut_file_name =\"TCuts.txt\" \n file_TCut =\"\" \n #Add an exception if the file does not exist\n try:\n file_TCut = script_utils.open_text_file(TCut_path_name, TCut_file_name , \"r\")\n except IOError: \n script_utils.print_utility(script_utils.COL(\"No such file, use get_standard_cuts.py first\",\"fail\"))\n sys.exit()\n \n # Load the cut values. 
\n list_file_TCut_lines =[line.rstrip().split(\",\") for line in file_TCut.readlines()]\n standard_cuts =\"\"\n # Add a boolean flag to check if the bolo has its cuts in the file\n is_bolo_in_file =False\n for line in list_file_TCut_lines:\n if bolo_name == line[0]:\n standard_cuts = line[1]\n is_bolo_in_file = True\n assert(is_bolo_in_file)\n\n \n l_all = TEventList(\"l_all\")\n l_heatonly = TEventList(\"l_heatonly\")\n \n l_FidGamma = TEventList(\"l_FidGamma\")\n l_S1Gamma = TEventList(\"l_S1Gamma\")\n l_S2Gamma = TEventList(\"l_S2Gamma\")\n \n l_S1Beta = TEventList(\"l_S1Beta\")\n l_S2Beta = TEventList(\"l_S2Beta\")\n \n l_S1Pb = TEventList(\"l_S1Pb\")\n l_S2Pb = TEventList(\"l_S2Pb\")\n\n\n string_EC = coeff_EC1 + \"*EC1_ERA+\" + coeff_EC2 + \"*EC2_ERA\"\n string_EI = coeff_EIB + \"*EIB+\" + coeff_EID + \"*EID\"\n\n standard_cuts = standard_cuts + \"&&KTH<1&&KTH>0\"\n heat_cut = str(d_cut[\"ECinf\"]) + \"<\" + string_EC + \"&&\" + str(d_cut[\"ECsup\"]) + \">\" + string_EC + \"&& abs(EC1_ERA-EC2_ERA)<1\"\n ion_cut = str(d_cut[\"EIinf\"]) + \"<\" + string_EI + \"&&\" + str(d_cut[\"EIsup\"]) + \">\" + string_EI\n veto_cut = \"EIA<\" + str(d_cut[\"sigma_vet\"]) + \"*\" + sigma_IA + \"&&\" + \"EIC<\" + str(d_cut[\"sigma_vet\"]) + \"*\" + sigma_IC\n \n # all_cuts = \"&&\".join([standard_cuts, heat_cut, ion_cut, veto_cut])\n all_cuts = \"&&\".join([standard_cuts, heat_cut, ion_cut, veto_cut])\n\n # print tree\n # print all_cuts.split(\"&&\")\n # raw_input()\n\n ###############################\n # All\n ###############################\n tree.Draw(\">>l_all\", all_cuts )\n pop_len = l_all.GetN()\n pop_file_name = bolo_name + \"_all_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_all.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close()\n\n ###############################\n # Heatonly\n ###############################\n tree.Draw(\">>l_heatonly\",all_cuts + \" && EIA<2.7*\" + sigma_IA +\" && EIB<2.7*\" + sigma_IB +\"&& EIC<2.7*\" + sigma_IC +\"&& EID<2.7*\" + sigma_ID)\n pop_len = l_heatonly.GetN()\n pop_file_name = bolo_name + \"_heatonly_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_heatonly.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close()\n\n\n ##################################\n # G A M M A E V E N T S\n ##################################\n #Fiducial gammas\n tree.Draw(\">>l_FidGamma\",all_cuts + \" && EIA<2.7*\" + sigma_IA +\" && EIB>2.7*\" + sigma_IB +\"&& EIC<2.7*\" + sigma_IC +\"&& EID>2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_FID\"] + \">0.7\")\n pop_len = l_FidGamma.GetN()\n pop_file_name = bolo_name + \"_FidGamma_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_FidGamma.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n #S1 gammas\n tree.Draw(\">>l_S1Gamma\",all_cuts + \" && EIA>2.7*\" + 
sigma_IA +\" && EIB>2.7*\" + sigma_IB +\"&& EIC<2.7*\" + sigma_IC +\"&& EID<2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S1\"] + \">0.65\")\n pop_len = l_S1Gamma.GetN()\n pop_file_name = bolo_name + \"_S1Gamma_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S1Gamma.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n #S2 gammas\n tree.Draw(\">>l_S2Gamma\",all_cuts + \" && EIA<2.7*\" + sigma_IA +\" && EIB<2.7*\" + sigma_IB +\"&& EIC>2.7*\" + sigma_IC +\"&& EID>2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S2\"] + \">0.65\")\n pop_len = l_S2Gamma.GetN()\n pop_file_name = bolo_name + \"_S2Gamma_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S2Gamma.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n\n ##################################\n # B E T A E V E N T S\n ##################################\n #S1 beta\n tree.Draw(\">>l_S1Beta\",all_cuts + \" && EIA>2.7*\" + sigma_IA +\" && EIB>2.7*\" + sigma_IB +\"&& EIC<2.7*\" + sigma_IC +\"&& EID<2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S1\"] + \"<0.65 && \" + d_est[\"Q_S1\"] + \">0.2\")\n pop_len = l_S1Beta.GetN()\n pop_file_name = bolo_name + \"_S1Beta_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S1Beta.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n #S2 beta\n tree.Draw(\">>l_S2Beta\",all_cuts + \" && EIA<2.7*\" + sigma_IA +\" && EIB<2.7*\" + sigma_IB +\"&& EIC>2.7*\" + sigma_IC +\"&& EID>2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S2\"] + \"<0.65 && \" + d_est[\"Q_S2\"] + \">0.2\")\n pop_len = l_S2Beta.GetN()\n pop_file_name = bolo_name + \"_S2Beta_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S2Beta.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n\n ##################################\n # P b E V E N T S\n ##################################\n # S1 Pb\n tree.Draw(\">>l_S1Pb\",all_cuts + \" && EIA>2.7*\" + sigma_IA +\" && EIB>2.7*\" + sigma_IB +\"&& EIC<2.7*\" + sigma_IC +\"&& EID<2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S1\"] + \"<0.15 &&\" + d_est[\"Q_S1\"] + \">0.04\")\n print \n pop_len = l_S1Pb.GetN()\n pop_file_name = bolo_name + \"_S1Pb_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S1Pb.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n # S2 Pb\n tree.Draw(\">>l_S2Pb\",all_cuts + \" && EIA<2.7*\" + sigma_IA +\" 
&& EIB<2.7*\" + sigma_IB +\"&& EIC>2.7*\" + sigma_IC +\"&& EID>2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S2\"] + \"<0.15 &&\" + d_est[\"Q_S2\"] + \">0.04\")\n pop_len = l_S2Pb.GetN()\n pop_file_name = bolo_name + \"_S2Pb_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S2Pb.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n\n list_list = [l_heatonly, l_FidGamma, l_S1Gamma, l_S2Gamma, l_S1Beta, l_S2Beta, l_S1Pb, l_S2Pb]\n list_num = [l.GetN() for l in list_list]\n list_ev = [\"heatonly\", \"FidGamma\", \"S1Gamma\", \"S2Gamma\", \"S1Beta\", \"S2Beta\", \"S1Pb\", \"S2Pb\"]\n for ev, num in zip(list_ev, list_num):\n print ev, num\n\n print \"all known\", sum(list_num)\n print \"all\", l_all.GetN()\n\n del l_all\n del l_heatonly\n\n del l_FidGamma\n del l_S1Gamma\n del l_S2Gamma \n\n del l_S1Beta \n del l_S2Beta \n\n del l_S1Pb \n del l_S2Pb", "def extract_waveforms(signal, fs, spikes_idx, pre, post):\n cutouts = []\n pre_idx = int(pre * fs)\n post_idx = int(post * fs)\n for index in spikes_idx:\n if index-pre_idx >= 0 and index+post_idx <= signal.shape[0]:\n cutout = signal[(index-pre_idx):(index+post_idx)]\n cutouts.append(cutout)\n return np.stack(cutouts)", "def seed_plots(self, bcut=5, subset=None, title=None):\n z = self.seeds if subset is None else self.seeds[subset]\n fig,axx= plt.subplots(1,3, figsize=(12,4))\n plt.subplots_adjust(left=0.1)\n bc = np.abs(z.b)<bcut\n histkw=dict(histtype='step', lw=2)\n def all_plot(ax, q, dom, label):\n ax.hist(q.clip(dom[0],dom[-1]),dom, **histkw)\n ax.hist(q[bc].values.clip(dom[0],dom[-1]),dom, color='orange', label='|b|<%d'%bcut, **histkw)\n plt.setp(ax, xlabel=label, xlim=(None,dom[-1]))\n ax.grid()\n ax.legend(prop=dict(size=10))\n all_plot(axx[0], z['size'], np.linspace(0.5,10.5,11), 'cluster size')\n all_plot(axx[1], z.ts, np.linspace(0,50,26), 'TS')\n all_plot(axx[2], np.sin(np.radians(z.b)), np.linspace(-1,1,41), 'sin(b)')\n axx[2].axvline(0, color='k')\n fig.suptitle('{} {} seeds from model {}'.format( len(z), self.tsname, self.input_model,)\n if title is None else title)\n fig.set_facecolor('white')\n return fig", "def get_eff(arrays_iterator, cut_function, cut_values):\n n_cuts = len(cut_values)\n n_total = np.zeros(n_cuts)\n n_pass = np.zeros(n_cuts)\n for arrays, dataset in arrays_iterator:\n weight = dataset.get_weight()\n for i_cut, cut in enumerate(cut_values):\n this_n_pass, this_n_total = cut_function(arrays, cut)\n n_total[i_cut] += weight * this_n_total\n n_pass[i_cut] += weight * this_n_pass\n # Basically n_pass / n_total, but returns 0 if n_total has a 0 somewhere\n eff = np.divide(n_pass, n_total, out=np.zeros_like(n_pass), where=n_total!=0)\n return eff, n_pass, n_total", "def generate_signals(symbol, period=default_period, std=default_std, refresh=False, start_date=config.start_date, end_date=config.end_date):\n\n bb(symbol, period, std, refresh=False, start_date=start_date, end_date=end_date)\n df = pd.read_csv(utils.get_file_path(config.ta_data_path, table_filename, symbol=symbol), index_col=\"Date\", parse_dates=[\"Date\"])[start_date:end_date]\n\n signal_column_name = get_signal_name(period=period, std=std)\n if signal_column_name not in df.columns:\n lower_column_name = \"Lower\"\n upper_column_name = \"Upper\"\n\n conditions = [\n ((df[\"Close\"].shift(1) 
> df[lower_column_name].shift(1)) & (df[\"Close\"] < df[lower_column_name])), # price crosses lower band; buy signal\n ((df[\"Close\"].shift(1) < df[upper_column_name].shift(1)) & (df[\"Close\"] > df[upper_column_name])), # price crosses upper band; sell signal\n False, # ((df[\"Close\"].shift(1) < df[\"Mid\"].shift(1)) & (df[\"Close\"] > df[\"Mid\"])) # bb breaches the mid line after a buy signal, soft sell\n False # ((df[\"Close\"].shift(1) > df[\"Mid\"].shift(1)) & (df[\"Close\"] < df[\"Mid\"])) # bb breaches the mid line after a sell signal, soft buy\n ]\n\n df[signal_column_name] = np.select(conditions, ta.signals, default=ta.default_signal)\n utils.debug(df[signal_column_name])\n df.to_csv(utils.get_file_path(config.ta_data_path, table_filename, symbol=symbol))\n\n return df[signal_column_name]", "def generate_cut_labels(var, bin_edges, bottom_inclusive=True):\n incl = '=' if bottom_inclusive else ''\n return ['{low:g} <{incl} {var} < {high:g}'.format(var=var, low=bin_low,\n high=bin_high, incl=incl)\n for (bin_low, bin_high) in bin_edges]", "def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):\n eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)\n return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)", "def add_bucketing_callbacks(self, X, y):\n app = self.app\n add_common_callbacks(self)\n\n @app.callback(\n [Output(\"input_map\", \"value\")],\n [\n Input(\"input_column\", \"value\"),\n ],\n )\n def update_input_map(col):\n \"\"\"Update bucketer map.\"\"\"\n input_map = self.features_bucket_mapping_.get(col).map\n col_type = self.features_bucket_mapping_.get(col).type\n\n if col_type == \"categorical\":\n # We also allow for treating numerical as categoricals\n # if key is a string, we'll need to quote them\n if isinstance(list(input_map.keys())[0], str):\n str_repr = \",\\n\\t\".join([f\"'{k}': {v}\" for k, v in input_map.items()])\n else:\n str_repr = \",\\n\\t\".join([f\"{k}: {v}\" for k, v in input_map.items()])\n str_repr = f\"{{\\n\\t{str_repr}\\n}}\"\n else:\n str_repr = str(input_map)\n return [str_repr]\n\n @app.callback(\n [Output(\"input_map_helptext\", \"children\")],\n [\n Input(\"input_column\", \"value\"),\n ],\n )\n def update_input_map_feedback(col):\n col_type = self.features_bucket_mapping_.get(col).type\n right = self.features_bucket_mapping_.get(col).right\n if col_type == \"categorical\":\n msg = \"Edit the prebucket mapping dictionary, e.g. {'value' : 'pre-bucket'}\"\n if col_type == \"numerical\" and right:\n msg = \"Edit the prebucket mapping boundaries. \"\n msg += \"Values up to and including the boundary are put into a bucket (right=True)\"\n if col_type == \"numerical\" and not right:\n msg = \"Edit the prebucket mapping boundaries. 
\"\n msg += \"Values up to but not including the boundary are put into a bucket (right=False)\"\n return [msg]\n\n @app.callback(\n [\n Output(\"bucket_table\", \"data\"),\n Output(\"graph-bucket\", \"figure\"),\n Output(\"input_map\", \"invalid\"),\n Output(\"input_map_feedback\", \"children\"),\n ],\n [Input(\"input_map\", \"value\")],\n [State(\"input_column\", \"value\")],\n )\n def get_bucket_table(input_map, col):\n \"\"\"Loads the table and the figure, when the input_map changes.\"\"\"\n col_type = self.features_bucket_mapping_.get(col).type\n\n # Load the object from text input into python object\n if col_type == \"numerical\":\n try:\n input_map = json.loads(input_map)\n assert len(input_map) > 0\n except Exception:\n msg = \"Make sure the input is properly formatted as a list\"\n return no_update, no_update, True, [msg]\n # validate input\n if not is_increasing(input_map):\n return no_update, no_update, True, [\"Make sure the list values are in increasing order\"]\n else:\n try:\n # note using ast.literal_eval is not safe\n # for use when you don't trust the user input\n # in this case, it's a local user using his/her own kernel\n # note: we're using literal_eval because JSON enforces quoted keys\n input_map = ast.literal_eval(input_map)\n # re-sort on value, key\n input_map = dict(sorted(input_map.items(), key=lambda x: (x[1], x[0])))\n except Exception:\n msg = \"Make sure the input is properly formatted as a dictionary\"\n return no_update, no_update, True, [msg]\n # validate input\n if not min(input_map.values()) == 0:\n msg = \"Dictionary values (buckets) must start at 0\"\n return no_update, no_update, True, [msg]\n if not is_sequential(list(input_map.values())):\n msg = \"Dictionary values (buckets) must be sequentially increasing with steps of 1\"\n return no_update, no_update, True, [msg]\n\n # Update the fit for this specific column\n special = self.features_bucket_mapping_.get(col).specials\n right = self.features_bucket_mapping_.get(col).right\n # Note we passed X, y to add_bucketing_callbacks() so they are available here.\n # make sure to re-generate the summary table\n self._update_column_fit(\n X=X, y=y, feature=col, special=special, splits=input_map, right=right, generate_summary=True\n )\n\n # Retrieve the new bucket tables and plots\n table = self.bucket_table(col)\n # unsupervised bucketers don't have an event rate.\n if \"Event Rate\" in table.columns:\n table[\"Event Rate\"] = round(table[\"Event Rate\"] * 100, 2)\n fig = self.plot_bucket(col)\n # remove title from plot\n fig.update_layout(title=\"\")\n return table.to_dict(\"records\"), fig, False, no_update", "def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Plot the base line\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n # Plot the single roccurve\n line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)\n line.set_label(bkgs[0].get_category())\n # Plot settings\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n return ax", "def cchalf(dataframe, function, bins):\n dist = dataframe.set_index(['H', 'K', 'L'])['D'].drop_duplicates()\n dmin = dist.min()\n dmax = dist.max()\n binedges = np.linspace(dmin**-2, dmax**-2, bins+1)**-0.5\n binedges = 
list(zip(binedges[:-1], binedges[1:]))\n a,b = split(dataframe)\n xval_a, xval_b = function(a), function(b)\n#TODO: Fix this awful hack\n key = [i for i in xval_a if i!='D'][0]\n xval_a, xval_b = xval_a.join(dist),xval_b.join(dist)\n idx = xval_a.index.intersection(xval_b.index)\n xval_a,xval_b = xval_a.loc[idx],xval_b.loc[idx]\n cchalf = []\n for dmin,dmax in binedges:\n idx = (xval_a['D'] > dmin) & (xval_a['D'] < dmax)\n a = np.array(xval_a[idx][key]).flatten()\n b = np.array(xval_b[idx][key]).flatten()\n cchalf.append(np.corrcoef(a,b)[0, 1])\n return cchalf, binedges", "def setCutFile(self, cutfile):\n self.cutfile = cutfile\n self.cuts = {}\n with open(cutfile) as f:\n for l in f:\n llist = l.strip().split()\n cut = str(int(llist[0])).zfill(2)\n seq = cutinfo11[cut][1]\n self.cuts[cut] = [readQIE.sequences(elem,seq) for elem in llist[1:]]", "def component_func(B, C, F, list_funcs, len_kegg, threshold=0.05):\n\n size_label = 18\n size_tick = 18\n sns.set_style(\"darkgrid\")\n\n list_funcs = list_funcs[0:len_kegg]\n B = B[0:len_kegg]\n C = C[0:len_kegg]\n\n idx_p = [idx*2+0 for idx in range(B.shape[1]/2)]\n idx_m = [idx*2+1 for idx in range(B.shape[1]/2)]\n is_p = F[:,idx_p].mean(axis=1) - F[:,idx_m].mean(axis=1) > threshold\n is_m = F[:,idx_m].mean(axis=1) - F[:,idx_p].mean(axis=1) > threshold\n is_p = [idx for idx, val in enumerate(is_p) if val]\n is_m = [idx for idx, val in enumerate(is_m) if val]\n is_n = [idx for idx in range(C.shape[1]) if (idx not in is_p) and (idx not in is_m)]\n\n fig = plt.figure(figsize=(6, 6))\n\n for idx_c in range(C.shape[1]):\n clone_num = idx_c +1\n if idx_c in is_p:\n color=\"green\"\n label = \"C\"+str(clone_num)+\"|P\"\n elif idx_c in is_m:\n color=\"red\"\n label = \"C\"+str(clone_num)+\"|M\"\n else:\n color = \"royalblue\"\n label = \"C\"+str(clone_num)\n\n if clone_num in [1]:\n marker = \"$1$\"\n elif clone_num in [2]:\n marker = \"$2$\"\n elif clone_num in [3]:\n marker = \"$3$\"\n elif clone_num in [4]:\n marker = \"$4$\"\n elif clone_num in [5]:\n marker = \"$5$\"\n elif clone_num in [6]:\n marker = \"$6$\"\n else:\n print(\"error\")\n plt.plot(C[:,idx_c],range(len_kegg), color=color, marker=marker, markersize=10, linestyle=\"\",label=label, alpha=0.5)\n\n plt.yticks(range(len_kegg), list_funcs)\n plt.xlabel(\"Pathway strength\", fontsize=size_label)\n plt.legend()\n plt.tick_params(labelsize=size_tick-4)\n plt.show()\n ##fig.savefig(\"figures/fig7compfunc.pdf\", bbox_inches=\"tight\")", "def process_data(data, model, cgenes, cutoff, max_outliers, csample = None):\n\n\t#Transforms certain columns from string to numeric\n\tcols = ['CT','Quantity']\n\tdata[cols] = data[cols].apply(pandas.to_numeric, errors='coerce')\n\n\n\t#Marks the Control Genes in a new column in the dataframe\n\tdata['Control'] = data['Target Name'].apply(lambda x: True if str(x) in cgenes else False)\n\n\n\t# Create column 'Ignore' in dataframe to mark rows with NaN values in certain columns \n\tdata['Ignore'] = False\n\tcols = ['Sample Name', 'Target Name', 'Task', 'Reporter', 'CT']\n\tfor col in cols:\n\t\tdata.loc[data[col].isnull(), 'Ignore'] = True\n\n\t\n\n\t# Calls the different processing models depending on the model argument\n\tif model == 'absolute':\n\t\tdata = cleanup_outliers(data, \"Quantity\", cutoff, max_outliers)\n\t\tresults = process_absolute(data)\n\t\n\telif model == 'relative':\n\t\tdata = cleanup_outliers(data, \"CT\", cutoff, max_outliers)\n\t\tresults = process_relative(data)\n\n\telif model == 'stability':\n\t\tdata = cleanup_outliers(data, \"CT\", 
cutoff, max_outliers)\n\t\tresults = process_stability(data, csample)\n\n\treturn results", "def bin_discretize(self, variables=[], bins=3,\n min_const_samples_bin_size=1.0/3):\n self.edges=np.zeros((self.arity.size,bins+1))\n for i in variables:\n un_cnt=np.unique(self.data[:,i],return_counts=True)\n constvals=un_cnt[0][un_cnt[1]>self.data.shape[0]*min_const_samples_bin_size]\n mask=np.ones(self.data.shape[0],dtype=bool)\n if constvals.size>0:\n for j,cv in enumerate(constvals):\n mask*=(self.data[:,i]!=cv)\n self.data[self.data[:,i]==cv,i]=j\n\n size=np.sum(mask)/bins\n sorted_i=np.argsort(self.data[mask,i])\n edges=[self.data[mask,i][sorted_i[int(size*num)-1]] for num in range(1,bins)]\n self.edges[i]=[self.data[mask,i][sorted_i[0]]]+edges+[self.data[mask,i][sorted_i[-1]]]\n self.data[mask,i]=np.searchsorted(edges,self.data[mask,i])+constvals.size\n self.arity[i]=len(edges)+1+constvals.size", "def bookAnalysisJobs(config, cuts, aliases=QFramework.TQTaggable()):\n\n # boolean variable to keep track of whether we are using cutbased or MVA\n mva = config.getTagVString(\"MVA\")\n cutbased = (len(mva)==0)\n\n # if no aliases directly provided,\n if aliases.getNTags() < 1:\n # see if there are any in the config\n aliases.importTagsWithoutPrefix(config,\"cutParameters.\")\n aliases.importTagsWithoutPrefix(config,\"aliases.\")\n\n # TODO: modularize booking of each type of analysis job?\n\n #book cutflows\n if config.getTagBoolDefault(\"cutbased.makeCutflow\",cutbased):\n QFramework.INFO(\"booking cutflow\")\n cutflowjob = QFramework.TQCutflowAnalysisJob(\"cutflowJob\")\n cuts.addAnalysisJob(cutflowjob,\"*\")\n cutbased = True\n\n # TODO: book xAOD skimming here? (worst case just implement in HWW)\n xAODdumpingConfig = QFramework.TQTaggable()\n dumpXAODs = (xAODdumpingConfig.importTagsWithoutPrefix(config,\"xAODdumping.\") > 0)\n if dumpXAODs :\n try:\n flaggingJob = ROOT.TQEventFlaggingAnalysisJob()\n if xAODdumpingConfig.hasTag(\"flagName\"): flaggingJob.setFlagName(xAODdumpingConfig.getTagStringDefault(\"flagName\",\"\"))\n flaggingCuts = xAODdumpingConfig.getTagStringDefault(\"cuts\",\"\")\n print(\"Booking event flagging jobs at cuts: {:s}\".format(flaggingCuts.Data()))\n cuts.addAnalysisJob(flaggingJob,flaggingCuts)\n except NameError:\n QFramework.ERROR(\"Cannot schedule xAOD dumping, required classes are not in your version of CAFCore. Please consider updating CAFCore\")\n\n # add the event flagging for possible unfolding\n unfoldingConfig = QFramework.TQTaggable()\n unfolding = (unfoldingConfig.importTagsWithoutPrefix(config,\"unfolding.\") > 0)\n if unfolding :\n #add a suffix to the cut names for the flags. 
This is needed to prevent cross talk between channels!\n unfoldingFlagSuffix = config.getTagStandardStringDefault(\"~flagSuffix\",\"_$(cand)\")\n unfoldingCuts = unfoldingConfig.getTagVString(\"flagcuts\")\n for cutName in unfoldingCuts:\n fullCutNames = cuts.getCutNames(cutName)\n for fullCutName in fullCutNames:\n unfoldingJob = ROOT.TQEventFlaggingAnalysisJob()\n flagName = fullCutName + unfoldingFlagSuffix\n unfoldingJob.setFlagName(flagName)\n cuts.addAnalysisJob(unfoldingJob,fullCutName)\n\n\n #book histograms (TH1, TH2, TH3, TProfiles,...)\n if ( config.hasTag(\"histograms.0\") or config.hasTag(\"histograms\") ) and config.getTagBoolDefault(\"makeHistograms\",cutbased):\n QFramework.INFO(\"booking histograms\")\n histofiles = config.getTagVString(\"histograms\")\n histofiles = common.findMultipleConfigPathsFromList(histofiles)\n if QFramework.TQHistoMakerAnalysisJob.importJobsFromTextFiles(histofiles,cuts,aliases,\"*\",config.getTagBoolDefault(\"printHistograms\",False)) > 0:\n print(QFramework.TQHistoMakerAnalysisJob.getErrorMessage())\n cutbased = True\n\n #book multi-dim histograms (THn based, i.e., for nDim>3)\n if ( config.hasTag(\"multidimHistograms\") ) and config.getTagBoolDefault(\"makeHistograms\",cutbased):\n QFramework.INFO(\"booking multidimensional histograms\")\n histofiles = config.getTagVString(\"multidimHistograms\")\n histofiles = common.findMultipleConfigPathsFromList(histofiles)\n if QFramework.TQTHnBaseMakerAnalysisJob.importJobsFromTextFiles(histofiles,cuts,aliases,\"*\",config.getTagBoolDefault(\"printHistograms\",False)) > 0:\n print(QFramework.TQTHnBaseMakerAnalysisJob.getErrorMessage())\n cutbased = True\n\n #book graphs\n if ( config.hasTag(\"graphs.0\") or config.hasTag(\"graphs\") ) and config.getTagBoolDefault(\"makeGraphs\",cutbased):\n QFramework.INFO(\"booking graphs\")\n graphfiles = config.getTagVString(\"graphs\")\n graphfiles = findMultipleConfigPathsFromList(graphfiles)\n if QFramework.TQGraphMakerAnalysisJob.importJobsFromTextFiles(graphfiles,cuts,aliases,\"*\",config.getTagBoolDefault(\"printGraphs\",False)) > 0:\n print(QFramework.TQGraphMakerAnalysisJob.getErrorMessage())\n cutbased = True\n\n #book event lists\n if ( config.hasTag(\"eventlists.0\") or config.hasTag(\"eventlists\") ) and config.getTagBoolDefault(\"makeEventLists\",cutbased):\n QFramework.INFO(\"booking eventlists\")\n evtlistfiles = config.getTagVString(\"eventlists\")\n evtlistfiles = common.findMultipleConfigPathsFromList(evtlistfiles)\n if QFramework.TQEventlistAnalysisJob.importJobsFromTextFiles(evtlistfiles,cuts,aliases,\"*\") > 0:\n print(QFramework.TQEventlistAnalysisJob.getErrorMessage())\n cutbased = True\n\n #book Ntuple dumping\n if ( config.hasTag(\"ntuples.0\") or config.hasTag(\"ntuples\") ) and config.getTagBoolDefault(\"dumpNtuples\",cutbased):\n QFramework.INFO(\"preparing to dump ntuples\")\n ntupfiles = config.getTagVString(\"ntuples\")\n ntupfiles = common.findMultipleConfigPathsFromList(ntupfiles)\n if QFramework.TQNTupleDumperAnalysisJob.importJobsFromTextFiles(ntupfiles,cuts,aliases,\"*\",config.getTagBoolDefault(\"printNTuples\",False)) > 0:\n print(QFramework.TQNTupleDumperAnalysisJob.getErrorMessage());\n cutbased = True\n\n runtime = config.getFolder(\"runtime+\")\n runtime.setTagBool(\"cutbased\", cutbased)\n\n return", "def cutting(args):\n import numpy as np\n import h5py\n\n # Read in map data\n with h5py.File(args.pointmap, 'r') as f:\n ptmap = f['map'][...]\n\n if args.threshold > 0:\n cut_map = np.where(ptmap<args.threshold, 0, ptmap)\n 
else:\n idx = np.unravel_index(np.argmax(ptmap), ptmap.shape) # the index of the max element\n cut_map = np.zeros_like(ptmap)\n cut_map[idx] = ptmap[idx]\n\n # Create output image file name\n if args.outfile:\n out_file = args.outfile\n elif args.threshold > 0:\n out_file = ((args.pointmap.split('/')[-1]).split('.')[0]).replace('sim', 'cut') + '_' + str(int(args.threshold)) + '.hdf5'\n else:\n out_file = ((args.pointmap.split('/')[-1]).split('.')[0]).replace('sim', 'cut') + '_max.hdf5'\n\n # Save cut data\n with h5py.File(out_file, 'w') as f:\n f.create_dataset('map', data=cut_map)\n\n print 'done!'", "def Fill(self, *args, **kwargs):\n self._varexp = kwargs.get(\"varexp\")\n self._cuts = kwargs.get(\"cuts\", [])\n self._weight = kwargs.get(\"weight\", \"1\")\n if len(args) == 1 and isinstance(args[0], (str, unicode)):\n IOManager.FillHistogram(self, args[0], **kwargs)\n if not kwargs.get(\"append\", False):\n self._errorband.Reset()\n self._errorband.Add(self)\n else:\n super(Histo1D, self).Fill(*args)", "def test_1d_cut():\n \n dic,data = ng.pipe.read(\"common_data/1d_pipe/test_cut.ft\")\n assert data.shape == (2766,)\n assert data.dtype == 'float32'\n assert round(data[0],2) == -12123.67\n assert round(data[1],2) == -8979.31\n assert round(data[100],2) == -7625.30\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[278.59, 10.03])", "def eval_BC(Teff,logg,FeH,filt=\"g\",allBCs=None):\n if allBCs is None: allBCs = read_bc_table()\n \n BCs = allBCs[filt]\n \n points = np.atleast_2d([np.ravel(Teff),np.ravel(logg),np.ravel(FeH)]).T\n points[points[:,2] < -2.5,2] = -2.5\n out = interpolate.griddata(BCs[:,0:3], BCs[:,3], points, method='linear')\n return out", "def choose_split(data,treshold):\n n_features = len(data[0]) - 1 # number of columns\n quest_gain = [] # keep track of the gains and questions\n\n for col in range(1,n_features): # for each feature\n values = set([row[col] for row in data]) # unique values in the column\n for val in values: # for each value\n question = Question(col, val)\n \n # try splitting the dataset\n true_rows, false_rows = partition(data, question)\n\n # Skip this split if it doesn't divide the dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(data, true_rows, false_rows)\n quest_gain.append(Question_gain(gain,question))\n\n possible_question = [] # possible questions to ask\n n_quest_gain = len(quest_gain)\n\n if n_quest_gain == 0:\n return float('Inf'), float('NaN') #\n\n for x in range(n_quest_gain):\n if (quest_gain[x].gain >= treshold):\n possible_question.append(Question_gain(quest_gain[x].gain,quest_gain[x].question))\n \n n_possible_question = len(possible_question)\n if n_possible_question == 0:\n return float('Inf'), float('NaN')\n\n if n_possible_question>=2:\n [i, j] = random.sample(range(0, n_possible_question), 2)\n else:\n i = j = random.randint(0,n_possible_question-1)\n\n if possible_question[i].gain>=possible_question[j].gain:\n return possible_question[i].gain, possible_question[i].question\n else:\n return possible_question[j].gain, possible_question[j].question", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=2):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n \n # butter() and lfilter() are from scipy.signal\n \n b, a = butter(order, [low, high], btype='band')\n y = lfilter(b, a, data)\n return y", "def fid_cuts(ptname, etaname):\n cuts = []\n cuts.append(combine_cuts([ptname + ' > 4.5',\n 'TMath::Abs(' + etaname + 
') < 1.2']))\n cuts.append(combine_cuts([ptname + ' > 4.0',\n var_selection('TMath::Abs('+etaname+')', 1.2, 1.4)\n ]))\n cuts.append(combine_cuts([ptname + ' > 3.5',\n var_selection('TMath::Abs('+etaname+')', 1.4, 1.6)\n ]))\n return combine_cuts(cuts, ' || ')", "def cutout(od,\n varList = None,\n YRange = None,\n XRange = None,\n add_Hbdr = False,\n mask_outside = False,\n ZRange = None,\n add_Vbdr = False,\n timeRange = None,\n timeFreq = None,\n sampMethod = 'snapshot',\n dropAxes = False):\n \n # Check\n for wrong_dim in ['mooring', 'station', 'particle']:\n if wrong_dim in od._ds.dims and (XRange is not None or YRange is not None):\n raise ValueError('`cutout` cannot subsample in the horizontal plain oceandatasets with dimension [{}]'.format(wrong_dim))\n \n # Convert variables to numpy arrays and make some check\n if not isinstance(od, _ospy.OceanDataset):\n raise TypeError('`od` must be OceanDataset')\n \n if varList is not None:\n varList = _np.asarray(varList, dtype='str')\n if varList.ndim == 0: varList = varList.reshape(1)\n elif varList.ndim >1: raise TypeError('Invalid `varList`')\n \n if not isinstance(add_Hbdr, (float, int, bool)):\n raise TypeError('`add_Hbdr` must be float, int, or bool')\n \n if not isinstance(mask_outside, bool):\n raise TypeError('`add_Hbdr` must be bool')\n \n if YRange is not None:\n YRange = _np.asarray(YRange, dtype=od._ds['YG'].dtype)\n if YRange.ndim == 0: YRange = YRange.reshape(1)\n elif YRange.ndim >1: raise TypeError('Invalid `YRange`')\n Ymax = od._ds['YG'].max().values\n Ymin = od._ds['YG'].min().values\n if any(YRange<Ymin) or any(YRange>Ymax):\n _warnings.warn(\"\\nThe Y range of the oceandataset is: {}\"\n \"\\nYRange has values outside the oceandataset range.\".format([Ymin, Ymax]), stacklevel=2)\n \n if XRange is not None:\n XRange = _np.asarray(XRange, dtype=od._ds['XG'].dtype)\n if XRange.ndim == 0: XRange = XRange.reshape(1)\n elif XRange.ndim >1: raise TypeError('Invalid `XRange`')\n Xmax = od._ds['XG'].max().values\n Xmin = od._ds['XG'].min().values\n if any(XRange<Xmin) or any(XRange>Xmax):\n _warnings.warn(\"\\nThe X range of the oceandataset is: {}\"\n \"\\nXRange has values outside the oceandataset range.\".format([Xmin, Xmax]), stacklevel=2)\n if ZRange is not None:\n ZRange = _np.asarray(ZRange, dtype=od._ds['Zp1'].dtype)\n if ZRange.ndim == 0: ZRange = ZRange.reshape(1)\n elif ZRange.ndim >1: raise TypeError('Invalid `ZRange`')\n Zmax = od._ds['Zp1'].max().values\n Zmin = od._ds['Zp1'].min().values\n if any(ZRange<Zmin) or any(ZRange>Zmax):\n _warnings.warn(\"\\nThe Z range of the oceandataset is: {}\"\n \"\\nZRange has values outside the the oceandataset range.\".format([Zmin, Zmax]), stacklevel=2)\n \n if timeRange is not None:\n timeRange = _np.asarray(timeRange, dtype=od._ds['time'].dtype)\n if timeRange.ndim == 0: timeRange = timeRange.reshape(1)\n elif timeRange.ndim >1: raise TypeError('Invalid `timeRange`')\n timemax = od._ds['time'].max().values\n timemin = od._ds['time'].min().values\n if any(timeRange<timemin) or any(timeRange>timemax):\n _warnings.warn(\"\\nThe time range of the oceandataset is: {}\"\n \"\\ntimeRange has values outside the the oceandataset range.\".format([timemin, timemax]), stacklevel=2)\n \n if not isinstance(timeFreq, (str, type(None))):\n raise TypeError('`timeFreq` must None or str')\n \n sampMethod_list = ['snapshot', 'mean']\n if sampMethod not in sampMethod_list:\n raise ValueError('[{}] is not an available `sampMethod`.'\n '\\nOptions: {}'.format(sampMethod, sampMethod_list))\n \n if 
not isinstance(dropAxes, bool):\n dropAxes = _np.asarray(dropAxes, dtype='str')\n if dropAxes.ndim == 0: dropAxes = dropAxes.reshape(1)\n elif dropAxes.ndim >1: raise TypeError('Invalid `dropAxes`')\n axis_error = [axis for axis in dropAxes if axis not in od.grid_coords]\n if len(axis_error)!=0:\n raise ValueError('{} are not in od.grid_coords and can not be dropped'.format(axis_error))\n dropAxes = {d: od.grid_coords[d] for d in dropAxes}\n elif dropAxes is True:\n dropAxes = od.grid_coords\n if YRange is None : dropAxes.pop('Y', None)\n if XRange is None : dropAxes.pop('X', None)\n if ZRange is None : dropAxes.pop('Z', None)\n if timeRange is None: dropAxes.pop('time', None)\n else:\n dropAxes = {}\n \n # Message\n print('Cutting out the oceandataset.')\n \n # Copy\n od = _copy.copy(od)\n \n # Unpack\n ds = od._ds\n periodic = od.grid_periodic\n \n # ---------------------------\n # Horizontal CUTOUT\n # ---------------------------\n \n if add_Hbdr is True:\n add_Hbdr = (_np.mean([_np.fabs(od._ds['XG'].max() - od._ds['XG'].min()),\n _np.fabs(od._ds['YG'].max() - od._ds['YG'].min())]) / \n _np.mean([len(od._ds['X']), len(od._ds['Y'])]))\n elif add_Hbdr is False:\n add_Hbdr = 0\n \n if add_Vbdr is True:\n add_Vbdr = _np.fabs(od._ds['Zp1'].diff('Zp1')).max().values\n elif add_Vbdr is False:\n add_Vbdr = 0\n \n # Initialize horizontal mask\n if XRange is not None or YRange is not None:\n maskH = _xr.ones_like(ds['XG'])\n\n if YRange is not None: \n # Use arrays\n YRange = _np.asarray([_np.min(YRange)-add_Hbdr, _np.max(YRange)+add_Hbdr]).astype(ds['YG'].dtype)\n\n # Get the closest \n for i, Y in enumerate(YRange):\n diff = _np.fabs(ds['YG']-Y)\n YRange[i] = ds['YG'].where(diff==diff.min()).min().values \n maskH = maskH.where(_np.logical_and(ds['YG']>=YRange[0], ds['YG']<=YRange[-1]), 0)\n maskHY = maskH\n\n if XRange is not None:\n # Use arrays\n XRange = _np.asarray([_np.min(XRange)-add_Hbdr, _np.max(XRange)+add_Hbdr]).astype(ds['XG'].dtype)\n\n # Get the closest \n for i, X in enumerate(XRange):\n diff = _np.fabs(ds['XG']-X)\n XRange[i] = ds['XG'].where(diff==diff.min()).min().values \n maskH = maskH.where(_np.logical_and(ds['XG']>=XRange[0], ds['XG']<=XRange[-1]), 0)\n\n # Can't be all zeros\n if maskH.sum()==0: raise ValueError('Zero grid points in the horizontal range')\n\n # Find horizontal indexes\n maskH['Yp1'].values = _np.arange(len(maskH['Yp1']))\n maskH['Xp1'].values = _np.arange(len(maskH['Xp1']))\n dmaskH = maskH.where(maskH, drop=True)\n dYp1 = dmaskH['Yp1'].values\n dXp1 = dmaskH['Xp1'].values\n iY = [_np.min(dYp1), _np.max(dYp1)]\n iX = [_np.min(dXp1), _np.max(dXp1)]\n maskH['Yp1'] = ds['Yp1']\n maskH['Xp1'] = ds['Xp1']\n \n # Original length\n lenY = len(ds['Yp1'])\n lenX = len(ds['Xp1']) \n \n # Indexis\n if iY[0]==iY[1]:\n if 'Y' not in dropAxes:\n if iY[0]>0: iY[0]=iY[0]-1\n else: iY[1]=iY[1]+1\n else: dropAxes.pop('Y', None)\n \n\n if iX[0]==iX[1]:\n if 'X' not in dropAxes:\n if iX[0]>0: iX[0]=iX[0]-1\n else: iX[1]=iX[1]+1\n else: dropAxes.pop('X', None)\n \n # Cutout\n ds = ds.isel(Yp1 = slice(iY[0], iY[1]+1),\n Xp1 = slice(iX[0], iX[1]+1))\n \n if 'X' in dropAxes:\n if iX[0]==len(ds['X']):\n iX[0]=iX[0]-1\n iX[1]=iX[1]-1\n ds = ds.isel(X = slice(iX[0], iX[1]+1))\n elif (('outer' in od._grid.axes['X'].coords and od._grid.axes['X'].coords['outer'].name == 'Xp1') or \n ('left' in od._grid.axes['X'].coords and od._grid.axes['X'].coords['left'].name == 'Xp1')):\n ds = ds.isel(X = slice(iX[0], iX[1]))\n elif 'right' in od._grid.axes['X'].coords and 
od._grid.axes['X'].coords['right'].name =='Xp1':\n ds = ds.isel(X = slice(iX[0]+1, iX[1]+1)) \n \n if 'Y' in dropAxes:\n if iY[0]==len(ds['Y']):\n iY[0]=iY[0]-1\n iY[1]=iY[1]-1\n ds = ds.isel(Y = slice(iY[0], iY[1]+1))\n elif (('outer' in od._grid.axes['Y'].coords and od._grid.axes['Y'].coords['outer'].name == 'Yp1') or \n ('left' in od._grid.axes['Y'].coords and od._grid.axes['Y'].coords['left'].name == 'Yp1')):\n ds = ds.isel(Y = slice(iY[0], iY[1]))\n elif 'right' in od._grid.axes['Y'].coords and od._grid.axes['Y'].coords['right'].name =='Yp1':\n ds = ds.isel(Y = slice(iY[0]+1, iY[1]+1))\n \n # Cut axis can't be periodic\n if (len(ds['Yp1']) < lenY or 'Y' in dropAxes) and 'Y' in periodic: periodic.remove('Y')\n if (len(ds['Xp1']) < lenX or 'X' in dropAxes) and 'X' in periodic: periodic.remove('X')\n \n # ---------------------------\n # Vertical CUTOUT\n # ---------------------------\n \n # Initialize vertical mask\n maskV = _xr.ones_like(ds['Zp1'])\n \n if ZRange is not None:\n # Use arrays\n ZRange = _np.asarray([_np.min(ZRange)-add_Vbdr, _np.max(ZRange)+add_Vbdr]).astype(ds['Zp1'].dtype)\n \n # Get the closest \n for i, Z in enumerate(ZRange):\n diff = _np.fabs(ds['Zp1']-Z)\n ZRange[i] = ds['Zp1'].where(diff==diff.min()).min().values \n maskV = maskV.where(_np.logical_and(ds['Zp1']>=ZRange[0], ds['Zp1']<=ZRange[-1]), 0) \n \n # Find vertical indexes\n maskV['Zp1'].values = _np.arange(len(maskV['Zp1']))\n dmaskV = maskV.where(maskV, drop=True)\n dZp1 = dmaskV['Zp1'].values\n iZ = [_np.min(dZp1), _np.max(dZp1)]\n maskV['Zp1'] = ds['Zp1']\n \n # Original length\n lenZ = len(ds['Zp1']) \n \n # Indexis\n if iZ[0]==iZ[1]:\n if 'Z' not in dropAxes:\n if iZ[0]>0: iZ[0]=iZ[0]-1\n else: iZ[1]=iZ[1]+1\n else: dropAxes.pop('Z', None)\n \n # Cutout\n ds = ds.isel(Zp1 = slice(iZ[0], iZ[1]+1))\n if 'Z' in dropAxes:\n if iZ[0]==len(ds['Z']):\n iZ[0]=iZ[0]-1\n iZ[1]=iZ[1]-1\n ds = ds.isel(Z = slice(iZ[0], iZ[1]+1))\n if 'Zu' in ds.dims and len(ds['Zu'])>1:\n ds = ds.sel(Zu=ds['Zp1'].values, method='nearest')\n if 'Zl' in ds.dims and len(ds['Zl'])>1:\n ds = ds.sel(Zl=ds['Zp1'].values, method='nearest')\n \n else:\n ds = ds.isel(Z = slice(iZ[0], iZ[1]))\n \n if 'Zu' in ds.dims and len(ds['Zu'])>1:\n ds = ds.sel(Zu = slice(ds['Zp1'].isel(Zp1=0).values, ds['Zp1'].isel(Zp1=-1).values))\n\n if 'Zl' in ds.dims and len(ds['Zl'])>1:\n ds = ds.sel(Zl = slice(ds['Zp1'].isel(Zp1=0).values, ds['Z'].isel(Z=-1).values))\n \n # Cut axis can't be periodic\n if (len(ds['Z']) < lenZ or 'Z' in dropAxes) and 'Z' in periodic: periodic.remove('Z')\n \n # ---------------------------\n # Time CUTOUT\n # ---------------------------\n \n # Initialize vertical mask\n maskT = _xr.ones_like(ds['time']).astype('int')\n \n if timeRange is not None:\n \n # Use arrays\n timeRange = _np.asarray([_np.min(timeRange), _np.max(timeRange)]).astype(ds['time'].dtype)\n \n # Get the closest \n for i, time in enumerate(timeRange):\n if _np.issubdtype(ds['time'].dtype, _np.datetime64):\n diff = _np.fabs(ds['time'].astype('float64') - time.astype('float64'))\n else:\n diff = _np.fabs(ds['time']-time)\n timeRange[i] = ds['time'].where(diff==diff.min()).min().values \n # return maskT, ds['time'], timeRange[0], timeRange[-1]\n maskT = maskT.where(_np.logical_and(ds['time']>=timeRange[0], ds['time']<=timeRange[-1]), 0) \n \n # Find vertical indexes\n maskT['time'].values = _np.arange(len(maskT['time']))\n dmaskT = maskT.where(maskT, drop=True)\n dtime = dmaskT['time'].values\n iT = [min(dtime), max(dtime)]\n maskT['time'] = ds['time']\n \n # Original 
length\n lenT = len(ds['time'])\n \n # Indexis\n if iT[0]==iT[1]:\n if 'time' not in dropAxes:\n if iT[0]>0: iT[0]=iT[0]-1\n else: iT[1]=iT[1]+1\n else: dropAxes.pop('time', None)\n \n # Cutout\n ds = ds.isel(time = slice(iT[0], iT[1]+1))\n if 'time' in dropAxes:\n if iT[0]==len(ds['time_midp']):\n iT[0]=iT[0]-1\n iT[1]=iT[1]-1\n ds = ds.isel(time_midp = slice(iT[0], iT[1]+1))\n else:\n ds = ds.isel(time_midp = slice(iT[0], iT[1]))\n \n # Cut axis can't be periodic\n if (len(ds['time']) < lenT or 'T' in dropAxes) and 'time' in periodic: periodic.remove('time')\n \n # ---------------------------\n # Horizontal MASK\n # ---------------------------\n \n if mask_outside and (YRange is not None or XRange is not None):\n if YRange is not None: minY = YRange[0]; maxY = YRange[1]\n else: minY = ds['YG'].min().values; maxY = ds['YG'].max().values\n if XRange is not None: minX = XRange[0]; maxX = XRange[1]\n else: minX = ds['XG'].min().values; maxX = ds['XG'].max().values \n \n maskC = _xr.where(_np.logical_and(_np.logical_and(ds['YC']>=minY, ds['YC']<=maxY),\n _np.logical_and(ds['XC']>=minX, ds['XC']<=maxX)), 1,0).persist()\n maskG = _xr.where(_np.logical_and(_np.logical_and(ds['YG']>=minY, ds['YG']<=maxY),\n _np.logical_and(ds['XG']>=minX, ds['XG']<=maxX)), 1,0).persist()\n maskU = _xr.where(_np.logical_and(_np.logical_and(ds['YU']>=minY, ds['YU']<=maxY),\n _np.logical_and(ds['XU']>=minX, ds['XU']<=maxX)), 1,0).persist()\n maskV = _xr.where(_np.logical_and(_np.logical_and(ds['YV']>=minY, ds['YV']<=maxY),\n _np.logical_and(ds['XV']>=minX, ds['XV']<=maxX)), 1,0).persist()\n for var in ds.data_vars:\n if set(['X', 'Y']).issubset(ds[var].dims): ds[var] = ds[var].where(maskC)\n elif set(['Xp1', 'Yp1']).issubset(ds[var].dims): ds[var] = ds[var].where(maskG)\n elif set(['Xp1', 'Y']).issubset(ds[var].dims): ds[var] = ds[var].where(maskU)\n elif set(['X', 'Yp1']).issubset(ds[var].dims): ds[var] = ds[var].where(maskV)\n \n # ---------------------------\n # TIME RESAMPLING\n # ---------------------------\n # Resample in time\n if timeFreq:\n \n # Infer original frequency\n inFreq=_pd.infer_freq(ds.time.values); \n if timeFreq[0].isdigit() and not inFreq[0].isdigit(): inFreq='1'+inFreq\n \n # Same frequency: Skip\n if timeFreq==inFreq:\n _warnings.warn(\"\\nInput time freq: [{}] = Output time frequency: [{}]:\"\n \"\\nSkip time resampling.\".format(inFreq, timeFreq), stacklevel=2)\n \n else:\n \n # Remove time_midp and warn\n vars2drop = [var for var in ds.variables if 'time_midp' in ds[var].dims]\n if vars2drop:\n _warnings.warn(\"\\nTime resampling drops variables on `time_midp` dimension.\"\n \"\\nDropped variables: {}.\".format(vars2drop), stacklevel=2)\n ds = ds.drop(vars2drop)\n if 'time_midp' in ds.dims: ds = ds.drop('time_midp')\n \n # Snapshot\n if sampMethod=='snapshot': \n # Find new times\n newtime = ds['time'].sel(time=ds['time'].resample(time=timeFreq).first())\n\n # Use slice when possible\n inds = [i for i, t in enumerate(ds['time'].values) if t in newtime.values]\n inds_diff = _np.diff(inds)\n if all(inds_diff==inds_diff[0]): \n ds = ds.isel(time = slice(inds[0], inds[-1]+1, inds_diff[0]))\n else: \n # TODO: is this an xarray bug od just bad chunking/bad coding/bad SciServe compute performances?\n # Make test case and open issue!\n attrs = ds.attrs\n ds = _xr.concat([ds.sel(time = time) for i, time in enumerate(newtime)], dim='time')\n ds.attrs = attrs\n # Mean\n elif sampMethod=='mean':\n\n # Separate time and timeless\n attrs = ds.attrs\n ds_dims = ds.drop([var for var in ds.variables if 
not var in ds.dims])\n ds_time = ds.drop([var for var in ds.variables if not 'time' in ds[var].dims])\n ds_timeless = ds.drop([var for var in ds.variables if 'time' in ds[var].dims])\n\n # Resample\n ds_time = ds_time.resample(time=timeFreq).mean('time')\n\n # Add all dimensions to ds, and fix attributes\n for dim in ds_time.dims:\n if dim=='time': ds_time[dim].attrs = ds_dims[dim].attrs\n else: ds_time[dim] = ds_dims[dim]\n\n # Merge\n ds = _xr.merge([ds_time, ds_timeless])\n ds.attrs = attrs\n \n # Update oceandataset\n od._ds = ds\n \n # Add time midp\n if timeFreq and 'time' not in dropAxes:\n od = od.set_grid_coords({**od.grid_coords, 'time' : {'time': -0.5}}, add_midp=True, overwrite=True)\n\n # Drop axes\n grid_coords = od.grid_coords\n for coord in list(grid_coords): \n if coord in dropAxes: grid_coords.pop(coord, None)\n od = od.set_grid_coords(grid_coords, overwrite=True)\n \n # Cut axis can't be periodic \n od = od.set_grid_periodic(periodic, overwrite = True)\n \n # Drop variables\n if varList is not None: \n if isinstance(varList, str): varList = [varList]\n \n # Compute missing variables\n od = _compute._add_missing_variables(od, varList)\n \n # Drop useless\n od._ds = od._ds.drop([v for v in od._ds.variables if (v not in od._ds.dims and v not in od._ds.coords and v not in varList)])\n \n return od", "def cut_tof_event(data_dict, plot_dict, event) :\n event_spacepoints = event.GetTOFEventSpacePoint()\n\n tof0_sp_size = event_spacepoints.GetTOF0SpacePointArraySize()\n tof1_sp_size = event_spacepoints.GetTOF1SpacePointArraySize()\n tof2_sp_size = event_spacepoints.GetTOF2SpacePointArraySize()\n\n if tof0_sp_size < 1 or tof1_sp_size < 1 or tof2_sp_size < 1 :\n return True\n\n tof0_sp = event_spacepoints.GetTOF0SpacePointArrayElement(0)\n tof1_sp = event_spacepoints.GetTOF1SpacePointArrayElement(0)\n tof2_sp = event_spacepoints.GetTOF2SpacePointArrayElement(0)\n\n if tof1_sp_size != 1 or tof2_sp_size != 1 :\n return True\n\n diff_0_1 = tof1_sp.GetTime() - tof0_sp.GetTime()\n diff_1_2 = tof2_sp.GetTime() - tof1_sp.GetTime()\n\n plot_dict['tof_0_1'].Fill( diff_0_1 )\n plot_dict['tof_1_2'].Fill( diff_1_2 )\n\n if diff_1_2 < TOF_CUT_LOW or diff_1_2 > TOF_CUT_HIGH :\n return True\n\n plot_dict['tof_0_1_cut'].Fill( tof1_sp.GetTime() - tof0_sp.GetTime() )\n plot_dict['tof_1_2_cut'].Fill( tof2_sp.GetTime() - tof1_sp.GetTime() )\n\n return False", "def pick_signals(processor, source = 'input'):\n\n if source == 'input':\n bin_edges = processor.input_parameters['bin_edges']\n raw_signal = processor.input_signal\n elif source == 'output':\n bin_edges = processor.output_parameters['bin_edges']\n raw_signal = processor.output_signal\n else:\n raise ValueError('Unknown value for the data source')\n t = np.zeros(len(raw_signal)*4)\n bins = np.zeros(len(raw_signal)*4)\n signal = np.zeros(len(raw_signal)*4)\n value = 1.\n\n for i, edges in enumerate(bin_edges):\n t[4*i] = edges[0]\n t[4*i+1] = edges[0]\n t[4*i+2] = edges[1]\n t[4*i+3] = edges[1]\n bins[4*i] = 0.\n bins[4*i+1] = value\n bins[4*i+2] = value\n bins[4*i+3] = 0.\n signal[4*i] = 0.\n signal[4*i+1] = raw_signal[i]\n signal[4*i+2] = raw_signal[i]\n signal[4*i+3] = 0.\n value *= -1\n\n z = t * c\n return (t, z, bins, signal)", "def butter_filter(datalist):\n fs = 200.00\n fHigh = 50.00\n fLow = 5.00\n N=4\n [b,a]=sg.butter(N,[fLow/fs, fHigh/fs], btype='band')\n global filtered\n #IIR filter\n return sg.filtfilt(b,a,datalist)", "def Flux_init(self, flns, oversample=None, sigma=None, tophat=None, thin=None, wave_cut=None, temp_cut=None, 
logg_cut=None, convert=None, linlog=False, verbose=False):\n ## Reading the parameter information about the spectra\n lst = []\n for i in np.arange(len(flns)):\n print(flns[i])\n ## Get the logg and temp value from the filename\n hdr = pyfits.getheader(flns[i], ext=0)\n temp = hdr['PHXTEFF']\n logg = hdr['PHXLOGG']\n if temp_cut is None or (temp >= temp_cut[0] and temp <= temp_cut[1]):\n print(' temp_cut')\n if logg_cut is None or (logg >= logg_cut[0] and logg <= logg_cut[1]):\n print(' logg_cut')\n lst.append( [i, logg, temp] )\n\n ## Reading the mu values\n self.mu = np.array(pyfits.getdata(flns[0], ext=1), dtype=float)\n n_mu = self.mu.size\n\n ## Sorting the grid by temperature and then logg\n print(lst)\n Utils.Misc.Sort_list(lst, [2,1])\n lst = np.array(lst)\n print(lst)\n\n ## Extracting the temperature values\n self.logtemp = np.log(np.unique(lst[:,2]))\n self.logtemp.sort()\n n_teff = self.logtemp.size\n\n ## Extracting the logg values\n self.logg = np.unique(lst[:,1])\n self.logg.sort()\n n_logg = self.logg.size\n\n ## If there is a mismatch and the grid is not rectangular, then the function aborts\n if n_teff*n_logg != lst.shape[0]:\n print( \"Number of temperature points: {}\".format(n_teff) )\n print( \"Number of logg points: {}\".format(n_logg) )\n print( \"Number of grid points: {}\".format(lst.shape[0]) )\n for teff in self.logtemp:\n for logg in self.logg:\n missing = True\n for l in lst:\n if np.log(l[2]) == teff and l[1] == logg:\n missing = False\n if missing:\n print(\"Missing -> logg: {:3.1f}, temp: {:5.0f}\".format(logg,np.exp(teff)))\n raise Exception( \"There is a mismatch in the number of log(g) and teff grid points!\" )\n return\n\n ## Extracting the data\n grid = []\n wav = []\n if verbose: print( \"Starting to read atmosphere grid files\" )\n for i,l in enumerate(lst[:,0]):\n if verbose: sys.stdout.write( \"Reading {} ({}/{})\\r\".format(flns[int(l)], i+1, lst.shape[0]) ); sys.stdout.flush()\n tmp = Read_AGSS(flns[int(l)], oversample=oversample, sigma=sigma, tophat=tophat, thin=thin, wave_cut=wave_cut, convert=convert, linlog=linlog)\n grid.append(tmp[0])\n wav.append(tmp[1])\n self.z0 = tmp[2]\n logger.log(8, \"Number of wavelength points: {}, range: [{}, {}]\".format(tmp[1].size, tmp[1][0], tmp[1][-1]) )\n if verbose: print( \"\\nFinished reading atmosphere grid files\" )\n try:\n wav = np.array(wav)\n if wav.std(0).max() > 1.e-6:\n raise Exception( \"The wavelength grid is not uniform!\" )\n return\n else:\n wav = wav[0]\n except:\n raise Exception( \"The wavelength grid has an inconsistent number of elements!\" )\n return\n if verbose: print( \"Transforming grid data to array\" )\n grid = np.asarray(grid)\n if verbose: print( \"Addressing the grid data shape\" )\n grid.shape = n_teff, n_logg, n_mu, wav.size\n self.wav = wav\n if verbose: print( \"Making the grid a class attribute\" )\n self.grid = grid\n\n ## Calculating the grid log-to-linear weights\n if linlog:\n self.wav_linear = Utils.Series.Resample_loglin(self.wav)\n self.wav_delta = self.wav_linear[1] - self.wav_linear[0]\n self.wav_frac, self.wav_inds = Utils.Series.Getaxispos_vector(self.wav, self.wav_linear)\n return", "def trigger_cut(\n df,\n nearest_data_trigger_allowed=5,\n furthest_simu_trigger_allowed=5\n ):\n df['trigger_cut'] = (\n ( abs(df['t_nearest_data_trigger']) > nearest_data_trigger_allowed )\n & ( abs(df['t_input_simu_trigger']) < furthest_simu_trigger_allowed )\n )", "def Cut(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperations_Cut(self, *args)", "def cut_standard(X, VARS, 
xcorr_flow=False):\n \n # Fiducial cuts\n #MINPT = 0.5\n #MAXETA = 2.4\n MINPT = 0.7\n MAXETA = 1.5\n \n \n # Construct cuts\n cuts = []\n names = []\n\n #\n cuts.append( X[:,VARS.index('has_gsf')] == True )\n names.append(f'has_gsf == True')\n #\n cuts.append( X[:,VARS.index('gsf_pt')] > MINPT )\n names.append(f'gsf_pt > {MINPT:0.2f}')\n #\n cuts.append( np.abs(X[:,VARS.index('trk_eta')]) < MAXETA )\n names.append(f'|gsf_eta| < {MAXETA:0.2f}')\n #\n #cuts.append( [(len(X[i,VARS.index('image_clu_eta')]) is not 0) for i in range(X.shape[0])] )\n #names.append(f'len(image_clu_eta) != 0')\n \n \n ind = aux.apply_cutflow(cut=cuts, names=names, xcorr_flow=xcorr_flow)\n return ind", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=1):\n b, a = butter_bandpass(lowcut, highcut, fs, order=order)\n y = filtfilt(b, a, data)\n return y", "def data_clean_analysis(dates, thresholds, thresholds_pairs):\n mpf = []\n tpt = []\n date = []\n thresh = []\n thresh_pairs = []\n\n for k, v in dates.items():\n for t in thresholds:\n for tp in thresholds_pairs:\n print(k)\n print(t)\n print(tp)\n print('-----')\n\n commits = pd.read_csv('../pub_data/test_commits_pub.csv', encoding='latin-1', sep='\\t')\n test_details = pd.read_csv('../pub_data/test_details_pub.csv', sep='\\t')\n test_status = pd.read_csv('../pub_data/test_histo_pub.csv', sep='\\t')\n mod_files = pd.read_csv(\"../pub_data/test_commits_mod_files_pub.csv\", sep='\\t')\n\n D = DataCI(commits, test_details, test_status, mod_files, start_date=v, threshold=t, threshold_pairs=tp)\n modification, transition = D.get_data_info()\n\n mpf.append(modification)\n tpt.append(transition)\n date.append(k)\n thresh.append(t)\n thresh_pairs.append(tp)\n\n print(len(date))\n print(len(thresh))\n print(len(thresh_pairs))\n print(len(mpf))\n print(len(tpt))\n\n df = pd.DataFrame(list(zip(date, thresh, thresh_pairs, mpf, tpt)),\n columns=['date', 'threshold', 'threshold_pairs', 'mpf', 'tpt']\n )\n\n df.to_pickle('start_date_analysis1.pkl')", "def featuresHist(self, **kwargs):\n\n # Selecting bins automatically:\n bins_onpower = np.arange(self.onpower_train.min().values[0],\n self.onpower_train.max().values[0],\n (self.onpower_train.max().values[0] -\n self.onpower_train.min().values[0]) / 50)\n\n bins_offpower = np.arange(self.offpower_train.min().values[0],\n self.offpower_train.max().values[0],\n (self.offpower_train.max().values[0] -\n self.offpower_train.min().values[0]) / 50)\n\n bins_duration = np.arange(self.duration_train.min().values[0],\n self.duration_train.max().values[0],\n (self.duration_train.max().values[0] -\n self.duration_train.min().values[0]) / 50)\n\n # If a bin has been specified update the bin sizes.\n for key in kwargs:\n if key == 'bins_onpower':\n bins_onpower = kwargs[key]\n elif key == 'bins_offpower':\n bins_offpower = kwargs[key]\n elif key == 'bins_duration':\n bins_duration = kwargs[key]\n else:\n print(\"Non valid kwarg\")\n\n # Plot structure:\n fig = plt.figure()\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n\n # Evaluating score for:\n # Onpower\n x = np.arange(bins_onpower.min(), bins_onpower.max() + \\\n np.diff(bins_onpower)[0], np.diff(bins_onpower)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.onpower, x)\n norm = pd.cut(\n self.onpower_train.onpower, bins=bins_onpower).value_counts().max() / max(y)\n # Plots for Onpower\n ax1.hist(\n self.onpower_train.onpower.values, bins=bins_onpower, alpha=0.5)\n ax1.plot(x, y * norm)\n #ax1.set_title(\"Feature: Onpower\")\n 
#ax1.set_ylabel(\"Counts\")\n #ax1.set_xlabel(\"On power (W)\")\n ax1.set_ylabel(\"On power counts\")\n\n # Offpower\n x = np.arange(bins_offpower.min(), bins_offpower.max() + \\\n np.diff(bins_offpower)[0], np.diff(bins_offpower)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.offpower, x)\n norm = pd.cut(self.offpower_train.offpower,\n bins=bins_offpower).value_counts().max() / max(y)\n # Plots for Offpower\n ax2.hist(self.offpower_train.offpower.values,\n bins=bins_offpower, alpha=0.5)\n ax2.plot(x, y * norm)\n #ax2.set_title(\"Feature: Offpower\")\n #ax2.set_ylabel(\"Counts\")\n #ax2.set_xlabel(\"Off power (W)\")\n ax2.set_ylabel(\"Off power counts\")\n\n # Duration\n x = np.arange(bins_duration.min(), bins_duration.max() + \\\n np.diff(bins_duration)[0], np.diff(bins_duration)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.duration, x)\n norm = pd.cut(self.duration_train.duration,\n bins=bins_duration).value_counts().max() / max(y)\n # Plots for duration\n ax3.hist(self.duration_train.duration.values,\n bins=bins_duration, alpha=0.5)\n ax3.plot(x, y * norm)\n #ax3.set_title(\"Feature: Duration\")\n #ax3.set_ylabel(\"Counts\")\n #ax3.set_xlabel(\"Duration (seconds)\")\n ax3.set_ylabel(\"Duration counts\")", "def filt(self,cutoff_dt, btype='low',order=3,axis=-1):\r\n \r\n if self.isequal==False and self.VERBOSE:\r\n print 'Warning - time series is unequally spaced. Use self.interp to interpolate onto an equal grid'\r\n \r\n if not btype == 'band':\r\n Wn = self.dt/cutoff_dt\r\n else:\r\n Wn = [self.dt/co for co in cutoff_dt]\r\n \r\n (b, a) = signal.butter(order, Wn, btype=btype, analog=0, output='ba')\r\n \r\n return signal.filtfilt(b, a, self.y, axis=axis)", "def cut_bkg(self):\n c = TCut(self.cut_both)\n c += TCut(self._return_if('_cut_bkg'))\n return c", "def pick_cuts_up_to_order(cuts, percentile):\n\n mask_orders_to_pick = cuts.costs <= np.percentile(cuts.costs, q=percentile)\n cuts.costs = cuts.costs[mask_orders_to_pick]\n cuts.values = cuts.values[mask_orders_to_pick, :]\n if cuts.names is not None:\n cuts.names = cuts.names[mask_orders_to_pick]\n if cuts.equations is not None:\n cuts.equations = cuts.equations[mask_orders_to_pick]\n\n return cuts", "def onAddCutToolClicked(self, event):\n i_cube = self.cube_choice.GetSelection()\n i_dimension = self.cut_dimension_choice.GetSelection()\n\n if i_dimension <= 0:\n dlg_func.openWarningBox(_(u'CUT'), _(u'Cut dimension not selected'))\n else:\n value = self.cut_value_textCtrl.GetValue()\n if not value.strip():\n dlg_func.openWarningBox(_(u'CUT'), _(u'Cut value not specified'))\n else:\n cube = self._OLAP_server.getCubes()[i_cube]\n dimension = cube.getDimensions()[i_dimension - 1]\n row = (dimension.getLabel(), dimension.getName(), value)\n self.appendListCtrlRow(listctrl=self.cut_listCtrl, row=row)\n\n # After adding, clear the controls\n self.cut_dimension_choice.SetSelection(0)\n self.cut_value_textCtrl.SetValue(u'')\n\n event.Skip()", "def __call__(self, n_bins, segment, elements):\n\n # n_bins\n assert type(n_bins) is int\n assert n_bins > 0\n\n # segment\n assert type(segment) is list or type(segment) is tuple\n assert len(segment) == 2\n assert np.isscalar(segment[0]) and np.isscalar(segment[1])\n assert segment[0] < segment[1]\n\n # elements\n assert type(elements) is np.ndarray, f\"elements should be an np.ndarray, instead of {type(elements)}\"\n assert elements.dtype == np.number\n\n raise NotImplemented", "def cut_data(xdata, ydata, yerrdata, cutmode=CUT_POINT, cutvalue=0):\n if cutmode == CUT_POINT:\n 
xdata_cut = xdata[cutvalue:]\n ydata_cut = ydata[cutvalue:]\n yerrdata_cut = yerrdata[cutvalue:]\n elif cutmode == CUT_RADIUS:\n ii = xdata >= cutvalue\n xdata_cut = xdata[ii]\n ydata_cut = ydata[ii]\n yerrdata_cut = yerrdata[ii]\n else:\n raise ValueError('Unknown cut mode: %s' % cutmode)\n return (xdata_cut, ydata_cut, yerrdata_cut)", "def plotMultipleVars(self, vars, series, groups=None, labels=None, postfix=\"\",logy=True, fixedrange=False):\n # split the variable names, we'll use the first one for naming purposes\n varnames = [var.split(\"_\") for var in vars]\n\n # create the separate dataframes from the provided groups\n # Define some labels if we have groups and no provided labels\n # Stack all the variables we want to plot in one histogram\n dfs = None\n if groups:\n dfs = [series.loc[g,:].stack() for g in groups]\n if not labels or len(labels) != len(groups):\n labels = [\"Group %s\" % (i+1) for i in xrange(len(groups)-1)]\n labels.append(\"Bulk\")\n else:\n dfs = [series.stack()]\n\n\n # Get right number of colors, and reverse them so that mediumpurple is \n # used for the bulk of the chips (assumed to be the last group)\n colors = (self.colorlist[:len(dfs)])\n colors.reverse()\n \n # Make the histogram\n # Get the preferred binning and check whether all values fall within that range \n if varnames[0][0] in cutinfo11:\n nbins = cutinfo11[varnames[0][0]][2]\n xmin = cutinfo11[varnames[0][0]][3]\n xmax = cutinfo11[varnames[0][0]][4]\n series_min = series.min().min()\n series_max = series.max().max()\n if fixedrange or (series_min > xmin and series_max < xmax):\n ax = plt.hist(dfs, bins=nbins, range=[xmin, xmax], stacked=True, \n color=colors, label=labels, log=logy)\n else:\n ax = plt.hist(dfs, bins=nbins, stacked=True, \n color=colors, label=labels, log=logy)\n else:\n ax = plt.hist(dfs, bins=20, stacked=True, \n color=colors, label=labels, log=logy)\n\n # Set the axis titles\n if varnames[0][0] in cutinfo11:\n if len(varnames[0]) == 1:\n plt.xlabel(cutinfo11[varnames[0][0]][0], \n fontsize=self.labelsize)\n else:\n plt.xlabel(\"%s ; %s\" % (cutinfo11[varnames[0][0]][0], varnames[0][1]), \n fontsize=self.labelsize)\n else:\n plt.xlabel(varnames[0][0], \n fontsize=self.labelsize)\n plt.ylabel(\"Number of measurements\", fontsize=self.labelsize)\n\n # set margins and format axis labels\n x0, x1, y0, y1 = plt.axis()\n if logy:\n plt.axis((x0, x1,\n 0.5, y1*10))\n else:\n plt.axis((x0, x1,\n 0.5, y1*(1+0.2)))\n ax = plt.gca()\n ax.tick_params(labelsize=self.ticklabelsize)\n plt.gcf().subplots_adjust(bottom=0.12)\n\n # Add mean and std info\n # Only use info on good chips, should be the last group in the list\n mean = dfs[-1].mean() #series.stack().mean()\n std = dfs[-1].std() #series.stack().std()\n plt.figtext(0.4, 0.92,\n \"Mean: %.3g Std/Mean: %.3g\\nStd: %.3g\"%(mean, std/mean, std),\n fontsize=self.ticklabelsize)\n\n # Add cut lines if we have info\n if self.cutfile != None and varnames[0][0] in cutinfo11:\n plt.axvline(x=self.cuts[varnames[0][0]][2], linestyle='dashed', linewidth=2, color='grey')\n plt.axvline(x=self.cuts[varnames[0][0]][3], linestyle='dashed', linewidth=2, color='grey')\n plt.axvline(x=self.cuts[varnames[0][0]][0], linestyle='solid', linewidth=2, color='dimgrey')\n plt.axvline(x=self.cuts[varnames[0][0]][1], linestyle='solid', linewidth=2, color='dimgrey')\n\n # Add legend if we have labels\n if labels:\n plt.legend(loc='best', ncol=2)\n\n # Save figure\n plt.savefig(\"%s/%s%s.pdf\" % (self.outputdir, varnames[0][0], postfix))\n plt.clf()", "def 
_conditions(self, beg=-90, intvl=20, con_type='ori', stim='bar', \n\t\t\t\t\tbiphasic=True, unit='deg', con_list=[], temp_freq = 2):\n\t\t\n\t\t\n\t\tcon_types = ['ori', 'spat_freq', 'temporal_freq', 'chromatic', 'dl_bar']\n\t\tstims = ['bar', 'grating']\n\t\t\n\t\t\n\t\t# Checking if condition and stimulus type recognised. \n\t\tif not con_type.lower() in con_types:\n\t\t\tprint('con_type not recognised. ' \n\t\t\t\t\t'Predefined options, if desired, are %s \\n'%con_types\n\t\t\t\t\t)\n\n\t\tif not stim.lower() in stims:\n\t\t\tprint('stimulus not recognised. ' \n\t\t\t\t\t'Predefined options, if desired, are %s \\n'%con_types\n\t\t\t\t\t)\n\n\n\t\t\n\t\tn_con = self.parameters['conditions']\n\t\t\n\t\tself.parameters['condition_type'] = con_type.lower()\n\t\tself.parameters['condition_unit'] = unit.capitalize()\n\t\tself.parameters['stimulus'] = stim.lower()\n\t\t\n\t\tif stim.lower() == stims[1]:\n\t\t\t# Gratings are GENERALLY not biphasic\n\t\t\tself.parameters['biphasic'] = 'N/A'\n\t\telse:\n\t\t\tself.parameters['biphasic'] = biphasic\n\t\t\n\t\t# Address issue of whether the sampling rate suits teh temporal frequency of \n\t\t# the grating for FFT analysis\n\t\tif stim.lower() == 'grating':\n\t\t\tself.parameters['temp_freq'] = float(temp_freq)\n\t\t\t\n\t\t\t# Sample rate must be a multiple of F1/temp_freq for it to be a frequency measured\n\t\t\t# in the FFT.\n\t\t\tsamp_rate = 1/float(self.bin_width)\n\t\t\t\n\t\t\t\n\t\t\tassert samp_rate % temp_freq == 0., ('Bin_width (%s) is incompatible wih obtaining' \n\t\t\t\t\t\t\t\t\t\t\t\t 'an FFT containing the specified temp_freq (%s). '\n\t\t\t\t\t\t\t\t\t\t\t\t 'The sampling frequency (1/bin_width) must be a'\n\t\t\t\t\t\t\t\t\t\t\t\t 'multiple of the temp_freq. \\n\\n Try as a' \n\t\t\t\t\t\t\t\t\t\t\t\t 'bin_width %s and rerun self._sort().'\n\t\t\t\t\t\t\t\t\t\t\t\t % (self.bin_width, temp_freq, \n\t\t\t\t\t\t\t\t\t\t\t\t\t1/(np.ceil(samp_rate/float(temp_freq))*temp_freq)))\n\t\t\n\t\tself.cond_label = []\n\n\t\t\n\t\tdef circ(ori, bound = 360):\n\t\t\t\"\"\"Func that Ensures all orientation values are between 0 and 360 degrees.\n\t\t\t\"\"\"\n\t\t\t# ori[ori<-360] += 720\n\t\t\t# ori[ori<0] += 360\n\t\t\t# ori[ori>360] -= 360\n\t\t\t# ori[ori>720] -= 720\n\n\n\t\t\treturn ori % bound\n\n\t\t# if list of conditions provided directly\n\t\tif len(con_list) > 0:\n\t\t\t\n\t\t\t# Must match number of conditions\n\t\t\tassert len(con_list) == n_con, ('the number of labels provided '\n\t\t\t\t\t\t\t\t\t\t'manually (%s) does not match the '\n\t\t\t\t\t\t\t\t\t\t'number of conditions (%s).' 
% \n\t\t\t\t\t\t\t\t\t\t(len(con_list), n_con))\n\t\t\t \n\t\t\t# Must all be strings \n\t\t\tassert all(isinstance(l, str) for l in con_list), ('not all the '\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'labels provided '\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'are strings')\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t# List of conditions as strings\n\t\t\tself.cond_label = con_list\n\t\t\t\n\t\t\t# Convert to floats\n\t\t\t# Relying on numpy conversion error should list be unable to convert to float.\n\t\t\tself.conditions = np.array(con_list).astype('float')\n\t\t\t\n\t\t\t\n\t\t\tif biphasic:\n\t\t\t\t\n\n\t\t\t\t# self.conditions has been defined as an np.ndarray\n\t\t\t\tself.conditions2 = self.conditions \n\n\t\t\t# # Generate list of strings or labels\n\t\t\t# for c in range(n_con):\n\t\t\t# label = '%s / %s %s' %(self.conditions[c], self.conditions2[c],\n\t\t\t# self.parameters['condition_unit'])\n\t\t\t# self.cond_label.append(label)\n\n\t\t\t# else:\n\t\t\t# for c in range(n_con):\n\t\t\t\t\t\n\t\t\t# label = '%s %s' %(self.conditions[c],\n\t\t\t# self.parameters['condition_unit'])\n\t\t\t# self.cond_label.append(label)\n\n\t\t\t\t\n\t\t\n\t\t# if condition tpye is orientation\n\t\telif con_type.lower() == con_types[0]:\n\t\t\t\n\t\t\t# Generate full range of conditions\n\t\t\tself.conditions = circ(np.arange(beg, beg+(n_con*intvl), intvl))\n\t\t\t\n\t\t\tassert len(self.conditions) == n_con, ('The amount of condition labels (%s) '\n\t\t\t\t\t\t\t\t\t\t\t'and conditions (%s) do not match; '\n\t\t\t\t\t\t\t\t\t\t\t'check your condition parameters' % \n\t\t\t\t\t\t\t\t\t\t\t(self.cond_label.size, n_con))\n\t\t\t\n\t\t\tif biphasic:\n\t\t\t\t\n\n\t\t\t\t# self.conditions has been defined as an np.ndarray\n\t\t\t\tself.conditions2 = circ(self.conditions + 180) \n\n\t\t\t\t# Generate list of strings or labels\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\tlabel = '%s / %s %s' %(self.conditions[c], self.conditions2[c],\n\t\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)\n\t\t\t# Generate list of strings for non-biphasic. 
\n\t\t\telse:\n\t\t\t\t\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\tlabel = '%s %s' %(self.conditions[c],\n\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)\n\t\t\t\t\t\n\t\t# IF condition type is Spat Freq \n\t\telif con_type.lower() == con_types[1]:\n\t\t\tself.conditions = np.arange(beg, beg + (n_con*intvl), intvl)\n\t\t\t\n\t\t\tassert len(self.conditions) == n_con, ('The amount of condition labels (%s) '\n\t\t\t\t\t\t\t\t\t\t\t'and conditions (%s) do not match; '\n\t\t\t\t\t\t\t\t\t\t\t'check your condition parameters' % \n\t\t\t\t\t\t\t\t\t\t\t(self.cond_label.size, n_con))\n\n\t\t\tfor c in range(n_con):\n\t\t\t\tlabel = '%s %s' %(self.conditions[c], self.parameters['condition_unit'])\n\t\t\t\tself.cond_label.append(label)\n\n\t\t# IF condition type is dl_bar\t\t\t\t\t\n\t\telif con_type.lower() == con_types[4]:\n\n\t\t\tself.conditions = np.array([0, 1])\n\t\t\tself.cond_label = ['dark','light']\n\n\t\t\tif len(con_list) > 0:\n\t\t\t\tself.conditions = np.array(con_list).astype('float')\n\n\t\t\t\tif con_list[0] > con_list[1]:\n\t\t\t\t\tself.cond_label = self.cond_label[::-1]\n\n\t\t\tif biphasic:\n\n\t\t\t\tself.conditions2 = self.conditions\n\n\t\t\t\tself.cond_label.extend(\n\t\t\t\t\t[\n\t\t\t\t\t\tcl + ' second'\n\t\t\t\t\t\tfor cl in self.cond_label\n\t\t\t\t\t]\t\n\t\t\t\t\t)\n\n\n\n\t\t# if condition type is not predefined in this method, presume linear range \n\t\telif not con_type.lower() in con_types:\n\t\t\t\n\t\t\tself.conditions = np.arange(beg, beg+(n_con*intvl), intvl)\n\n\n\t\t\tif biphasic:\n\t\t\t\t\n\n\t\t\t\t# self.conditions has been defined as an np.ndarray\n\t\t\t\tself.conditions2 = self.conditions \n\n\t\t\t\t# Generate list of strings or labels\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\tlabel = '%s / %s %s' %(self.conditions[c], self.conditions2[c],\n\t\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)\n\n\t\t\telse:\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\t\n\t\t\t\t\tlabel = '%s %s' %(self.conditions[c],\n\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)", "def SplitValues(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_SplitCurve_SplitValues(self, *args)", "def gaussify_bands(self, sigma):\n for key, band in self.bands.items():\n self.gauss_bands[key] = gaussian_filter(input=band, sigma=sigma)", "def fit_data(curves, snType='Ia',bands=None, models=None, params=None, bounds={}, ignore=None, constants=None,\n\t\t\t method='parallel',t0_guess=None,refModel=None,effect_names=[],effect_frames=[],fitting_method='nest',\n\t\t\t dust=None,flip=False,guess_amplitude=True,seriesError=None,showPlots=False,microlensing=None,\n\t\t\t kernel='RBF',seriesGrids=None,refImage='image_1',nMicroSamples=100,color_curve=None,verbose=True,**kwargs):\n\n\t#get together user arguments\n\targs = locals()\n\tfor k in kwargs.keys():\n\t\targs[k]=kwargs[k]\n\n\tif isinstance(curves,(list,tuple)):\n\t\targs['curves']=[]\n\t\tfor i in range(len(curves)):\n\t\t\ttemp=_sntd_deepcopy(curves[i])\n\t\t\ttemp.nsn=i+1\n\t\t\targs['curves'].append(temp)\n\t\targs['parlist']=True\n\telse:\n\t\targs['curves']=_sntd_deepcopy(curves)\n\t\targs['parlist']=False\n\n\targs['bands'] = [bands] if bands is not None and not isinstance(bands,(tuple,list,np.ndarray)) else bands\n\t#sets the bands to user's if defined (set, so that they're unique), otherwise to all the bands that exist in curves\n\n\targs['bands'] = list(set(bands)) if bands is not None else 
None\n\n\targs['bands'] = list(curves.bands) if not isinstance(curves,(list,tuple,np.ndarray)) else list(curves[0].bands)\n\n\tmodels=[models] if models and not isinstance(models,(tuple,list)) else models\n\tif not models:\n\t\tmod,types=np.loadtxt(os.path.join(__dir__,'data','sncosmo','models.ref'),dtype='str',unpack=True)\n\t\tmodDict={mod[i]:types[i] for i in range(len(mod))}\n\t\tif snType!='Ia':\n\t\t\tmods = [x[0] for x in sncosmo.models._SOURCES._loaders.keys() if x[0] in modDict.keys() and modDict[x[0]][:len(snType)]==snType]\n\t\telif snType=='Ia':\n\t\t\tmods = [x[0] for x in sncosmo.models._SOURCES._loaders.keys() if 'salt2' in x[0]]\n\telse:\n\t\tmods=models\n\tmods=set(mods)\n\targs['mods']=mods\n\t#sncosmo fitting function for model determination\n\targs['sn_func'] = {'minuit': sncosmo.fit_lc, 'mcmc': sncosmo.mcmc_lc, 'nest': nest_lc}\n\n\t#get any properties set in kwargs that exist for the defined fitting function\n\targs['props'] = {x: kwargs[x] for x in kwargs.keys() if\n\t\t\t\t\t x in [y for y in inspect.signature(args['sn_func'][fitting_method]).parameters] and x != 'verbose'}\n\n\tif method not in ['parallel','series','color']:\n\t\traise RuntimeError('Parameter \"method\" must be \"parallel\",\"series\", or \"color\".')\n\tif microlensing is not None and method !='parallel':\n\t\tprint('Microlensing uncertainty only set up for parallel right now, switching to parallel method...')\n\t\tmethod='parallel'\n\t\n\tif method=='parallel':\n\t\tif args['parlist']:\n\t\t\tpar_arg_vals=[]\n\t\t\tfor i in range(len(args['curves'])):\n\t\t\t\ttemp_args={}\n\t\t\t\tfor par_key in ['snType','bounds','constants','t0_guess','refModel','color_curve','seriesGrids']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tfor par_key in ['bands','models','ignore','params']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)) and np.any([isinstance(x,(list,tuple,np.ndarray)) for x in args[par_key]]):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tpar_arg_vals.append([args['curves'][i],temp_args])\n\t\t\tcurves=pyParz.foreach(par_arg_vals,_fitparallel,[args])\n\n\t\telse:\n\t\t\tcurves=_fitparallel(args)\n\telif method=='series':\n\t\tif args['parlist']:\n\n\t\t\tpar_arg_vals=[]\n\t\t\tfor i in range(len(args['curves'])):\n\t\t\t\ttemp_args={}\n\t\t\t\tfor par_key in ['snType','bounds','constants','t0_guess','refModel','color_curve','seriesGrids']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tfor par_key in ['bands','models','ignore','params']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)) and np.any([isinstance(x,(list,tuple,np.ndarray)) for x in args[par_key]]):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tpar_arg_vals.append([args['curves'][i],temp_args])\n\t\t\tcurves=pyParz.foreach(par_arg_vals,_fitseries,[args])\n\t\telse:\n\t\t\tcurves=_fitseries(args)\n\n\telse:\n\t\tif args['parlist']:\n\t\t\tpar_arg_vals=[]\n\t\t\tfor i in range(len(args['curves'])):\n\t\t\t\ttemp_args={}\n\t\t\t\tfor par_key in ['snType','bounds','constants','t0_guess','refModel','color_curve','seriesGrids']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tfor par_key in ['bands','models','ignore','params']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)) and np.any([isinstance(x,(list,tuple,np.ndarray)) for x in 
args[par_key]]):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tpar_arg_vals.append([args['curves'][i],temp_args])\n\t\t\tcurves=pyParz.foreach(par_arg_vals,_fitColor,[args])\n\t\telse:\n\t\t\tcurves=_fitColor(args)\n\n\treturn curves", "def filt_hp(sig: np.ndarray, Ss: int, Cfs: int, Cfs1: None,\n order=5) -> np.ndarray:\n nyq = 0.5 * Ss\n normal_cutoff = Cfs / nyq\n b, a = butter(order, normal_cutoff, btype='high', analog=False)\n return lfilter(b, a, sig)", "def apply_on_bins(self, bins, functions, return_dict=False):\n binned_data = self.bin(bins)\n\n if return_dict:\n return_values = {\n name: np.asarray(\n [func(bin, 0) for bin in binned_data]\n ).flatten()\n for name, func in functions.items()\n }\n else:\n return_values = GroupedArrays()\n for name, func in functions.items():\n return_values[name] = np.asarray([\n func(bin, 0) for bin in binned_data]\n ).flatten()\n\n return return_values", "def setCutRatios(self, cutRatios):\n\t\t# TODO: Check if this is an array\n\t\t# Adjust the ratio according to specifications\n\t\tfor i in range(len(cutRatios)):\n\t\t\tif cutRatios[i] < 0.5:\n\t\t\t\tcutRatios[i] = 1 - cutRatios[i]\n\t\t\telif cutRatios[i] > 1.0:\n\t\t\t\t# Scary\n\t\t\t\t# Maybe raise an error\n\t\t\t\tcutRatios[i] = cutRatios[i] - 1\n\t\tself.cutRatios = cutRatios", "def hist_data(list_source, frq=151, ln=False, data_lim=None):\n fluxes = []\n\n if data_lim is not None:\n min_acceptable = data_lim[0]\n else:\n min_acceptable = None\n if data_lim is not None:\n max_acceptable = data_lim[1]\n else:\n max_acceptable = None\n \n for gleam_obj in list_source:\n I = gleam_obj.flux_by_frq[frq]\n if is_constrained(I, min_acceptable, max_acceptable):\n if ln:\n fluxes.append(np.log(I))\n else:\n fluxes.append(I)\n \n return np.array(fluxes)", "def apply_trigger_first(cut_fn):\n def wrapped(arrays, cut):\n arrays = svjflatanalysis.arrayutils.apply_trigger_and_jetpt550(arrays, 2018)\n return cut_fn(arrays, cut)\n return wrapped", "def __init__(self, NumofBandits=10, MeanRange=[-5, 5], sigma=1):\n self.NumofBandits = NumofBandits\n self.sigma = sigma\n # Random generate the mean value of each action\n self.MeanList = np.random.uniform(MeanRange[0], MeanRange[1], self.NumofBandits)", "def fit(self, data, discrete_features=None, bandwidth=1.0,\n num_discretization_bins=4, pseudocount=1.0):\n if bandwidth <= 0:\n raise ValueError(\"Bandwidth must be positive.\")\n \n if discrete_features != None and \\\n len(discrete_features) != data.shape[1]:\n raise ValueError(\"Discrete features array and data arrays\"\n \"shape don't match.\")\n \n if num_discretization_bins < 0:\n raise ValueError(\"Number of descretization bins can't be negetive.\")\n \n if num_discretization_bins == 0:\n for bool in discrete_features:\n if bool:\n raise ValueError(\"Number of descretization bins can't be\"\n \"zero if there is a continuous feature.\")\n \n if pseudocount < 0:\n raise ValueError(\"Pseudocount can't be negative.\")\n \n if discrete_features == None:\n discrete_features = [False] * data.shape[1]\n\n self.num_features_ = data.shape[1]\n self.discrete_features_ = discrete_features\n self.num_discretization_bins_ = num_discretization_bins\n\n discretized_data = np.array(data, copy=True)\n continuous_data = data[:, np.invert(discrete_features)]\n\n discretizer = KBinsDiscretizer(n_bins=num_discretization_bins,\n encode='ordinal', strategy='quantile')\n discretizer.fit(continuous_data)\n\n discretized_data[:, np.invert(discrete_features)] = \\\n discretizer.transform(continuous_data)\n 
self.discretizer_ = discretizer\n\n self.model_ = BayesianNetwork.from_samples(discretized_data,\n algorithm='chow-liu', n_jobs=-1, pseudocount=pseudocount)\n self.model_.bake()\n \n # Table for bin edges\n bins = discretizer.bin_edges_\n\n # Kdes for continuous data.\n self.tnkdes_ = []\n\n i = 0\n for k in range(self.num_features_):\n if discrete_features[k]:\n continue\n \n bins[i][0] = -np.inf\n bins[i][len(bins[i]) - 1] = np.inf\n bin_kdes = []\n \n # loop of boundary\n for j in range(len(bins[i]) - 1):\n # Bound for this bin.\n lower_bound = bins[i][j]\n upper_bound = bins[i][j+1]\n \n # Create a kde using the data in the current bin.\n current_feature_data = data[:, k]\n cur_bin_data = current_feature_data[discretized_data[:, k] == j]\n kde = TruncatedNormalKernelDensity(bandwidth=bandwidth,\n lowerbound=lower_bound, upperbound=upper_bound)\n kde.fit(cur_bin_data)\n bin_kdes.append(kde)\n \n i = i + 1\n self.tnkdes_.append(bin_kdes)", "def create_intervals_hsb(confidence, n_samples, data):\n print(data)\n intervals = []\n if not isinstance(data, Iterable):\n assert isinstance(data, float)\n return [create_interval_hsb(confidence, n_samples, data)]\n for data_point in data:\n try:\n assert isinstance(data_point, float)\n except AssertionError:\n data_point = float(data_point)\n intervals.append(create_interval_hsb(confidence, n_samples, data_point))\n return intervals", "def apply_cuts(chain, isotope, tree, volume):\n\n #open file which inlcudes fill levels and fill days\n infile = open(\"/users/langrock/plotting_macros/Partial_fill/split_level.txt\",\"r\")\n\n #define root file to save root files to\n outputroot = ROOT.TFile(\"/data/langrock/PartialFill/Full/root/\" + isotope + \"_\" + chain + \"_\" + volume +\".root\",\"recreate\")\n\n #define histograms\n hist = define_histograms.DefineHistograms()\n\n events_full = 0\n events_pocut = 0\n events_deltatcut = 0\n events_bifidvolcut = 0\n events_deltarcut = 0\n events_bicut = 0\n events_allcut = 0\n\n #get fill days and fill level from file, loop through each line and perform the cut selection on each day of filling\n for line in infile:\n words = line.split()\n\n if len(words)!=0:\n\n d = float(words[0])\n z_level = float(words[1])\n \n #loop through the events in the root file\n for i in range(tree.GetEntries()):\n #get variables from previous events\n tree.GetEntry(i-1)\n nhits_prev = tree.nhits\n radius_prev = math.sqrt(tree.posx*tree.posx+tree.posy*tree.posy+tree.posz*tree.posz)\n time_prev = tree.uTNSecs + tree.uTSecs*math.pow(10,9) + tree.uTDays*24*60*60*math.pow(10,9)\n energy_prev = tree.energy\n fitValid_prev = tree.fitValid\n x_prev = tree.posx\n y_prev = tree.posy\n z_prev = tree.posz\n\n #get variables from current events\n tree.GetEntry(i)\n nhits = tree.nhits\n radius = math.sqrt(tree.posx*tree.posx+tree.posy*tree.posy+tree.posz*tree.posz)\n time = tree.uTNSecs + tree.uTSecs*math.pow(10,9) + tree.uTDays*24*60*60*math.pow(10,9)\n energy = tree.energy\n fitValid = tree.fitValid\n x = tree.posx\n y = tree.posy\n z = tree.posz\n\n #get day at which events were simulated\n day = tree.uTDays\n\n #define time differance and event distance\n delta_t = time - time_prev\n delta_r = math.sqrt(math.pow((x_prev - x),2) + math.pow((y_prev - y),2) + math.pow((z_prev - z),2))\n\n fidvol_value = 5000\n \n #if the event was generated on the current day of filling, apply cuts\n if d == day:\n\n #fill histograms and count events\n hist.h_energy_full.Fill(energy)\n hist.h_nhitspo_full.Fill(nhits)\n hist.h_nhitsbi_full.Fill(nhits_prev)\n 
hist.h_deltat_full.Fill(delta_t)\n hist.h_deltar_full.Fill(delta_r)\n hist.h_rfidvolbi_full.Fill(radius_prev)\n\n events_full += 1\n\n #apply fiducial vlume cut\n if radius> 0 and radius < fidvol_value and z >= z_level+653:\n\n hist.h_energy_pocut.Fill(energy)\n hist.h_nhitspo_pocut.Fill(nhits)\n hist.h_nhitsbi_pocut.Fill(nhits_prev)\n hist.h_deltat_pocut.Fill(delta_t)\n hist.h_deltar_pocut.Fill(delta_r)\n hist.h_rfidvolbi_pocut.Fill(radius_prev)\n \n events_pocut += 1\n\n #bipo212 cut selection\n if chain == \"bipo212\":\n #apply polonium candidate cut\n if nhits >= 450 and nhits <= 580:\n\n hist.h_energy_deltatcut.Fill(energy)\n hist.h_nhitspo_deltatcut.Fill(nhits)\n hist.h_nhitsbi_deltatcut.Fill(nhits_prev)\n hist.h_deltat_deltatcut.Fill(delta_t)\n hist.h_deltar_deltatcut.Fill(delta_r)\n hist.h_rfidvolbi_deltatcut.Fill(radius_prev)\n\n events_deltatcut += 1\n\n #time difference cut\n if delta_t < 3690:\n\n hist.h_energy_bifidvolcut.Fill(energy)\n hist.h_nhitspo_bifidvolcut.Fill(nhits)\n hist.h_nhitsbi_bifidvolcut.Fill(nhits_prev)\n hist.h_deltat_bifidvolcut.Fill(delta_t)\n hist.h_deltar_bifidvolcut.Fill(delta_r)\n hist.h_rfidvolbi_bifidvolcut.Fill(radius_prev)\n\n events_bifidvolcut += 1\n\n #fiducial radius cut on bismuth candidate\n if radius_prev > 0 and radius_prev < fidvol_value and z_prev >= z_level+653:\n\n hist.h_energy_deltarcut.Fill(energy)\n hist.h_nhitspo_deltarcut.Fill(nhits)\n hist.h_nhitsbi_deltarcut.Fill(nhits_prev)\n hist.h_deltat_deltarcut.Fill(delta_t)\n hist.h_deltar_deltarcut.Fill(delta_r)\n hist.h_rfidvolbi_deltarcut.Fill(radius_prev)\n\n events_deltarcut += 1\n\n #distance cut\n if delta_r > 0 and delta_r < 1500:\n\n hist.h_energy_bicut.Fill(energy)\n hist.h_nhitspo_bicut.Fill(nhits)\n hist.h_nhitsbi_bicut.Fill(nhits_prev)\n hist.h_deltat_bicut.Fill(delta_t)\n hist.h_deltar_bicut.Fill(delta_r)\n hist.h_rfidvolbi_bicut.Fill(radius_prev)\n\n events_bicut += 1\n\n #nhits cut on the bismuth candidate\n if nhits_prev >= 100:\n\n hist.h_energy_allcut.Fill(energy)\n hist.h_nhitspo_allcut.Fill(nhits)\n hist.h_nhitsbi_allcut.Fill(nhits_prev)\n hist.h_deltat_allcut.Fill(delta_t)\n hist.h_deltar_allcut.Fill(delta_r)\n hist.h_rfidvolbi_allcut.Fill(radius_prev)\n\n events_allcut += 1\n \n #bipo214 cut selection\n elif chain == \"bipo214\":\n #nhits cut on polonium candidate\n if nhits >= 290 and nhits <= 450:\n\n hist.h_energy_deltatcut.Fill(energy)\n hist.h_nhitspo_deltatcut.Fill(nhits)\n hist.h_nhitsbi_deltatcut.Fill(nhits_prev)\n hist.h_deltat_deltatcut.Fill(delta_t)\n hist.h_deltar_deltatcut.Fill(delta_r)\n hist.h_rfidvolbi_deltatcut.Fill(radius_prev)\n\n events_deltatcut += 1\n\n #time difference cut\n if delta_t > 3690 and delta_t < 1798788:\n\n hist.h_energy_bifidvolcut.Fill(energy)\n hist.h_nhitspo_bifidvolcut.Fill(nhits)\n hist.h_nhitsbi_bifidvolcut.Fill(nhits_prev)\n hist.h_deltat_bifidvolcut.Fill(delta_t)\n hist.h_deltar_bifidvolcut.Fill(delta_r)\n hist.h_rfidvolbi_bifidvolcut.Fill(radius_prev)\n\n events_bifidvolcut += 1\n\n #fiducial volume cut on bismuth candidate\n if radius_prev > 0 and radius_prev < fidvol_value and z_prev >= z_level+653:\n\n hist.h_energy_deltarcut.Fill(energy)\n hist.h_nhitspo_deltarcut.Fill(nhits)\n hist.h_nhitsbi_deltarcut.Fill(nhits_prev)\n hist.h_deltat_deltarcut.Fill(delta_t)\n hist.h_deltar_deltarcut.Fill(delta_r)\n hist.h_rfidvolbi_deltarcut.Fill(radius_prev)\n \n events_deltarcut += 1\n\n #distance cut\n if delta_r > 0 and delta_r < 1500:\n\n hist.h_energy_bicut.Fill(energy)\n hist.h_nhitspo_bicut.Fill(nhits)\n 
hist.h_nhitsbi_bicut.Fill(nhits_prev)\n hist.h_deltat_bicut.Fill(delta_t)\n hist.h_deltar_bicut.Fill(delta_r)\n hist.h_rfidvolbi_bicut.Fill(radius_prev)\n\n events_bicut += 1\n\n #nhits cut on the bismuth candidate\n if nhits_prev >= 600:\n\n hist.h_energy_allcut.Fill(energy)\n hist.h_nhitspo_allcut.Fill(nhits)\n hist.h_nhitsbi_allcut.Fill(nhits_prev)\n hist.h_deltat_allcut.Fill(delta_t)\n hist.h_deltar_allcut.Fill(delta_r)\n hist.h_rfidvolbi_allcut.Fill(radius_prev)\n\n events_allcut += 1\n \n #write all histograms to file\n outputroot.Write()\n outputroot.Close()\n\n #create string with all event counts\n outputstring = isotope + \"\\t all events: \" + str(events_full) + \"\\t fiducial volume: \" + str(events_pocut) + \"\\t Po nhits cut: \" + str(events_deltatcut) + \"\\t Delta t cut: \" + str(events_bifidvolcut) + \"\\t fiducial volume: \" + str(events_deltarcut) + \"\\t Delta r cut: \" + str(events_bicut) + \"\\t Bi nhits cut: \" + str(events_allcut) + \"\\n \" \n\n return outputstring", "def select_sources(cat_table, cuts):\n nsrc = len(cat_table)\n full_mask = np.ones((nsrc), bool)\n for cut in cuts:\n if cut == 'mask_extended':\n full_mask *= mask_extended(cat_table)\n elif cut == 'select_extended':\n full_mask *= select_extended(cat_table)\n else:\n full_mask *= make_mask(cat_table, cut)\n\n lout = [src_name.strip() for src_name in cat_table['Source_Name'][full_mask]]\n return lout", "def binspecdattomatch( wavelength, flux, wavetomatch, fluxerr=[], sigclip=0,\n sumerrs=False ):\n w,f = wavelength, flux\n if len(fluxerr):\n df = fluxerr\n else :\n df=np.zeros(len(f))\n\n wavetomatch = np.asarray(wavetomatch)\n wavetomatch_halfbinwidth = np.diff(wavetomatch)/2.\n lastbinlow = wavetomatch[-1] - wavetomatch_halfbinwidth[-1]\n lastbinhigh = wavetomatch[-1] + wavetomatch_halfbinwidth[-1]\n wavebinedges = np.append( wavetomatch[:-1]-wavetomatch_halfbinwidth,\n np.array([lastbinlow,lastbinhigh]))\n\n wbinned, dwbinned, fbinned, dfbinned = [], [], [], []\n for i in range(len(wavebinedges)-1):\n wavebinmin=wavebinedges[i]\n wavebinmax=wavebinedges[i+1]\n iinbin = np.where((w>=wavebinmin)&(w<wavebinmax))\n\n winbin = w[iinbin]\n finbin = f[iinbin]\n dfinbin = df[iinbin]\n\n if sigclip :\n # use sigma clipping to reject outliers\n igoodval = isigclip( finbin, sigclip )\n if len(igoodval) :\n wbinval = np.mean( winbin[igoodval] )\n fbinval = np.mean( finbin[igoodval] )\n dwbinval = (winbin[igoodval].max() - winbin[igoodval].min())/2.\n #dwbinval = (wbin.max() - wbin.min())/2.\n if sumerrs :\n # flux uncertainty is the quadratic sum of the mean flux error\n # and the error of the mean\n dfbinval1 = np.std( finbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n dfbinval2 = np.mean( dfinbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n dfbinval = np.sqrt( dfbinval1**2 + dfbinval2**2 )\n else :\n # flux uncertainty is the std error of the mean\n dfbinval = np.std( finbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n else :\n # use a straight median\n wbinval = np.median( winbin )\n fbinval = np.median( finbin )\n dwbinval = (winbin[-1]-winbin[0])/2.\n if sumerrs :\n # flux uncertainty is the quadratic sum of the mean flux error\n # and the error of the mean\n dfbinval1 = np.std( finbin )/np.sqrt(len(finbin)-2)\n dfbinval2 = np.mean( dfinbin )\n dfbinval = np.sqrt( dfbinval1**2 + dfbinval2**2 )\n else :\n # flux uncertainty is the std error of the mean\n dfbinval = np.std( finbin ) / np.sqrt(max(1,len(finbin)))\n\n wbinned.append( wbinval )\n fbinned.append( fbinval )\n dwbinned.append( dwbinval )\n 
dfbinned.append( dfbinval )\n\n return( np.array( wbinned ), np.array(dwbinned), np.array(fbinned), np.array(dfbinned) )", "def cutoff(self, *args, **kwargs) -> Any:\n pass", "def __init__(self,ptbins,etabins,data=None):\n self._ptbins = ptbins\n self._etabins = etabins\n if data is not None:\n self._data = data\n else:\n self._data = [ [ (0,0) for i in range(len(self._etabins)+1) ] for i in range(len(self._ptbins)+1) ]\n self.__check()", "def discretize_all(self, cond = 5, bins=3):\n\n self.bin_discretize(np.where(self.arity>cond)[0],bins)\n self.data=self.data.astype(int)", "def generate_cut_strings(var, bin_edges, bottom_inclusive=True):\n incl = '=' if bottom_inclusive else ''\n return ['{var} >{incl} {low:g} && {var} < {high:g}'.format(var=var, low=bin_low,\n high=bin_high, incl=incl)\n for (bin_low, bin_high) in bin_edges]", "def test_skl_hist_gradient_boosting_with_categorical():\n # We don't yet support HistGradientBoostingClassifier with categorical splits\n # So make sure that an exception is thrown properly\n rng = np.random.RandomState(0)\n n_samples = 1000\n f1 = rng.rand(n_samples)\n f2 = rng.randint(4, size=n_samples)\n X = np.c_[f1, f2]\n y = np.zeros(shape=n_samples)\n y[X[:, 1] % 2 == 0] = 1\n clf = HistGradientBoostingClassifier(max_iter=20, categorical_features=[1])\n clf.fit(X, y)\n np.testing.assert_array_equal(clf.is_categorical_, [False, True])\n\n with pytest.raises(\n NotImplementedError, match=r\"Categorical splits are not yet supported.*\"\n ):\n treelite.sklearn.import_model(clf)", "def model_hist(xvar, yvar, modfuncs, nbins=95, crange=(-10.0, 10.0)):\n hists = [TH2D(\n 'hmodel{0}{1}'.format(c, i), 'hmodel{0}{1}'.format(c, i),\n nbins, crange[0], crange[1],\n nbins, crange[0], crange[1]\n ) for (i, c) in ic]\n for xbin in range(nbins):\n xlo = hists[0].GetXaxis().GetBinLowEdge(xbin+1)\n xup = hists[0].GetXaxis().GetBinUpEdge(xbin+1)\n for ybin in range(nbins):\n ylo = hists[0].GetXaxis().GetBinLowEdge(ybin+1)\n yup = hists[0].GetXaxis().GetBinUpEdge(ybin+1)\n name = 'bin_{0}_{1}'.format(xbin, ybin)\n xvar.setRange(name, xlo, xup)\n yvar.setRange(name, ylo, yup)\n for hist, modfunc in zip(hists, modfuncs):\n integral = modfunc.createIntegral(\n RooArgSet(xvar, yvar),\n RooFit.NormSet(RooArgSet(xvar, yvar)),\n RooFit.Range(name)\n ).getVal()\n hist.SetBinContent(xbin+1, ybin+1, integral)\n return hists", "def test_wrong_number_of_filter_thresholds(self, threshold, n_inputs):\n with pytest.raises(ValueError, match=\"If multiple filter_thresholds are provided\"):\n coefficients(self.dummy_fn, n_inputs, 2, True, filter_threshold=threshold)", "def cut(x, y, scalars, area):\n xmin, xmax, ymin, ymax = area\n if len(x) != len(y):\n raise ValueError(\"x and y must have the same length\")\n inside = [i for i in xrange(len(x))\n if x[i] >= xmin and x[i] <= xmax and y[i] >= ymin and y[i] <= ymax]\n return [x[inside], y[inside], [s[inside] for s in scalars]]", "def cut(x, y, scalars, area):\n xmin, xmax, ymin, ymax = area\n if len(x) != len(y):\n raise ValueError(\"x and y must have the same length\")\n inside = [i for i in xrange(len(x))\n if x[i] >= xmin and x[i] <= xmax\n and y[i] >= ymin and y[i] <= ymax]\n return [x[inside], y[inside], [s[inside] for s in scalars]]" ]
[ "0.55629873", "0.5544333", "0.54654413", "0.53966707", "0.5293295", "0.5175403", "0.5154368", "0.50937045", "0.5059277", "0.5027049", "0.50096345", "0.49881732", "0.49795693", "0.4978986", "0.49496424", "0.4933744", "0.4921186", "0.49105307", "0.49013457", "0.48807377", "0.48686138", "0.48626393", "0.48209476", "0.4818179", "0.48062032", "0.47978446", "0.47949582", "0.4768826", "0.47638527", "0.47498536", "0.4729141", "0.4719727", "0.47184148", "0.4717857", "0.47169614", "0.4711209", "0.4709669", "0.4705734", "0.47018006", "0.469633", "0.46939197", "0.46908796", "0.4687088", "0.46819073", "0.46701536", "0.46660504", "0.46658692", "0.46604714", "0.46585548", "0.465407", "0.46426988", "0.46391135", "0.4635308", "0.46216506", "0.46153283", "0.4602054", "0.45966792", "0.45963544", "0.45899552", "0.45833117", "0.45666677", "0.4562584", "0.45595375", "0.45471376", "0.45424253", "0.45334724", "0.45323044", "0.45229122", "0.45212153", "0.4511119", "0.45106092", "0.45073536", "0.4505167", "0.45008215", "0.44973966", "0.44961432", "0.44960845", "0.44877875", "0.4483125", "0.44813848", "0.44791853", "0.44779688", "0.4471486", "0.44703308", "0.44654804", "0.446297", "0.4454724", "0.44518557", "0.44458064", "0.4439374", "0.4434735", "0.44282925", "0.44160154", "0.44157645", "0.44118264", "0.44085428", "0.44051948", "0.43994242", "0.43938276", "0.43849838" ]
0.6186934
0
Audit the commit for proper end-of-line characters. The UNIX type EOL is the only allowed EOL character.
def audit_eol(self): # Regex's.... re_commit = re.compile("^\xff(.+)\xff$") re_filename = re.compile("^diff --(cc |git a\/.+ b\/)(.+)$") blocked_eol = re.compile(r"(?:\r\n|\n\r|\r)$") # Bool to allow special files such as vcards to bypass the check eol_allowed = False # Do EOL audit! process = get_change_diff( self.repository, ["-p"] ) for line in process.stdout: commit_change = re.match( re_commit, line ) if commit_change: commit = commit_change.group(1) continue file_change = re.match( re_filename, line ) if file_change: filename = file_change.group(2) eol_violation = False eol_allowed = False # Check if it's an allowed mimetype # First - check with the mimetypes system, to see if it can tell guessed_type, _ = mimetypes.guess_type(filename) if guessed_type in self.ALLOWED_EOL_MIMETYPES: eol_allowed = True continue # Second check: by file extension # NOTE: This uses the FIRST dot as extension splitted_filename = filename.split(os.extsep) # Check if there's an extension or not # NOTE This assumes that files use dots for extensions only! if len(splitted_filename) > 1: extension = splitted_filename[1] if extension in self.ALLOWED_EOL_EXTENSIONS: eol_allowed = True continue # Unless they added it, ignore it if not line.startswith("+"): continue if re.search( blocked_eol, line ) and not eol_violation: # Is this an allowed filename? if eol_allowed: continue # Failure has been found... handle it eol_violation = True self.__log_failure(commit, "End of Line Style (non-Unix): " + filename);
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eol(self):\n if self.current not in EOL:\n self.on_parser_error(\"EOL expected\")\n self.maybe_eol()", "def _output_commit_line(self): # noqa: C901, E501 pylint: disable=too-many-branches\n seen_this = False\n chars_written = 0\n for i in range(self.num_columns + 1):\n if i == self.num_columns:\n if seen_this:\n break\n col_commit = self.commit\n else:\n col = self.columns[i]\n col_commit = self.columns[i].commit\n\n if col_commit == self.commit:\n seen_this = True\n self.buf += '*'\n chars_written += 1\n\n if self.num_parents > 2:\n chars_written += self._draw_octopus_merge()\n elif seen_this and self.num_parents > 2:\n self._write_column(col, '\\\\')\n chars_written += 1\n elif seen_this and self.num_parents == 2:\n # This is a 2-way merge commit. There is no\n # GraphState.PRE_COMMIT stage for 2-way merges, so this is the\n # first line of output for this commit. Check to see what the\n # previous line of output was.\n #\n # If it was GraphState.POST_MERGE, the branch line coming into\n # this commit may have been '\\', and not '|' or '/'. If so,\n # output the branch line as '\\' on this line, instead of '|'.\n # This makes the output look nicer.\n if (self.prev_state == GraphState.POST_MERGE and\n self.prev_commit_index < i):\n self._write_column(col, '\\\\')\n else:\n self._write_column(col, '|')\n chars_written += 1\n else:\n self._write_column(col, '|')\n chars_written += 1\n self.buf += ' '\n chars_written += 1\n\n self._pad_horizontally(chars_written)\n if self.num_parents > 1:\n self._update_state(GraphState.POST_MERGE)\n elif self._is_mapping_correct():\n self._update_state(GraphState.PADDING)\n else:\n self._update_state(GraphState.COLLAPSING)", "def test_no_final_eol(self, env: yaenv.Env):\n from tempfile import mkstemp\n env.envfile = mkstemp()[-1]\n with open(env, 'w') as f:\n f.write('EOL=no')\n env['BLANK'] = ''\n with open(env, 'r') as f:\n assert len(f.readlines()) == 2", "def eat_EOL(self):\n # print(\"Start eating EOL\")\n self.eat(EOL)\n while self.current_token.type == EOL:\n self.eat(EOL)\n # print(\"Stop eating EOL\")", "def escape_eol_chars(options):\n pass", "def log(self, chars):\n self.insert(END, chars+'\\n')\n self.see(END)\n self.update()", "def _(event):\n if line.is_multiline:\n line.newline()\n else:\n if line.validate():\n cli_ref().line.add_to_history()\n cli_ref().set_return_value(line.document)", "def _endline(line):\n return line.rstrip() + '\\n'", "def test_message_truncated_correctly_commit_log_entry(self):\n commit = collection_models.CollectionCommitLogEntryModel.create(\n 'b', 0, 'committer_id', 'a', 'a' * 400, [{}],\n constants.ACTIVITY_STATUS_PUBLIC, False)\n commit.collection_id = 'b'\n commit.update_timestamps()\n commit.put()\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)\n\n # Ensure nothing happens to messages of proper length.\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)", "def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? 
changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line", "def test_end_of_line_single_char_last_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"7.0\", \"7.0\"),\n after_sel=(\"7.1\", \"7.1\"),\n command_name=\"end-of-line\",\n )", "def test_dos_eol():\n import figleaf, figleaf.annotate_html\n \n figleaf.start()\n execfile(os.path.join(thisdir, 'tst_dos_eol.py'))\n figleaf.stop()\n\n coverage = figleaf.get_data().gather_files()\n\n tmpdir = tempfile.mkdtemp('.figleaf')\n\n try:\n figleaf.annotate_html.report_as_html(coverage, tmpdir, [], {})\n finally:\n files = glob.glob('%s/*' % (tmpdir,))\n for f in files:\n os.unlink(f)\n os.rmdir(tmpdir)", "def maybe_eol(self):\n if self.current == CR:\n self.next()\n if self.current == LF:\n self.next()\n elif self.current == LF:\n self.next()", "def convert_line_endings():\n files = []\n for ext in [\n \".py\",\n \".sh\",\n \"Dockerfile\",\n \".txt\",\n \".csv\",\n \".mhd\",\n \".gitignore\",\n ]:\n files.extend(Path(\".\").glob(f\"**/*{ext}\"))\n\n for file in files:\n with open(str(file), \"rb\") as f:\n lines = f.read()\n\n lines = lines.replace(EOL_WIN, EOL_UNIX).replace(EOL_MAC, EOL_UNIX)\n\n with open(str(file), \"wb\") as f:\n f.write(lines)", "def do_EOF(self, line):\n print()\n 
models.storage.save()\n return True", "def __convertEOL(self):\n aw = self.activeWindow()\n aw.convertEols(aw.eolMode())", "def do_EOF(self, line):\n print(\"\")\n return True", "def fix_line_endings(fname, eol=b'\\n'):\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)", "def logwrite(self, line):\n sql = b\"update log set log_text=concat(log_text,'\" + self.__timestamp() + line + \"') where log_id=\" + self.logid +\";\\n\"\n self.logme.stdin.write(sql)\n self.logme.stdin.flush()\n return True", "def _check_last_character(line_index, input_line, code_character):\n global _total_lines_of_code\n if input_line.endswith(code_character):\n _code_lines.append(line_index)\n _total_lines_of_code += 1", "def GetEOLChar(self):\n m_id = self.GetEOLMode()\n if m_id == wx.stc.STC_EOL_CR:\n return u'\\r'\n elif m_id == wx.stc.STC_EOL_CRLF:\n return u'\\r\\n'\n else:\n return u'\\n'", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def eol(self):\n return self.pos == len(self.tokens)", "def git_append(msg):\n pipe = Popen('git log -1 --pretty=%B', stdout=PIPE, shell=True)\n old_msg = pipe.stdout.read()\n new_msg = '%s\\n%s' % (old_msg.rstrip(), msg)\n\n pipe = Popen('git commit --amend --file=-', stdin=PIPE, shell=True)\n pipe.communicate(new_msg)", "def do_EOF(self, line):\n print()\n return True", "def expect_eol(self):\n if self.length != 0:\n raise ParseError('Spurius words after parsing instruction')", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < 
len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if 
commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def process(self):\n\n form = cgi.FieldStorage()\n commit = self.read_commit(form)\n\n print(\"Content-Type: text/plain; charset='utf-8'\\r\")\n print(\"Cache-Control: max-age=60\\r\")\n if form.getfirst(\"download\", \"false\") == \"true\":\n print(\"Content-Disposition: attachment; filename=\\\"patch.txt\\\"\\r\")\n\n print(\"\\r\")\n\n print((\"#\" + json.dumps(PostsaiCommitViewer.format_commit_header(commit), default=convert_to_builtin_type)))\n sys.stdout.flush()\n PostsaiCommitViewer.dump_commit_diff(commit)", "def FixLineEndingsForWindows(self,str):\n # TODO: this should not really be part of this class\n if str[-2:]==\"\\r\\n\":\n return str\n if str[-1:]==\"\\n\":\n return str[:-1]+\"\\r\\n\"\n else:\n return str + \"\\r\\n\"", "def do_eof(self, line):\n print \"\"\n return True", "def open_editor_to_amend_commit():\n command = f\"git commit --amend\"\n logging.debug(f\"Executing command: {command}\")\n p = subprocess.Popen(command, shell=True)\n p.communicate()", "def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"EOF\")\n out = f.getvalue()\n self.assertTrue(len(out) == 1)\n self.assertEqual(\"\\n\", out)\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"EOF fake\")\n msj = f.getvalue().strip()\n self.assertFalse(len(msj) == 1)\n self.assertEqual(\"\", msj)", "def test_archive_commitlog_point_in_time_ln(self):\n self.run_archive_commitlog(restore_point_in_time=True, archive_command='ln')", "def lf(self):\n self._write('\\n')", "def _clean_commit(self, line):\n cleaned_line = {\n 'repo': line['origin'],\n 'hash': line['data_commit'],\n 'author': line['data_Author'],\n 'category': \"commit\",\n 'created_date': utils.str_to_dt_data(line['data_AuthorDate']),\n 'commit': line['data_Commit'],\n 'commit_date': utils.str_to_dt_data(line['data_CommitDate']),\n 'files_no': 
len(line['data_files']),\n 'refs': line['data_refs'],\n 'parents': line['data_parents'],\n 'files': line['data_files']\n }\n\n actions = 0\n for file in line['data_files']:\n if 'action' in file:\n actions += 1\n cleaned_line['files_action'] = actions\n\n try:\n non_merge = math.isnan(line['data_Merge'])\n\n except (TypeError, KeyError):\n non_merge = False\n\n cleaned_line['merge'] = not non_merge\n return cleaned_line", "def test_end_of_buffer(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.3\", \"1.3\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"end-of-buffer\",\n )", "def test_kill_to_end_of_line_after_last_visible_char(self):\n before_b = \"\"\"\\\n line 1\n # The next line contains two trailing blanks.\n line 3 \n line 4\n \"\"\"\n after_b = \"\"\"\\\n line 1\n # The next line contains two trailing blanks.\n line 3line 4\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.6\", \"3.6\"),\n after_sel=(\"3.6\", \"3.6\"),\n command_name=\"kill-to-end-of-line\",\n )", "def test_end_of_line_internal_blank_line(self):\n before_b = \"\"\"\\\n first line\n\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"2.0\"),\n after_sel=(\"2.0\", \"2.0\"),\n command_name=\"end-of-line\",\n )", "def git_commit(self, msg):\n self.git_repo.git.add(all=True)\n self.git_repo.git.commit(message='[dots] {}'.format(msg))", "def fix_end(self, node):\n if node.header.tokens[0].type == Token.SEPARATOR:\n indent = node.header.tokens[0]\n else:\n indent = Token(Token.SEPARATOR, self.formatting_config.separator)\n node.end = End([indent, Token(Token.END, \"END\"), Token(Token.EOL)])", "def test_end_of_line_blank_last_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"7.0\", \"7.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"end-of-line\",\n )", "def commit_history(cli):\n result = []\n record = OrderedDict()\n for line in cli.splitlines():\n r = re.search(' ([A-Z][a-z]+(?: ID)?): (.*?) +([A-Z][a-z]+): (.*)', line)\n if not r:\n continue\n record[r.group(1)] = r.group(2)\n record[r.group(3)] = r.group(4)\n if r.group(3) == 'Comment':\n result.append(record)\n record = OrderedDict()\n return result", "def deleteLastChar (self) :\r\n c = self.data_.drop();\r\n # Notice the \\n so we can notice when new lines begin\r\n if (c=='\\n') :\r\n self.lineNumber_ -= 1\r\n # Find last \\n ... 
if we can\r\n index_of_last_newline = -1\r\n for ii in xrange(0, len(self.data_)) :\r\n if (self.data_.peek(len(self.data_)-ii-1)=='\\n') :\r\n index_of_last_newline = ii\r\n break \r\n \r\n self.charNumber_ = index_of_last_newline\r\n if (index_of_last_newline==-1) : self.charNumber = 80\r\n else :\r\n self.charNumber_-=1;", "def _output_skip_line(self):\n self.buf += '...'\n self._pad_horizontally(3)\n\n if self.num_parents >= 3 and self.commit_index < self.num_columns - 1:\n self._update_state(GraphState.PRE_COMMIT)\n else:\n self._update_state(GraphState.COMMIT)", "def test_diff_git_line_without_a_b_quotes(self):\n diff = (\n b'diff --git \"foo\" \"foo\"\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def postcmd(self, stop, line):\n\n if line and shlex.split(line)[0] == 'commit':\n # for the moment, self.target is the indication of a successful creation\n if self.target:\n return True\n else:\n return False\n\n return AttributeEditor.postcmd(self, stop, line)", "def test_very_verbose_output_not_truncated(self, monkeypatch):\n hooks = setup_hooks(very_verbose=True)\n line_length = 20\n monkeypatch.setattr(\n \"repobee_junit4._output._truncate_lines\",\n partial(_output._truncate_lines, max_len=line_length),\n )\n\n result = hooks.act_on_cloned_repo(FAIL_REPO)\n\n lines = result.msg.split(os.linesep)\n assert len(lines) > 1\n # the first line can be somewhat longer due to staus message\n # and color codes\n assert any([len(line) > line_length for line in lines[1:]])", "def tprint_raw(self, cmd, end='\\n'):\n self.fileHandle.write(cmd + end)", "def multiline_carriage_return(event):\n b = event.cli.current_buffer\n carriage_return(b, event.cli)", "def _update_end_lineno():\n if origin:\n record.origin.line_end = lineno", "def main():\n smart_commit_msg_filename = SMART_COMMIT_MSG_FILENAME\n paths = get_staged_paths()\n if not len(paths):\n raise Exception(\"did you even add anything to staging\")\n paths += [smart_commit_msg_filename]\n mr_edited_file = max(paths, key=lambda k: os.path.getmtime(k))\n if mr_edited_file == smart_commit_msg_filename:\n print(git_commit())\n else:\n print(\"Update the patch notes!\")", "def commit(self, msg):\n self.runtime.logger.info('Commit config: {}'.format(msg))\n with Dir(self.runtime.metadata_dir):\n exectools.cmd_assert([\"git\", \"add\", \".\"])\n exectools.cmd_assert([\"git\", \"commit\", \"--allow-empty\", \"-m\", msg])", "def test_verbose_output_is_truncated(self, monkeypatch):\n hooks = setup_hooks(verbose=True)\n line_length = 20\n monkeypatch.setattr(\n \"repobee_junit4._output._truncate_lines\",\n partial(_output._truncate_lines, max_len=line_length),\n )\n\n result = hooks.act_on_cloned_repo(FAIL_REPO)\n\n lines = result.msg.split(os.linesep)[1:] # skip summary line\n assert len(lines) > 1\n # the first line can be somewhat longer due to staus message\n # and color codes\n assert all([len(line) <= line_length for line in lines[1:]])", "def test_opt_charsetEndOfLine(self):\n line = b\"CHARSET UTF-8\"\n identifier, remainder = self.server.opt_charset(line)\n 
self.assertEqual(identifier, b\"UTF-8\")\n self.assertEqual(remainder, b\"\")", "def writeline(self, line):\n self.sendall((six.text_type(line) + u'\\r\\n').encode(self.encoding))", "def amend_commit_with_file(tmp_file_name):\n command = f\"git commit --amend --allow-empty -F {tmp_file_name}\"\n logging.debug(f\"Executing command: {command}\")\n p = subprocess.Popen(command, shell=True)\n p.communicate()", "def test_end_of_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"1.10\", \"1.10\"),\n command_name=\"end-of-line\",\n )", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def eos(comment):\n comment=re.sub(r'(\\W+/.)',r'\\1 \\n',comment,flags=re.IGNORECASE)\n return comment", "def _get_line(self):\n line = self.file.readline(self.maxline + 1)\n if len(line) > self.maxline:\n print(f\"ERROR: got more than {self.maxline} bytes\")\n if not line:\n print(\"Received EOF\")\n if line[-2:] == CRLF:\n line = line[:-2]\n elif line[-1:] in CRLF:\n line = line[:-1]\n return line + CRLF", "def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()", "def end_of_line():\n d = get_app().current_buffer.document\n at_end = d.is_cursor_at_the_end_of_line\n last_line = d.is_cursor_at_the_end\n\n return bool(at_end and not last_line)", "def __read_last_lines(self) -> str:\n with open(LOGFILE_OPENINGS, \"r\", encoding=\"utf-8\") as f:\n last_lines = f.readlines()[-10:]\n return \" 🌸 \" + \"\\n🌸 \".join(\n map(lambda l: repr(LogLine.from_line(l)), last_lines)\n )", "def process_IN_CLOSE_WRITE(self, event):\n self.git.post_change(event.pathname, commit_msg=\"dotfile_tracker update: \"+event.pathname)", "def is_eof(line):\n return line == \"\"", "def getEOLComment(self, address: ghidra.program.model.address.Address) -> unicode:\n ...", "def ensure_ending_newline(self, text):\n if text and text[-1] != '\\n':\n return text + '\\n'\n else:\n return text", "def svn_diff_hunk_readline_modified_text(*args):\n return _diff.svn_diff_hunk_readline_modified_text(*args)", "def post_command(self) -> str:\n rtn = ''\n if self.terminator:\n rtn += self.terminator\n\n if self.suffix:\n rtn += ' ' + self.suffix\n\n if self.pipe_to:\n rtn += ' | ' + self.pipe_to\n\n if self.output:\n rtn += ' ' + self.output\n if self.output_to:\n rtn += ' ' + self.output_to\n\n return rtn", "def test_nextLineAtEnd(self):\n s = 'hello world'\n self.widget.buffer = s\n self.widget.setInputHistory(History(['first', 'second', 'last']))\n self.widget.keystrokeReceived('\\x0e', None)\n self.assertEqual(self.widget.buffer, s)\n self.assertEqual(self.widget.cursor, 0)", "def test_get_git_commit(self):\n git_commit = get_git_commit()\n # output format: ['fafdb957049917ede565cebc58b29899f597fb5a', 'Fri Mar 29 11:09:50 2019 -0400']\n self.assertEqual(len(git_commit[0]), 
40)\n self.assertEqual(len(git_commit[1].split()), 6)", "def _(event):\n buffer = event.current_buffer\n\n if buffer.document.is_cursor_at_the_end_of_line:\n buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=False)\n else:\n buffer.cursor_position += buffer.document.get_end_of_line_position()", "def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))", "def _padding_line(self):\n if self.state != GraphState.COMMIT:\n self._next_line()\n return\n\n # Output the row containing this commit\n # Iterate up to and including self.num_columns, since the current commit\n # may not be in any of the existing columns. 
(This happens when the\n # current commit doesn't have any children that we have already\n # processed.)\n for i in range(self.num_columns):\n col = self.columns[i]\n self._write_column(col, '|')\n if col.commit == self.commit and self.num_parents > 2:\n self.buf += ' ' * (self.num_parents - 2) * 2\n else:\n self.buf += ' '\n\n self._pad_horizontally(self.num_columns)\n\n # Update self.prev_state since we have output a padding line\n self.prev_state = GraphState.PADDING", "def test_newlinesBeforeLineBreaking(self):\n # Because MAX_COMMAND_LENGTH includes framing characters, this long\n # line is slightly longer than half the permissible message size.\n longline = \"o\" * (irc.MAX_COMMAND_LENGTH // 2)\n\n self.client.msg(\"foo\", longline + \"\\n\" + longline)\n self.assertEqual(\n self.client.lines, [\"PRIVMSG foo :\" + longline, \"PRIVMSG foo :\" + longline]\n )", "def test_diff_git_line_without_a_b_and_spaces_changed(self):\n diff = (b'diff --git foo bar1 foo bar2\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n')\n\n with self.assertRaises(DiffParserError) as cm:\n self.tool.get_parser(diff).parse()\n\n self.assertTrue(str(cm.exception).startswith(\n 'Unable to parse the \"diff --git\" line'))", "def test_diff_git_line_without_a_b_and_spaces_quotes(self):\n diff = (\n b'diff --git \"foo bar1\" \"foo bar1\"\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar1',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def _newLine(self, usePos = True):", "def end_of_line():\r\n set_point(point().end_of_line())", "def test_write_quotes_unix(self):\n\n # Set up the\n self.config[api.APP_NAME]['line_separator'] = 'unix'\n\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes1.txt\")\n quotes = api.read_quotes(path)\n\n # Call write_quotes to write file\n api.write_quotes(path, quotes)\n\n # Verify unix line separator used when file written\n with open(path, \"rb\") as openfile:\n whole_file = openfile.read().decode(\"utf-8\")\n expected = \"The Linux philosophy is 'Laugh in the face of danger'. Oops. Wrong One. 'Do it yourself'. Yes, that's it. | Linus Torvalds | | U\\n\" + \\\n \"The depressing thing about tennis is that no matter how good I get, I'll never be as good as a wall. | Mitch Hedberg | | U\\n\" + \\\n \"Ask for what you want and be prepared to get it. | Maya Angelou | | U\\n\" + \\\n \"They that can give up essential liberty to obtain a little temporary safety deserve neither liberty nor safety. 
| Ben Franklin | | U\\n\"\n self.assertEqual(expected, whole_file)", "def test_diff_git_line_without_a_b_and_spaces(self):\n diff = (\n b'diff --git foo bar1 foo bar1\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar1',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def end(self, commit: bool) -> None:\n ...", "def test_emptyline(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"\\n\")\n out = \"\"\n self.assertEqual(out, f.getvalue())\n\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\" \\n\")\n out = \"\"\n self.assertEqual(out, f.getvalue())", "def __init__(self):\n super(LineEnd, self).__init__(r\"$\", regex.MULTILINE)", "def writeLog(msg, addEndline=True):\n\n with open(LOG_FILE, \"a\") as f:\n f.write(\"\\n\")\n f.write(msg)\n \n if addEndline == True:\n f.write(\"\\n---------------------------------------------\\n\")", "def commit(self) -> None:\n if self._edits and len(self._edits) > 0:\n self._update(\"\\n;\\n\".join(self._edits))\n self._edits = None", "def newLineEvent(self, line):\n self.newLine_callback(line)", "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def commit(self):\n # PEP 249\n pass", "def output_line(self, line, eol=b'\\r\\n'):\n self.queue_output(bytes(line, 'utf-8') + eol)", "def _writeline(self, data):\n self._write(data+chr(13)+chr(10))", "def test_no_body_max_line_length_option_ignored(self, custom_config):\n del custom_config['body']['max_line_length']\n check = CommitMessagesCheck(CheckConfig('whatever', 'error', **custom_config))\n result = check.run(\n {\n 'commits': [\n {\n 'stats': {'total': 2},\n 'message': 'xxxxx\\n\\n{}'.format('A' * 1000),\n 'sha': 'aa',\n 'url': '',\n }\n ]\n }\n )[0]\n assert result.success is True", "def commit(self):\n run('git', 'add', '.')\n run('git', 'commit', '-a', '-m', 'updates')", "def end_of_line_marker(self,event):\n for child in self.app.children:\n child.source.SetViewEOL(event.IsChecked())\n self.set('ViewEol',event.IsChecked())", "def ignore_newline(self, t):\n self.lineno += t.value.count('\\n')", "def _output_pre_commit_line(self):\n assert self.num_parents >= 3, 'not enough parents to add expansion row'\n num_expansion_rows = (self.num_parents - 2) * 2\n\n # self.expansion_row tracks the current expansion row we are on.\n # It should be in the range [0, num_expansion_rows - 1]\n assert (0 <= self.expansion_row < num_expansion_rows), \\\n 'wrong number of expansion rows'\n\n # Output the row\n seen_this = False\n chars_written = 0\n for i in range(self.num_columns):\n col = self.columns[i]\n if col.commit == self.commit:\n seen_this = True\n self._write_column(col, '|')\n self.buf += ' ' * self.expansion_row\n chars_written += 1 + self.expansion_row\n elif seen_this and (self.expansion_row == 0):\n # This is the first line of the pre-commit output. 
If the\n # previous commit was a merge commit and ended in the\n # GraphState.POST_MERGE state, all branch lines after\n # self.prev_commit_index were printed as \"\\\" on the previous\n # line. Continue to print them as \"\\\" on this line. Otherwise,\n # print the branch lines as \"|\".\n if (self.prev_state == GraphState.POST_MERGE and\n self.prev_commit_index < i):\n self._write_column(col, '\\\\')\n else:\n self._write_column(col, '|')\n chars_written += 1\n elif seen_this and (self.expansion_row > 0):\n self._write_column(col, '\\\\')\n chars_written += 1\n else:\n self._write_column(col, '|')\n chars_written += 1\n self.buf += ' '\n chars_written += 1\n\n self._pad_horizontally(chars_written)\n\n # Increment self.expansion_row, and move to state GraphState.COMMIT if\n # necessary\n self.expansion_row += 1\n if self.expansion_row >= num_expansion_rows:\n self._update_state(GraphState.COMMIT)", "def dump_commit_diff(commit):\n\n for file in commit:\n if file[4] == \"\" or \".\" not in file[4]:\n sys.stdout.flush()\n print((\"Index: \" + file[3] + \" deleted\\r\"))\n sys.stdout.flush()\n else:\n subprocess.call([\n \"cvs\",\n \"-d\",\n file[8],\n \"rdiff\",\n \"-u\",\n \"-r\",\n PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),\n \"-r\",\n file[4],\n file[3]])", "def on_commit_comment(self, payload):\n pass" ]
[ "0.6151211", "0.6065239", "0.57468516", "0.5741316", "0.5723494", "0.5639385", "0.5574638", "0.5561204", "0.5554823", "0.55486727", "0.553186", "0.5530341", "0.55275774", "0.5481987", "0.54660696", "0.540383", "0.5398025", "0.5388231", "0.53565466", "0.53498983", "0.5348075", "0.53350383", "0.53350383", "0.53350383", "0.5301547", "0.5287191", "0.52785945", "0.5272429", "0.5216899", "0.5199439", "0.51965606", "0.51958996", "0.5182201", "0.51658374", "0.5151651", "0.5131205", "0.5123198", "0.5110141", "0.5095969", "0.5095288", "0.50862616", "0.50832283", "0.5070124", "0.50539064", "0.5050461", "0.5045376", "0.5043486", "0.5032805", "0.50019383", "0.4998493", "0.49959862", "0.4994089", "0.4983356", "0.49682492", "0.49674013", "0.49418634", "0.49400648", "0.49352655", "0.49313456", "0.49303907", "0.49302313", "0.49287176", "0.49250227", "0.49109894", "0.49083823", "0.49077713", "0.49035642", "0.49013", "0.4892948", "0.48900348", "0.48793516", "0.4862601", "0.48596126", "0.4858863", "0.4855316", "0.48519164", "0.48485437", "0.48484778", "0.4840892", "0.48404077", "0.48399553", "0.48392496", "0.48383242", "0.4836133", "0.48309672", "0.48260388", "0.48030323", "0.48012698", "0.47939387", "0.4791329", "0.47811088", "0.47797728", "0.47680917", "0.47644937", "0.47640824", "0.47640216", "0.47631606", "0.47631282", "0.47598407", "0.47589427" ]
0.83223575
0
Takes a user and a group name, and returns `True` if the user is in that group.
def is_in_group(user, group_name): return is_in_group_user_id(user.id, group_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_user_in_group(user, group):\n users = group.get_users()\n if user in users:\n return True\n return False", "def is_in_group(user, group_name):\n return user.groups.filter(name__exact=group_name).exists()", "def is_in_group(user, group_name):\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()", "def is_user_in_group(user, group):\n\n if user == group.get_name():\n return True\n elif user in group.get_users():\n return True\n else:\n for group in group.get_groups():\n return is_user_in_group(user, group)\n\n return False", "def is_user_in_group(user: str, group: Group) -> bool:\n if group is None or user is None or user is \"\":\n return False\n if user in group.get_users():\n return True\n for sub_group in group.get_groups():\n user_exists = is_user_in_group(user, sub_group)\n if user_exists:\n return True\n return False", "def is_user_in_group(user, group):\r\n if type(group) is not Group:\r\n raise ValueError(\"Not a valid group\")\r\n\r\n if type(user) is not str:\r\n raise ValueError(\"Not a valid user\")\r\n\r\n user_name = find_user(user, group)\r\n if user_name == \"\":\r\n return False\r\n\r\n return True", "def _is_in_group(user, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()\n except Group.DoesNotExist:\n return None", "def is_user_in_group(user, group):\n # Check group\n if user in group.users: # O(N)\n return True\n\n # Check subgroups\n for sub_group in group.groups: # O(N)\n if is_user_in_group(user, sub_group):\n return True\n\n return False", "def is_user_in_group(_cls, user, group):\n if user is None or group is None:\n return \"Please enter a valid user and group\"\n\n if user in group.get_users():\n return True\n else:\n for sub_group in group.get_groups():\n if Group.is_user_in_group(user, sub_group):\n return True\n\n return False", "def user_in_group(user, *group_names):\n\treturn bool(user.groups.filter(name__in=group_names)) | user.is_superuser", "def is_user_in_group(user, group):\n sub_user=group.get_users() # Get all the users within the group\n\n if user in sub_user: # If user is within the group, return True\n return True\n\n sub_group=group.get_groups() # Get all the sub groups within the group\n\n if len(sub_group)==0: # Base case if there are no sub groups within group\n return False\n\n for item in sub_group: # Recursively search within sub groups for the user\n return is_user_in_group(user,item)\n return False", "def _user_belongs_to(group_name):\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups", "def has_group(user, group_name):\n return user.groups.filter(name=group_name).exists()", "def is_in_group_user_id(user_id, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user_id).exists()\n except Group.DoesNotExist:\n return None", "def has_group(group, user, request):\n return group_names[group] in groupfinder(user.username, request)", "def alreay_in_group(self,uid,group_id):\n uid = str(uid)\n user_group_list = self.get_group_list_via_uid(uid)\n return True if group_id in user_group_list else False", "def is_in_group(self, group):\n return group in self.get_all_groups()", "def group_authenticated(self, user_token, group):\n if self.authenticated(user_token):\n token = self.token_storage.get(user_token)\n groups = self.get_groups(token.username)\n if group in groups:\n return True\n\n return False", "def is_group(self, group_name):\n\n return group_name in self._group", "def 
in_group(self, group):\n\n return self.secondary_groups.filter(\n groups_users.c.group_id == group.id).count() > 0", "def userMemebership(self, username, group):\r\n return group in self.getUserGroups(username)", "def is_member_of_group(self, mail, group):\n members = self.get_group_members(group)\n\n if mail in members:\n return True\n return False", "def belongs_to(self, group):\n return self in group.users", "def has_group(self,groupname):\n\n if not self.check_prereqs():\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" WHERE $groupname_field$='$groupname$'\",{'groupname':groupname,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: has_group: %s\" % (query,))\n\n cursor.execute(query)\n for row in cursor:\n return True\n return False", "def has_permission(user, required_groups):\n user_groups = set([g.name for g in user.groups.all()])\n return user_groups.issuperset(required_groups)", "def IsObjectInGroup(object_id, group_name=None):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n count = rhobj.GroupCount\n if count<1: return False\n if not group_name: return True\n index = scriptcontext.doc.Groups.Find(group_name, True)\n if index<0: raise ValueError(\"%s group does not exist\"%group_name)\n group_ids = rhobj.GetGroupList()\n for id in group_ids:\n if id==index: return True\n return False", "def in_projects_admin_group(user):\n if user:\n return user.groups.filter(name='projects_admin').count() != 0", "def test_has_access_is_in_group(self):\n user, usrmgr_mock = self.__get_test_instance(\n \"@foouser\", 1337, group=\"foogroup\")\n usrmgr_mock.return_value.user_is_in_group.return_value = True\n with patch.object(user, \"save\"):\n user.has_access(\"foogroup\")", "def allowed_group_access_use(user, group):\n return (user.has_perm(\"vnswww.group_use_any\")\n or (user.has_perm(\"vnswww.group_use_org\")\n and group.org == user.get_profile().org))", "def has_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for g in self.groups.query(name=group.name):\n if g.name == group.name:\n return True\n\n return False", "def check_presence_groups(self, id_user:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_users_groups} where id_user={id_user};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We faced problems with checking the groups for users. 
Error: {e}\"\n self.proceed_error(msg)\n return False", "def __is_permission_in_groups(self, name: str) -> bool:\n permission = Permission.objects.get(codename=name)\n\n for group_name in main_app_groups:\n group = Group.objects.get(name=group_name)\n if permission in group.permissions.all():\n return True\n\n return False", "def test_by_user_user_is_in_group(self):\n recipient = self.create_user()\n thread = self.create_thread(recipient=recipient)\n result = Thread.public.by_user(user=recipient)\n self.assertIn(thread, result)", "def get_member_from_group(member, group_name):\n query= \"SELECT * FROM groupmembers WHERE member='{}' AND group_id='{}'\".format(member, group_name)\n cur.execute(query)\n result = cur.fetchall()\n if len(result) > 1:\n return True\n return False", "def group_required(*group_names):\n\n def in_groups(current_user):\n if not settings.ENABLE_PERMISSIONS:\n return True\n if current_user.is_authenticated:\n if current_user.groups.filter(name__in=group_names).exists():\n return True\n return False\n\n return user_passes_test(in_groups)", "def group_required(group_names):\n\ttry:\n\t\tuser = CrequestMiddleware.get_request().user\n\t\tif user.is_authenticated():\n\t\t\ttest = user.groups.filter(name=group_names).exists()\n\texcept (AttributeError):\n\t\ttest = False\n\n\n\treturn user_passes_test(test)", "def check_user_group(required_groups):\n\n if current_user.is_anonymous:\n raise UnauthorisedAccessError\n\n master_group = (PermissionGroups.query \n .filter_by(group_name='Master')\n .first())\n if master_group in current_user.access_groups:\n return True\n\n access = [current_user.has_auth_access(PermissionGroups.query.filter_by(\n group_name=group).first())\n for group in required_groups]\n if not any(access):\n raise UnauthorisedAccessError", "def is_group(group_name):\n\n try:\n r_json = requests.get(\n 'https://api.rozklad.org.ua/v2/groups/{}'.format(group_name)).json()\n message_text = r_json['message']\n if message_text == 'Ok':\n return True\n elif message_text == 'Group not found':\n return False\n else:\n logger.error(message_text)\n except ConnectionError as error_text:\n logger.error(error_text)\n except IndexError as error_text:\n logger.error(error_text)", "def group_required(*group_names):\n def in_groups(u):\n if u.is_authenticated():\n if bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n return True\n return False\n return user_passes_test(in_groups)", "def is_in_retina_graders_group(user):\n return user.groups.filter(name=settings.RETINA_GRADERS_GROUP_NAME).exists()", "def is_in_retina_group(user):\n return is_in_retina_graders_group(user) or is_in_retina_admins_group(user)", "def is_in_retina_admins_group(user):\n return user.groups.filter(name=settings.RETINA_ADMINS_GROUP_NAME).exists()", "def group_required(*group_names):\n\n def in_groups(u):\n if u.is_authenticated():\n if bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n return True\n return False\n return user_passes_test(in_groups)", "def check_user_has_owner_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n return userName in owners", "def group_exists(self):\n return AzureTools().group_exists(names.group_name(self))", "def check_user_group_connection(self, id_group:int, id_user:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_users_groups} WHERE id_group={id_group} AND id_user={id_user};\").fetchone()\n if value_list:\n return True\n return False\n 
except Exception as e:\n msg = f\"We have problem with getting values from the {table_users_groups}. Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def security_group_exists(self, sg_id=None, name=None):\n if sg_id:\n return sg_id in [sg.id for sg in self.get_all_security_groups()]\n elif name:\n return name in [sg.name for sg in self.get_all_security_groups()]", "def verify_user_group_details(connection_obj, uid, group, device=\"server\"):\n output = get_user_group_details(connection_obj,device=device)\n if not output:\n st.log(\"Output not found {}\".format(output))\n return False\n if uid:\n user_data = re.findall(r\"uid=\\d+\\({}\\)\".format(uid), output)\n if not user_data:\n st.log(\"User data not found -- {}\".format(uid))\n return False\n if group:\n group_data = re.findall(r\"gid=\\d+\\({}\\)\".format(group), output)\n if not group_data:\n st.log(\"Group data not found -- {}\".format(group))\n return False\n return True", "def group_required(*group_names):\n\tdef in_groups(u):\n\t\tif u.is_authenticated():\n\t\t\tif bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n\t\t\t\treturn True\n\t\treturn False\n\treturn user_passes_test(in_groups, login_url='/')", "def check_gadm(user_id):\n cur = g.db.execute('select gadm from user_group where id_user == ?', [user_id])\n for row in cur.fetchall():\n if row[0] == 1:\n return True\n return False", "def has_call_permission_for_local_group(user, local_group, permission):\n\n \"\"\"Check Feature Access and Local Group Permissions\"\"\"\n if hasattr(user, 'localgroupprofile'):\n local_group_profile = user.localgroupprofile\n if has_call_feature_access_for_local_group(local_group):\n return local_group_profile.has_permission_for_local_group(\n local_group,\n permission\n )\n\n \"\"\"Otherwise False\"\"\"\n return False", "def is_create_group(string, nickname):\n if string == f\"{nickname} created the group.\":\n return True\n return False", "def is_admin(user):\n return user.groups.filter(name='Profesores').exists()", "def test_user_in_group_can_access(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n\n utils.test_can_access(self, self.url)", "def has_group_address(self, group_address):\n return self.switch.has_group_address(group_address)", "def get_group(self, obj):\n group = Group.objects.filter(name=\"teachers\")\n users = User.objects.filter(groups__in=group)\n if obj in users:\n return \"teachers\"\n else:\n return \"students\"", "def is_member(user: User) -> bool:\n if not user:\n raise TypeError('user should not be None')\n return user.name.startswith('L')", "def is_participant(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.participants_group.pk).exists()\n )", "def es_utilizado(self):\n group = Group.objects.filter(id=self.id)\n group = group.all()[0] if group.exists() else None\n # group = Group.objects.get(name=self.nombre)\n return group.user_set.all().exists() if group is not None else False", "def check_ldap_group_existence(group_id):\n endpoint = f\"/identities/groups/{group_id}\"\n http_response = call_rest_api(endpoint, \"head\", **config.DEFAULT_REST_KWARGS)\n if http_response.status_code == 200: # 200 = 'OK. 
Group exists.'\n return True\n return False", "def perform_graph_call(token, user) -> bool:\n _dict = perform_request(app_config.ENDPOINT, token)\n _ids = get_all_group_ids(token)\n for _id in app_config.GROUP_ID:\n if _id in set(_ids):\n return True\n return False", "def check_group_user_existence(self, group_id, user_id):\n resp, _ = self.head('groups/%s/users/%s' % (group_id, user_id))\n self.expected_success(204, resp.status)\n return rest_client.ResponseBody(resp)", "def has_privileges_for_group(self, group_id: int) -> bool:\n from apps.enrollment.courses.models.group import Group\n\n try:\n group = Group.objects.get(pk=group_id)\n return group.teacher == self or group.course.owner == self or self.user.is_staff\n except Group.DoesNotExist:\n logger.error(\n 'Function Employee.has_privileges_for_group(group_id = %d) throws Group.DoesNotExist exception.' %\n group_id)\n return False", "def add_member(self, user):\n user_in = user.get_groups()\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n print('user is already a member')\n return False\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node", "def check_user_has_read_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n members = dataBase['userGroups'][userGroup]['members']\n return userName in owners or userName in members", "def is_participant(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.participants_group_name).count() > 0:\n return True\n else:\n return False", "def check_group_exists(self, group_name):\n for grp in self.get_list_groups():\n if grp[\"name\"] == group_name:\n return grp[\"id\"], grp[\"members\"]\n\n return None", "def is_group_admin_group(self):\n groups = self['__store']\n return self.group_id in groups.get_group_admin_group_ids()", "def is_member(self, username):\n usernames = [user.username for user in self.members]\n return True if username in usernames else False", "def can_be_accessed(self, user):\n if self.shared_with_everyone:\n return True\n\n if self.user == user or self.users_allowed.filter(pk=user.pk).exists():\n return True\n\n for group in self.groups_allowed.all():\n if user.groups.filter(pk=group.pk).exists():\n return True\n\n return False", "def test_logged_user_in_group_can_access(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n\n utils.test_can_access(self, self.url)", "def check_policy_groups(zone, org, verbose=False):\n complies = True\n\n if not zone:\n zone = get_local_zone(verbose)\n if not zone:\n # some error\n return False\n\n\n # Check that the groups 'ids-user#localzone' and\n # (if org provided) check that 'ids-<org>#localzone'\n # also exists. An error from underlying function\n # calls will cause non-compliance to be flagged.\n if org:\n u = 'ids-%s#%s' % (org, zone)\n rc = irods_user_exists(u, verbose)\n if rc < 1:\n if rc == 0 and verbose:\n print(' group %s does not exist.' % (u,))\n complies = False\n elif verbose:\n print(' group %s exists according to policy.' % (u,))\n \n\n u = 'ids-user#%s' % (zone,)\n rc = irods_user_exists(u, verbose)\n if rc < 1:\n if rc == 0 and verbose:\n print(' group %s does not exist.' % (u,))\n complies = False\n elif verbose:\n print(' group %s exists according to policy.' 
% (u,))\n \n return complies", "def user_exist(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return True\n return False", "def has_user(self, username):\n\t\treturn username in self.users", "def can_substitute(userid, group):", "def __contains__(self, user_name):\n tuples = self._execute(\n \"SELECT name FROM users WHERE name == ?\",\n (user_name,)\n )\n return len(tuples) == 1", "def is_user_channel_member(channel_id, u_id):\n for selected_id in database.get_channel_data(channel_id)[\"member_ids\"]:\n if selected_id == u_id:\n return True\n return False", "def contains(self, user_id: int, client_name: str) -> bool:\n return client_name in self.clients[user_id]", "def synchronize_group(self, group, prefix, blacklist):\n\n try:\n group_name = group[1]['cn'][0]\n group_members = group[1]['member']\n except Exception as e:\n self.logger.error(\"Failed to retrieve group name and members: {0}\".format(e))\n return False\n\n self.logger.debug(\n \"Group '{0}' has members: {1}\".format(\n group_name, group_members\n )\n )\n\n role_match = None\n role_match = re.search(\n '^{}(?P<role_name>[a-zA-Z0-9_]+)'.format(prefix), group_name\n )\n\n if role_match:\n role_name = role_match.groups('role_name')[0]\n else:\n self.logger.warning(\n \"Group '{0}' did not match the pattern, skipping...\".format(\n group_name\n )\n )\n return False\n\n if role_name in blacklist:\n self.logger.info(\n \"Skipping group '{0}' which is on the blacklist.\".format(\n group_name\n )\n )\n return False\n\n # First, ensure that the role exists\n try:\n self.psql_cur.execute(\n \"SELECT 1 FROM pg_roles WHERE rolname='{0}'\".format(role_name)\n )\n result = self.psql_cur.fetchone()\n except psycopg2.Error as e:\n self.logger.error(unicode(e.message).encode('utf-8'))\n return False\n\n if not result or result[0] == 0:\n self.logger.warning(\n \"Group {0} does not have a PG role, skipping...\".format(\n group_name\n )\n )\n return False\n\n # Second, extract each member from the list.\n try:\n authorized_users = self.extract_users(group_members)\n except Exception as e:\n self.logger.error(\n \"Failed to extract users from LDAP for {0}: {1}\".format(\n group_name, e\n )\n )\n return False\n\n # Third, add authorized users to the role\n try:\n self.add_authorized_users(role_name, authorized_users)\n except Exception as e:\n self.logger.error(\n \"Failed to add users to the PG role for group {0}: {1}\".format(\n group_name, e\n )\n )\n return False\n\n # Lastly, remove all users that are not on the list\n try:\n self.purge_unauthorized_users(role_name, authorized_users)\n except Exception as e:\n self.logger.error(\n \"Failed to remove unauthorized users from group {0}: {1}\".format(\n group_name, e\n )\n )\n return False\n\n return True", "def test_user_is_group_member(self):\n self.user.add_to_group(self.thread.group.pk)\n self.assertEqual(\n Thread.public.get_by_user(\n thread_id=self.thread.pk, user=self.user),\n self.thread\n )", "def user_is_student(userobj):\n from .assignment_group import AssignmentGroup\n return AssignmentGroup.published_where_is_candidate(userobj).exists()", "def group_exists(name):\n with fabric.api.settings(fabric.api.hide('warnings', 'stderr', 'stdout', 'running'), warn_only=True):\n group_data = fabric.api.run(\n \"cat /etc/group | egrep '^%s:' ; true\" %\n (name))\n\n if group_data:\n name, _, gid, members = group_data.split(\":\", 4)\n return dict(name=name, gid=gid, members=tuple(m.strip()\n for m in members.split(\",\")))\n else:\n return None", "def 
is_membership(self, gid, membership):\n if membership not in [ 'member', 'manager', 'owner']:\n raise Exception(\"Membership request is unexpect as: {m}. Only member, owner or manager inquery allowed.\".format(m=membership))\n url = \"{b}/group/is-{m}/{gid}\".format(b=self.base_url, m=membership, gid=gid)\n r = self.get(url)\n print r", "def load_user_groups(user):\n if not user.is_authenticated:\n return False\n \n user.is_faculty = len(user.groups.filter(name='faculty')) > 0\n user.is_student = not user.is_faculty\n\n return True", "def is_group(id):\n return id.startswith('G')", "def is_group(g, node):\n if node not in g.nodes():\n print('Not a node in the graph')\n return False\n elif g.node[node]['type'] == 'group':\n return True\n else:\n return False", "def is_evaluador(user):\n return user.groups.filter(name='Evaluadores').exists()", "def has_permission(self, request, view):\n usuario = request.user\n grupo = usuario.grupo\n return grupo.name in [\"SuperUsuario\", \"Administrador\"]", "def user_exists(cls, name):\n\n for user in cls.user_list:\n if user.user_name == name:\n return True\n\n return False", "def check_insert_group_user(self, id_user:int, id_group:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_user_group_connect} WHERE id_user={id_user} AND id_group={id_group};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We faced problems with the check previous insertion on th. Mistake: {e} \"\n self.proceed_error(msg)\n return False", "def IsCorpUser(cnxn, services, user_id):\n user_group_ids = services.usergroup.LookupMemberships(cnxn, user_id)\n corp_mode_groups_dict = services.user.LookupUserIDs(\n cnxn, settings.corp_mode_user_groups, autocreate=True)\n corp_mode_group_ids = set(corp_mode_groups_dict.values())\n corp_mode = any(gid in corp_mode_group_ids for gid in user_group_ids)\n return corp_mode", "def has_permission(self, request, view):\n authenticated = super(IsRpcRacker, self).has_permission(request, view)\n user_groups = getattr(request.user, 'roles', set())\n if not isinstance(user_groups, set):\n user_groups = set(user_groups)\n return authenticated and bool(self.rpc_groups & user_groups)", "def isOp(self, user, channel=None):\n if channel is not None:\n return user in self.opsets[channel]\n\n for ch in self.opsets:\n if user in self.opsets[ch]:\n return True\n return False", "def group_exists(self, path_to_group, groupname):\n self.open_db()\n try:\n group = self.h5file.get_node(path_to_group,\n name=groupname)\n except tb.NoSuchNodeError:\n group = False\n return group", "def has_group(self):\n # first-party\n from tcex.api.tc.v3.groups.group_filter import GroupFilter\n\n groups = GroupFilter(Tql())\n self._tql.add_filter('hasGroup', TqlOperator.EQ, groups, TqlType.SUB_QUERY)\n return groups", "def has_group_address(self, group_address):\n return (\n # self.datetime.has_group_address(group_address)\n self.date.has_group_address(group_address)\n or self.time.has_group_address(group_address) # noqa W503\n )", "def is_valid_group(self, destination):\n # TODO: for now we just check if this is not an email\n if '@' in destination: # is this an email ?\n return False\n else:\n return True", "def test_is_member_ok(self):\n self.add_group('testgroup', ['user:[email protected]'])\n\n # baphomet is not a member\n request = endpoints_api.MembershipRequest.combined_message_class(\n group='testgroup',\n identity='user:[email protected]')\n response = self.call_api('membership', msg_dict(request), 
200)\n self.assertEqual({u'is_member': False}, response.json)\n\n # mithras is a member\n request = endpoints_api.MembershipRequest.combined_message_class(\n group='testgroup',\n identity='user:[email protected]')\n response = self.call_api('membership', msg_dict(request), 200)\n self.assertEqual({u'is_member': True}, response.json)", "def get_in_users(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user in obj.users.all():\n return True\n else:\n return False", "def user_is_admin(user):\n return user in admins" ]
[ "0.90404195", "0.89909446", "0.8921993", "0.88956964", "0.8691311", "0.8651488", "0.85226196", "0.8471016", "0.8424121", "0.83981097", "0.83976525", "0.8325658", "0.8266118", "0.8199991", "0.81931895", "0.7539715", "0.7505139", "0.7332905", "0.726839", "0.7217727", "0.7149777", "0.70672965", "0.7025459", "0.6690672", "0.66790277", "0.66556174", "0.65745276", "0.64787424", "0.64599943", "0.6438533", "0.64373606", "0.6390754", "0.63735694", "0.6358276", "0.63396734", "0.62987417", "0.62770003", "0.62487996", "0.6217103", "0.6173215", "0.616613", "0.61519307", "0.6139528", "0.6127016", "0.610568", "0.60711217", "0.60367084", "0.6019135", "0.60033154", "0.59905773", "0.59526557", "0.5913453", "0.5900932", "0.58843905", "0.58823127", "0.58717406", "0.58624905", "0.5852626", "0.5846344", "0.5839214", "0.5836654", "0.5831581", "0.58104044", "0.58035696", "0.57856023", "0.57706773", "0.57550436", "0.5737449", "0.5719927", "0.57027304", "0.56996137", "0.56918013", "0.5683228", "0.566756", "0.56555116", "0.56537306", "0.5652808", "0.5640099", "0.56352943", "0.5608059", "0.5601321", "0.5600844", "0.5583052", "0.5578873", "0.5555122", "0.55414474", "0.55336124", "0.55211115", "0.5516436", "0.549699", "0.548945", "0.5474519", "0.54682827", "0.5466391", "0.54632413", "0.5454919", "0.54539335", "0.544934", "0.54428005", "0.5438819" ]
0.90948594
0
Score vector of model. Default implementation sums score_obs. The gradient of loglike with respect to each parameter.
def score(self, params, *args, **kwargs): try: # If an analytic score_obs is available, try this first before # falling back to numerical differentiation below return self.score_obs(params, *args, **kwargs).sum(0) except NotImplementedError: # Fallback in case a `loglike` is implemented but `loglikeobs` # is not. approx_func = (approx_fprime_cs if self._use_approx_cs else approx_fprime) return approx_func(params, self.loglike, args=args, kwargs=kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(self, s):\n fv = s.feature_vector\n product = fv.dot(self.params.T)[0, 0]\n return s.score(lmwt=self.lmwt) + product", "def svm_loss(scores, y):\r\n\r\n N = scores.shape[0]\r\n\r\n # Compute svm data loss\r\n correct_class_scores = scores[range(N), y]\r\n margins = np.maximum(0.0, scores - correct_class_scores[:, None] + 1.0)\r\n margins[range(N), y] = 0.0\r\n loss = np.sum(margins) / N\r\n\r\n # Compute gradient off loss function w.r.t. scores\r\n num_pos = np.sum(margins > 0, axis=1)\r\n dscores = np.zeros(scores.shape)\r\n dscores[margins > 0] = 1\r\n dscores[range(N), y] -= num_pos\r\n dscores /= N\r\n\r\n return loss, dscores", "def score_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n score_fe = np.zeros(self.k_fe, dtype=np.float64)\n score_re = np.zeros(self.k_re2, dtype=np.float64)\n\n # Handle the covariance penalty.\n if self.cov_pen is not None:\n score_re -= self.cov_pen.grad(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty.\n if self.fe_pen is not None:\n score_fe -= self.fe_pen.grad(fe_params)\n\n # resid' V^{-1} resid, summed over the groups (a scalar)\n rvir = 0.\n\n # exog' V^{-1} resid, summed over the groups (a k_fe\n # dimensional vector)\n xtvir = 0.\n\n # exog' V^{_1} exog, summed over the groups (a k_fe x k_fe\n # matrix)\n xtvix = 0.\n\n # V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th\n # covariance parameter.\n xtax = [0.,] * self.k_re2\n\n # Temporary related to the gradient of log |V|\n dlv = np.zeros(self.k_re2, dtype=np.float64)\n\n # resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)\n rvavr = np.zeros(self.k_re2, dtype=np.float64)\n\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n if self.reml:\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, exog)\n xtvix += np.dot(exog.T, viexog)\n\n # Contributions to the covariance parameter gradient\n jj = 0\n vex = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n ex_r)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n for jj,mat in self._gen_dV_dPsi(ex_r):\n dlv[jj] = np.trace(_smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, mat))\n rvavr[jj] += np.dot(vir, np.dot(mat, vir))\n if self.reml:\n xtax[jj] += np.dot(viexog.T, np.dot(mat, viexog))\n\n # Contribution of log|V| to the covariance parameter\n # gradient.\n score_re -= 0.5 * dlv\n\n # Needed for the fixed effects params gradient\n rvir += np.dot(resid, vir)\n xtvir += np.dot(exog.T, vir)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n score_fe += fac * xtvir / rvir\n score_re += 0.5 * fac * rvavr / rvir\n\n if self.reml:\n for j in range(self.k_re2):\n score_re[j] += 0.5 * np.trace(np.linalg.solve(\n xtvix, xtax[j]))\n\n score_vec = np.concatenate((score_fe, score_re))\n\n if self._freepat is not None:\n return self._freepat.get_packed() * score_vec\n else:\n return score_vec", "def loss(self, y_true, score, pos_label=_NoValue):\n if pos_label is not _NoValue:\n raise ValueError(\"`pos_label` not supported\")\n\n score = score.atleast_2d() # Working with 2-D arrays only\n\n p = CSoftmax().softmax(score) # SoftMax function\n\n # find-like indexing (list of lists)\n 
return -CArray(p[[list(range(score.shape[0])), y_true.tolist()]]).log()", "def score_obs(self, params, *args, **kwargs):\n if self._use_approx_cs:\n return approx_fprime_cs(params, self.loglikeobs,\n args=args, kwargs=kwargs)\n else:\n return approx_fprime(params, self.loglikeobs,\n args=args, kwargs=kwargs)", "def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])", "def score_model(self, length):\n train_score = self.dtr.score(self.X_train, self.y_train)\n test_score = self.dtr.score(self.X_test, self.y_test)\n self.scores.append([length, train_score, test_score])", "def svm_loss_vectorized(W, X, y, reg):\n num_classes = W.shape[1]\n num_train = X.shape[0]\n #loss = 0.0 \n loss = 0.0\n scores = np.zeros((1,num_classes))\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. #\n #############################################################################\n \n # lines begin with double \"#\" are the last version of code!!!!!\n \n ##for i in xrange(num_train):\n #XX = np.tile(X[i,:],(num_classes,1)) # try to use broadcasting\n #scores = np.sum(np.multiply(XX,W.T), axis = 1)\n ## scores = np.sum(np.multiply(X[i,:],W.T), axis = 1)\n \n ## if i ==1: print scores\n \n #loss += np.sum(scores - scores[y[i]]) + num_classes -1\n #http://stackoverflow.com/questions/2900084/counting-positive-elements-in-a-list-with-python-list-comprehensions\n ## scores+=1\n ## scores[y[i]]-=1 \n #however, this is sum over index, not values, glaube ich \n #loss+= sum(x < 0 for x in (scores-scores[y[i]]))\n ## loss+= (scores-scores[y[i]])[scores-scores[y[i]]>0].sum()\n #pass\n ############################################\n # construct a zero loop version\n ############################################\n scores2D = np.zeros((num_train, num_classes)) #used to store dotted scores\n scores1D = np.zeros((num_train,1)) #used to store corrected scores\n #index1D = np.zeros((1,num_classes))\n #index1D = range(num_classes) \n #scores1D = y[index1D]\n \n scores2D = np.dot(X,W) \n ##for i in xrange(num_train):\n ## scores1D[i,0]=scores2D[i,y[i]]-1 #find the correct scores and fill them into scores1D, the value -1 is because: si-sj+1\n ## scores2D[i,y[i]]-=1 # we want at corrected score voxel, the value should be 0, correct score -1 - \n #(correct score -1) = 0\n #####################################\n #for loop replacement###\n indexInsert = np.arange(num_train)\n scores1D[indexInsert,0] = scores2D[indexInsert,y[indexInsert]] -1 #using array indexing\n scores2D[indexInsert,y[indexInsert]] -=1\n \n ##################################### \n \n #scores2D = X.dot(W)\n #http://stackoverflow.com/questions/9497290/how-would-i-sum-a-multi-dimensional-array-in-the-most-succinct-python\n #rewrite summation\n #loss += (scores2D-scores1D)[scores2D-scores1D >0].sum()\n #temp = scores2D-np.tile (scores1D, (1,num_classes)) # for each score minus the corrected score\n temp = scores2D-scores1D #broadcasting!!\n #print temp[1,:]\n temp= temp.clip(min=0) \n #loss += sum(map(sum, (temp)[temp>0]))\n #loss += sum(map(sum, (temp)))\n #loss += (temp)[temp >0].sum()\n loss += sum(sum(x) for x in temp) #sum them up\n #loss -= num_train # minus 1 is because in each train, due to the plus 1 above , correct score - correct \n # score +1 = 1, but it should be 0, therefore, i 
deduce them at the last minute \n # ( then I made this also in the for loop to meet intuitive)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n #tempBool = np.divide(temp, temp)\n #tempBool = tempBool.clip(max=1,min=0)\n #http://stackoverflow.com/questions/19666626/replace-all-elements-of-python-numpy-array-that-are-greater-than-some-value\n tempBool = np.copy(temp) # temp = scores2D-scores1D , temp= temp.clip(min=0)\n # temp is already the every score minus the correct labeled score\n tempBool[tempBool>0] = 1 # for every element, when it is positive, set it to one (for weighting)\n for j in xrange(num_train):\n tempBool[j,y[j]] =-1*sum(tempBool[j,:]) # calculate how many final scores, max(~~,0) are more than 0, add the number to the correct\n # label element, because it is the times that the corrected scores be used\n dW += np.reshape (X[j,:],(X.shape[1],1))*tempBool[j,:] # broadcasting, out-product\n #pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n dW/= num_train\n dW += reg*W\n \n return loss, dW", "def loss(W_vect, X, T):\n # log_prior = - 0.5 * L2_reg * jnp.dot(W_vect, W_vect)\n return jnp.mean((predictions(W_vect, X) - T)**2) + 0.5*jnp.log(2*jnp.pi)", "def score(self, X):\n nolist = False\n if not isinstance(X, list):\n X = [X]\n nolist = True\n\n scores = []\n for i in X:\n Xi = X[i]\n Xhati = self.predict(Xi)\n\n scores.append(1.0 - np.sum((Xi - Xhati)**2.0) / np.sum(Xi**2.0))\n\n if nolist:\n return scores[0]\n else:\n return scores", "def prob_calibration_function(truthvec, scorevec, reg_param_vec='default', knots='sample',\n method='logistic', force_prob=True, eps=1e-15, max_knots=200,\n transform_fn='none', random_state=942, verbose=False, cv_folds=5,\n unity_prior_weight=1, unity_prior_gridsize=20):\n from sklearn import linear_model\n from sklearn.metrics import log_loss, make_scorer\n\n if (unity_prior_weight>0):\n scorevec_coda, truthvec_coda = create_yeqx_bias_vectors(unity_prior_gridsize)\n coda_wt = unity_prior_weight/unity_prior_gridsize\n weightvec = np.concatenate((np.ones(len(scorevec)), coda_wt * np.ones(len(scorevec_coda))))\n scorevec = np.concatenate((scorevec, scorevec_coda))\n truthvec = np.concatenate((truthvec, truthvec_coda))\n\n if transform_fn != 'none':\n scorevec = transform_fn(scorevec)\n\n knot_vec = np.unique(scorevec)\n if (knots == 'sample'):\n num_unique = len(knot_vec)\n if (num_unique > max_knots):\n smallest_knot, biggest_knot = knot_vec[0], knot_vec[-1]\n inter_knot_vec = knot_vec[1:-1]\n random.seed(random_state)\n random.shuffle(inter_knot_vec)\n reduced_knot_vec = inter_knot_vec[:(max_knots-2)]\n reduced_knot_vec = np.concatenate((reduced_knot_vec, [smallest_knot, biggest_knot]))\n reduced_knot_vec = np.concatenate((reduced_knot_vec, np.linspace(0, 1, 21)))\n if 
(unity_prior_weight>0):\n reduced_knot_vec = np.concatenate((reduced_knot_vec, scorevec_coda))\n knot_vec = np.unique(reduced_knot_vec)\n if verbose:\n print(\"Originally there were {} knots. Reducing to {} while preserving first and last knot.\".format(num_unique, len(knot_vec)))\n X_mat = _natural_cubic_spline_basis_expansion(scorevec, knot_vec)\n\n if (method == 'logistic'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 5, 61)\n if verbose:\n print(\"Trying {} values of C between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec), np.max(reg_param_vec)))\n reg = linear_model.LogisticRegressionCV(Cs=reg_param_vec, cv=StratifiedKFold(cv_folds, shuffle=True),\n scoring=make_scorer(log_loss, needs_proba=True, greater_is_better=False))\n if (unity_prior_weight>0):\n reg.fit(X_mat, truthvec, weightvec)\n else:\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found C = {}\".format(reg.C_))\n\n if (method == 'ridge'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 7, 71)\n if verbose:\n print(\"Trying {} values of alpha between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec),np.max(reg_param_vec)))\n reg = linear_model.RidgeCV(alphas=reg_param_vec, cv=KFold(cv_folds, shuffle=True), scoring=make_scorer(mean_squared_error_trunc,needs_proba=False, greater_is_better=False))\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found alpha = {}\".format(reg.alpha_))\n\n def calibrate_scores(new_scores):\n new_scores = np.maximum(new_scores,knot_vec[0]*np.ones(len(new_scores)))\n new_scores = np.minimum(new_scores,knot_vec[-1]*np.ones(len(new_scores)))\n if transform_fn != 'none':\n new_scores = transform_fn(new_scores)\n basis_exp = _natural_cubic_spline_basis_expansion(new_scores,knot_vec)\n if (method == 'logistic'):\n outvec = reg.predict_proba(basis_exp)[:,1]\n if (method == 'ridge'):\n outvec = reg.predict(basis_exp)\n if force_prob:\n outvec = np.where(outvec < eps, eps, outvec)\n outvec = np.where(outvec > 1-eps, 1-eps, outvec)\n return outvec\n\n return calibrate_scores", "def calc_score(model, scorer, X, y_true):\n\n y_preds = model.predict(X)\n score = scorer(y_true, y_preds)\n\n return score", "def eval_score( # type: ignore\n self, model_in: torch.Tensor, target: Optional[torch.Tensor] = None, idx=None, next_obs=None\n ) -> torch.Tensor:\n # target = target.repeat((self.num_members, 1, 1))\n loss = self._vaml_loss(model_in, target, idx, next_obs=next_obs, eval=True)\n if self.add_mse:\n loss += self._mse_loss(model_in, target).mean(-1, keepdim=True)\n return loss.detach()", "def scoring_function(self, model, y_true, y_predicted_probability):", "def score(self,x,**kwargs):\r\n if self.kfun != 'matrix' and len(self.sv): \r\n k = self.kfun(x,self.sv,**self.cparam)\r\n #print \"Kernel after test: \", k\r\n else:\r\n k = x\r\n \r\n \r\n self.W=self.alphas \r\n self.mat=self.kfun(np.array([self.sv[1]]), self.sv,**self.cparam) \r\n self.bias=self.svLabels[1]- np.dot((self.alphas*self.svLabels).T,self.mat.T) \r\n z=np.dot((self.alphas*self.svLabels).T,k.T)+self.bias\r\n \r\n #print \"bias: \", self.bias, \"\\nZ: \",z\r\n \r\n \r\n return z", "def get_score(self, solution: np.array) -> float:\n pass", "def update(self, returns, log_probs):\n policy_gradient = []\n for log_prob, Gt in zip(log_probs, returns):\n policy_gradient.append(-log_prob * Gt)\n\n loss = torch.stack(policy_gradient).sum()\n self.optimizer.zero_grad()\n 
loss.backward()\n self.optimizer.step()", "def lm(self, lm_para=LmPara()):\r\n if self.doc_len == 0:\r\n return np.log(MIN_LM_SCORE)\r\n v_tf = np.maximum(self.v_tf, lm_para.min_tf)\r\n v_tf /= self.doc_len\r\n v_tf = np.maximum(v_tf, MIN_LM_SCORE)\r\n score = np.log(v_tf).dot(self.v_q_tf)\r\n\r\n return score", "def log_loss(self):\n probabilities = self.probability_array().copy()\n # need to flip the probabilities for p < 0.5 with this binary case.\n # 1 - old_val is same as oldval*-1 + 1. Do in 2 steps:\n probabilities[np.equal(0, self.y)] *= -1\n probabilities[np.equal(0, self.y)] += 1\n # when multiclass: np.amax(probabilities, 1)\n return np.log(probabilities).sum()", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0", "def calc_loss(X, Y, model):\n Z = predict(X, model)\n return -(Y * np.log(Z)).sum() / len(Y)", "def score(self, x, y=None):\n _, logp = self.score_samples(x)\n return logp", "def predict_score(self, X):\r\n if self.score:\r\n preds = self.model.predictValue(X)\r\n return preds", "def 
objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l", "def score(self, X, y):\n\n u = ((y - self.predict(X)) ** 2).sum()\n v = ((y - np.mean(y)) ** 2).sum()\n score = 1 - u / v\n\n return score", "def score(self, indices):\n self.model.eval()\n _, prediction = self.model(self.propagation_matrix, self.features).max(dim=1)\n correct = prediction[indices].eq(self.target[indices]).sum().item()\n acc = correct / indices.shape[0]\n return acc", "def total_score(self, logits):\n previous = torch.full((1, self.tag_size), -10000., device=device)\n previous[0][self.tag_map[self.start_tag]] = 0.\n\n for index in range(len(logits)):\n previous = previous.expand(self.tag_size, self.tag_size).t()\n emit = logits[index].view(1, -1).expand(self.tag_size, self.tag_size)\n scores = previous + emit + self.transitions\n previous = log_sum_exp(scores)\n\n # previous = previous + self.transitions[:, self.tag_map[self.stop_tag]]\n # previous += self.transitions[self.tag_map[self.stop_tag]]\n previous += self.transitions[self.tag_map[:, self.stop_tag]]\n total_scores = log_sum_exp(previous.t())[0]\n return total_scores", "def log_likelihood_grad_rew(self, data, reward_model, bias_params):", "def loss_gradient(self, targets, scores):\n m = targets * scores\n numer = 4. * (2. * numpy.arctan(m) - 1.)\n denom = 1. + m**2\n return numer/denom", "def score(self, X: np.ndarray) -> np.ndarray:\n # Matrix where predictions[i, j] is the prediction (1 or -1) for data point i\n # by learner j.\n predictions = np.zeros((len(X), self.num_learners))\n for i, learner in enumerate(self.learners):\n predictions[:, i] = learner.predict(X)\n return predictions @ self.learner_weights", "def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= 
self.n_totobs * np.log(qf) / 2.\n likeval -= self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval", "def score(self, X):\n return _betadiv(X, parafac(self.factors_), self.beta).sum()", "def score(self, data):\n\n score_mappings = {\n \"0\": np.log(self.class_zero_doc_count / self.total_docs),\n \"1\": np.log(self.class_one_doc_count / self.total_docs)\n }\n\n features = self.featurize(data)\n\n for f in features:\n\n if(f[0] in self.class_zero):\n cond_prob_zero = np.log((self.class_zero[f[0]] + 1) / (self.class_zero_feature_count + len(self.vocab)))\n elif(f[0] in self.vocab):\n cond_prob_zero = np.log(1 / (self.class_zero_feature_count + len(self.vocab)))\n else:\n cond_prob_zero = 0\n\n if(f[0] in self.class_one):\n cond_prob_one = np.log((self.class_one[f[0]] + 1) / (self.class_one_feature_count + len(self.vocab)))\n elif(f[0] in self.vocab):\n cond_prob_one = np.log(1 / (self.class_one_feature_count + len(self.vocab)))\n else:\n cond_prob_one = 0\n\n score_mappings[\"0\"] += cond_prob_zero\n score_mappings[\"1\"] += cond_prob_one\n\n score_mappings[\"0\"] = np.exp(score_mappings[\"0\"])\n score_mappings[\"1\"] = np.exp(score_mappings[\"1\"])\n\n return score_mappings", "def score(self, model, context):\n pass", "def score_model(self, model, test_training, test_target):\n\n target_prediction = model.predict(test_training)\n from sklearn.metrics import classification_report\n if(self.VERBOSE):\n print(classification_report(test_target, target_prediction))\n\n return [\n f1_score(test_target, target_prediction, average='weighted'),\n precision_score(test_target, target_prediction, average='weighted'),\n recall_score(test_target, target_prediction, average='weighted')\n ]", "def prob_calibration_function_multiclass(truthvec, scoremat, verbose=False, **kwargs):\n from sklearn import linear_model\n from sklearn.metrics import log_loss, make_scorer\n\n num_classes = scoremat.shape[1]\n function_list = []\n for i in range(num_classes):\n scorevec = scoremat[:,i]\n curr_truthvec = (truthvec==i).astype(int)\n function_list.append(prob_calibration_function(curr_truthvec,scorevec,verbose=verbose,**kwargs))\n\n def calibrate_scores_multiclass(new_scoremat):\n a,b = new_scoremat.shape\n pre_probmat = np.zeros((a,b))\n for i in range(num_classes):\n pre_probmat[:,i] = function_list[i](new_scoremat[:,i])\n probmat = (pre_probmat.T/np.sum(pre_probmat,axis=1)).T\n #if (not extrapolate):\n # new_scores = np.maximum(new_scores,knot_vec[0]*np.ones(len(new_scores)))\n # new_scores = np.minimum(new_scores,knot_vec[-1]*np.ones(len(new_scores)))\n return probmat\n return calibrate_scores_multiclass, function_list", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def score(self, X, y):\r\n n_feature, _ = self.check_model()\r\n _, n_label = y.shape\r\n y = self.check_X_y_weights(X, y)\r\n\r\n if X.shape[1] == (n_feature + 1):\r\n X = X[:, 1:]\r\n\r\n assert (X.shape[1] == n_feature), \"X is of the wrong shape\"\r\n\r\n if 
self.scoring_func is None:\r\n y_pred = self.forward(X)\r\n\r\n loss = self.loss_func(torch.from_numpy(y_pred).float(),\r\n torch.from_numpy(y).float())\r\n loss = torch.mean(torch.sum(loss, 1)).numpy()\r\n\r\n return - loss\r\n else:\r\n y_pred = self.predict(X)\r\n return self.scoring_func(y_pred, y)", "def loss(Y,Y_pred):\n\n Y = Y.tolist()\n Y_pred = Y_pred.tolist()\n score = 0\n for i in range(len(Y)):\n score += (Y[i]-Y_pred[i])**2\n score=cmath.sqrt(score/len(Y))\n return score", "def objective_function(self, y_true, y_predicted, X=None, sample_weight=None):\n return metrics.log_loss(y_true, y_predicted, sample_weight=sample_weight)", "def objective_function(self, y_true, y_predicted, X=None, sample_weight=None):\n return metrics.log_loss(y_true, y_predicted, sample_weight=sample_weight)", "def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]", "def model(X, Y, word_to_vec_map, learning_rate = 0.01, num_iterations = 400):\n \n # Get a valid word contained in the word_to_vec_map \n any_word = list(word_to_vec_map.keys())[0]\n \n # Initialize cost. It is needed during grading\n cost = 0\n \n # Define number of training examples\n m = Y.shape[0] # number of training examples\n n_y = len(np.unique(Y)) # number of classes \n n_h = word_to_vec_map[any_word].shape[0] # dimensions of the GloVe vectors \n \n # Initialize parameters using Xavier initialization\n W = np.random.randn(n_y, n_h) / np.sqrt(n_h)\n b = np.zeros((n_y,))\n \n # Convert Y to Y_onehot with n_y classes\n Y_oh = convert_to_one_hot(Y, C = n_y) \n \n # Optimization loop\n for t in range(num_iterations): # Loop over the number of iterations\n for i in range(m): # Loop over the training examples\n \n ### START CODE HERE ### (≈ 4 lines of code)\n # Average the word vectors of the words from the i'th training example\n # def sentence_to_avg(sentence, word_to_vec_map): # return avg\n avg = sentence_to_avg(X[i], word_to_vec_map)\n\n # Forward propagate the avg through the softmax layer. 
\n # You can use np.dot() to perform the multiplication.\n z = np.dot(W, avg) + b\n a = softmax(z)\n\n # Compute cost using the i'th training label's one hot representation and \"A\" (the output of the softmax)\n cost = - np.sum(Y_oh[i] * a)\n ### END CODE HERE ###\n \n # Compute gradients \n dz = a - Y_oh[i]\n dW = np.dot(dz.reshape(n_y,1), avg.reshape(1, n_h))\n db = dz\n\n # Update parameters with Stochastic Gradient Descent\n W = W - learning_rate * dW\n b = b - learning_rate * db\n \n if t % 100 == 0:\n print(\"Epoch: \" + str(t) + \" --- cost = \" + str(cost))\n pred = predict(X, Y, W, b, word_to_vec_map) #predict is defined in emo_utils.py\n\n return pred, W, b", "def loss_grad_softmax_vectorized(W, X, y):\n loss = 0 \n grad = np.zeros_like(W)\n dim, num_train = X.shape\n\n scores = W.dot(X) # [K, N]\n # Shift scores so that the highest value is 0\n scores -= np.max(scores)\n scores_exp = np.exp(scores)\n correct_scores_exp = scores_exp[y, range(num_train)] # [N, ]\n scores_exp_sum = np.sum(scores_exp, axis=0) # [N, ]\n loss = -np.sum(np.log(correct_scores_exp / scores_exp_sum))\n loss /= num_train\n #loss += 0.5 * reg * np.sum(W * W)\n\n scores_exp_normalized = scores_exp / scores_exp_sum\n # deal with the correct class\n scores_exp_normalized[y, range(num_train)] -= 1 # [K, N]\n grad = scores_exp_normalized.dot(X.T)\n grad /= num_train\n grad += W\n\n return grad", "def __log_scores(self, scores, loss, tag):\r\n\t\tprint(\"\\t{:6s} - \".format(tag), end=\" \")\r\n\t\tfor name, value in scores.items():\r\n\t\t\tprint(name, '{:.4f}'.format(value), end=\", \")\r\n\t\tprint(\" Loss: {:.4f}\".format(loss))", "def scores(self, eouts, temperature=1.0):\n return torch.log_softmax(self.output(eouts) / temperature, dim=-1)", "def score(self, sentence):\n # TODO your code here\n score = 0.0\n for i,token in enumerate(sentence[1:]):\n prev = sentence[i]\n current = token\n freq = self.vocab[current][prev] + self.epsilon\n\n score += math.log(freq)\n score -= math.log(self.word_counts[prev] + self.epsilon * self.v)\n return score", "def compute_loss(self, x, label):\n # Forward propagation\n y_hat = self.forward_pass(x)\n return -np.log(y_hat[label])", "def score_calc(self, annotations, predictions):\n\n mean_probabilities_of_classes = np.expand_dims(np.mean(predictions, axis=0), axis=0)\n KL_d = predictions * (np.log(predictions + self.eps) - np.log(mean_probabilities_of_classes + self.eps))\n KL_D = KL_d.sum(axis=1)\n\n score = np.exp(np.mean(KL_D))\n return score", "def loglike(self, params, *args, **kwargs):\n return np.sum(self.loglikeobs(params, *args, **kwargs))", "def get_score(self, input, target_idx, noise_idx):\n raise NotImplementedError()", "def _score_for_model(meta):\n mean_acc = list()\n pipes = meta[\"pipeline\"]\n acc = meta[\"accuracy\"]\n if \"tagger\" in pipes:\n mean_acc.append(acc[\"tags_acc\"])\n if \"morphologizer\" in pipes:\n mean_acc.append((acc[\"morphs_acc\"] + acc[\"pos_acc\"]) / 2)\n if \"parser\" in pipes:\n mean_acc.append((acc[\"uas\"] + acc[\"las\"]) / 2)\n if \"ner\" in pipes:\n mean_acc.append((acc[\"ents_p\"] + acc[\"ents_r\"] + acc[\"ents_f\"]) / 3)\n if \"textcat\" in pipes:\n mean_acc.append(acc[\"textcat_score\"])\n if \"senter\" in pipes:\n mean_acc.append((acc[\"sent_p\"] + acc[\"sent_r\"] + acc[\"sent_f\"]) / 3)\n return sum(mean_acc) / len(mean_acc)", "def loss(self, X, y=None, reg=0.0):\r\n Ws = self.weights\r\n bs = self.biases\r\n N, D = X.shape # number of samples, number of features per sample\r\n\r\n # Compute the forward pass\r\n 
self.activations = []\r\n for i in xrange(len(Ws)): # for each set of weights\r\n W,b = Ws[i], bs[i]\r\n if i == 0:\r\n H = np.dot(X,W) + b\r\n else:\r\n H = np.dot(self.activations[-1],W) + b\r\n if i < len(Ws) - 1: # if we're computing hidden activations, apply nonlinear function\r\n H = (H > 0) * (H) + (H < 0) * (H/100.0)\r\n self.activations.append(H)\r\n scores = self.activations[-1]\r\n \r\n # If there's no labels provided, stop here\r\n if y is None:\r\n return scores\r\n\r\n # Compute the loss\r\n exped_scores = np.exp(scores)\r\n sums = np.sum(exped_scores,axis=1)\r\n # softmax classifier loss\r\n data_loss = (-1.0/N) * np.sum(np.log(exped_scores[range(N),y.astype(int)] / sums))\r\n\r\n # loss due to regularization\r\n reg_loss = 0\r\n for i in xrange(len(Ws)):\r\n reg_loss += np.sum(Ws[i]**2)\r\n reg_loss *= reg*(0.5)\r\n\r\n loss = data_loss + reg_loss\r\n \r\n # Compute gradients\r\n weights_grads = []\r\n biases_grads = []\r\n activation_grads = []\r\n for i in xrange(len(Ws)):\r\n weights_grads.append(np.copy(Ws[i]))\r\n biases_grads.append(np.copy(bs[i]))\r\n activation_grads.append(np.copy(self.activations[i]))\r\n\r\n DlossDscores = np.array(exped_scores / (N * np.matrix(sums).T))\r\n DlossDscores[range(N),y.astype(int)] -= (1.0/N)\r\n \r\n for i in xrange(len(Ws)-1,-1,-1):\r\n if i == 0:\r\n weights_grads[0] = np.dot(X.T, activation_grads[0]) + reg*Ws[0]\r\n biases_grads[0] = np.dot(np.ones((1,N)), activation_grads[0])[0]\r\n elif i == len(Ws)-1:\r\n H = self.activations[i-1]\r\n weights_grads[i] = np.dot(H.T, DlossDscores) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), DlossDscores)[0]\r\n dH = np.dot(DlossDscores, Ws[i].T)\r\n activation_grads[i-1] = dH\r\n else:\r\n H = self.activations[i-1]\r\n dH_out = activation_grads[i]\r\n weights_grads[i] = np.dot(H.T, dH_out) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), dH_out)[0]\r\n dH = np.dot(dH_out, Ws[i].T)\r\n dH = dH * (H > 0) + dH/100.0 * (H < 0)\r\n activation_grads[i-1] = dH\r\n \r\n grads = {}\r\n grads['weights'] = weights_grads\r\n grads['biases'] = biases_grads\r\n\r\n return loss, grads", "def softmax_loss(scores, y):\r\n N = scores.shape[0] # number of input data\r\n\r\n # compute data loss\r\n shifted_logits = scores - np.max(scores, axis=1, keepdims=True)\r\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\r\n log_probs = shifted_logits - np.log(Z)\r\n probs = np.exp(log_probs)\r\n loss = -np.sum(log_probs[range(N), y]) / N\r\n\r\n # Compute gradient of loss function w.r.t. scores\r\n dscores = probs.copy()\r\n dscores[range(N), y] -= 1\r\n dscores /= N\r\n \r\n return loss, dscores", "def svm_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement loss and gradient for multiclass SVM classification. #\n # This will be similar to the svm loss vectorized implementation in #\n # cs231n/classifiers/linear_svm.py. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n y_temp = np.ones((x.shape[0], x.shape[1])) # 1로 구성된 x와 같은 쉐입의 매트릭스를 만든다\n #print(y_temp)\n y_score = x[np.arange(x.shape[0]), y] # 정답레이블의 스코어로만 구성된 하나의 컬럼 벡터를 만든다\n y_score = np.reshape(y_score, (x.shape[0], 1)) # 브로드캐스팅을 위해 리쉐입 해준다\n y_temp[np.arange(x.shape[0]), y] = 0 # 1로 구성된 템프매트릭스의 정답 레이블에 해당되는 인덱스에 0을 할당한다\n #print(y_temp)\n loss_temp = (x - y_score) - 1\n loss_temp = (-loss_temp * y_temp) / x.shape[0]\n loss = (np.sum(loss_temp))\n #print(loss_temp)\n\n #print(np.sum(loss_temp, axis = 1))\n \n temp = loss_temp * x.shape[0]\n temp[loss_temp > 0] = 1\n row_sum = np.sum(temp, axis = 1)\n temp[np.arange(x.shape[0]), y] = -row_sum.T\n dx = -temp\n\n dx /= x.shape[0]\n\n\n #print(dx)\n\n\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx", "def score(self, X, label):\n pred_risk = self.predict(X)\n CI = self._metrics_ci(label, pred_risk)\n return CI", "def eval_additional_scores(self, **kwargs):\n self.model.eval()\n self.likelihood.eval()\n\n X_train_torch = torch.from_numpy(kwargs[\"X_train\"]).to(self.device)\n y_train_torch = torch.from_numpy(kwargs[\"y_train\"]).to(self.device)\n mll = gpytorch.mlls.VariationalELBO(self.likelihood, self.model, num_data=y_train_torch.numel())\n\n with torch.no_grad(), gpytorch.settings.num_likelihood_samples(self.num_likelihood_samples):\n f_pred = self.model(X_train_torch)\n elbo = mll(f_pred, y_train_torch).item()\n\n return {\n \"elbo\": elbo\n }", "def dloss(self, y_true, score, pos_label=None):\n score = score.atleast_2d() # Working with 2-D arrays only\n\n grad = CSoftmax().softmax(score)\n\n # we subtract -1 only to the elements equal to y_true\n grad[[list(range(score.shape[0])), y_true.tolist()]] -= 1.0\n\n # find-like indexing (list of lists)\n a = y_true.tolist() if pos_label is None else [pos_label]\n\n # Return elements equal to y_true (if pos_label is None) or pos_label\n return CArray(grad[[list(range(score.shape[0])), a]])", "def objective(self, var: ndarray) -> float:\n beta, gamma = self.get_vars(var)\n r = self.get_residual(beta)\n d = self.get_varmat(gamma)\n\n val = 0.5*(d.logdet() + r.dot(d.invdot(r)))\n val += self.fevar.prior_objective(beta)\n val += self.revar.prior_objective(gamma)\n\n return val", "def loss(self, targets, scores):\n return (2. 
* numpy.arctan(targets * scores) - 1.)**2", "def score(matrix,seq,ns=True):\n #specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])\n specific_binding = 0\n for i in xrange(len(matrix)): \n specific_binding += matrix[i][base_dict[seq[i]]]\n if ns:\n return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta\n else:\n return specific_binding", "def score(model):\n # get the first layer\n layer = model.get_layer('encoder')\n # extracts weights\n weights = layer.get_weights()[0]\n # calculate the infinity norm as shown in the paper.\n # For each input feature get the absolute maximum weight\n # connected with this feature\n scores = np.linalg.norm(weights, ord=np.inf, axis=1)\n # the final score is a importance measure for each feature\n sorted_scores = sorted(range(len(scores)), key=lambda k: scores[k])\n return sorted_scores[::-1]", "def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss", "def do_loss(logits, labels):\n return tf.reduce_sum(tf.square(logits - labels))", "def logscore(self, word, context=None):\n return log_base2(self.score(word, context))", "def score(self, data_test, labels_pred, is_train=False):\n return -np.log(np.clip(self.score_trust(data_test, labels_pred, is_train=is_train),\n sys.float_info.min, None))", "def grad_log(self, X):\n # \"\"\"\n # Evaluate the gradients (with respect to the input) of the log density at\n # each of the n points in X. 
This is the score function.\n\n # X: n x d numpy array.\n XB = np.dot(X, self.B)\n Y = 0.5*XB + self.c\n E2y = np.exp(2*Y)\n # n x dh\n Phi = old_div((E2y-1.0),(E2y+1))\n # n x dx\n T = np.dot(Phi, 0.5*self.B.T)\n S = self.b - X + T\n return S", "def loss(self, y: np.ndarray, y_hat: np.ndarray) -> float:\n losses = -(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))\n return losses.mean() + self.reg / self.num_parameters * (\n (self.v[:, -1] ** 2).sum() + (self.w ** 2).sum()\n )", "def grad_log_q(self,z): \n param_count = 0\n grad = np.zeros((np.sum(self.approx_param_no),self.sims))\n for core_param in range(len(self.q)):\n for approx_param in range(self.q[core_param].param_no):\n grad[param_count] = self.q[core_param].vi_score(z[core_param],approx_param) \n param_count += 1\n return grad", "def __call__(self, score_outputs, labels):\n with tf.name_scope('rpn_loss'):\n levels = sorted(score_outputs.keys())\n\n score_losses = []\n for level in levels:\n score_losses.append(\n self._rpn_score_loss(\n score_outputs[level],\n labels[level],\n normalizer=tf.cast(\n tf.shape(score_outputs[level])[0] *\n self._rpn_batch_size_per_im,\n dtype=tf.float32)))\n\n # Sums per level losses to total loss.\n return tf.math.add_n(score_losses)", "def custom_scoring(y_te, y_pred):\n #weights computed with training data set\n w = np.array([0.02409584, 0.00787456, 0.03685528, 0.01760536, 0.04589969, 0.8483942 , 0.01724058, 0.00203449]);\n \n ## F1 SCORES\n #evaluate F1 score, precision and recall for each label, \n #along with custom proportionally weighted F1 score\n #and built in weighted and macro F1 scores\n F1_tab, Ptab, Rtab, pf1 = F1_score(y_te, y_pred, w)\n f = F1Score(8, threshold = 0.5, average = 'weighted')\n f.update_state(y_te, y_pred)\n wf1 = f.result().numpy() #weighted f1 score\n f.reset_states()\n f = F1Score(8, threshold = 0.5, average = 'macro')\n f.update_state(y_te, y_pred)\n mf1 = f.result().numpy() #macro f1 score\n f.reset_states()\n\n ##EDIT DISTANCE\n #edit_dist_av = LevDistMultilabels(y_true, y_pred)\n\n ##ACCURACY\n #evaluate accuracy per label\n acc_tab = Acc(y_te, y_pred)\n\n return wf1, mf1, pf1, F1_tab, Ptab, Rtab, acc_tab", "def objective(params):\n\t# hyperopt casts as float\n\tparams['num_boost_round'] = int(params['num_boost_round'])\n\tparams['num_leaves'] = int(params['num_leaves'])\n\n\t# need to be passed as parameter\n\tparams['is_unbalance'] = True\n\tparams['verbose'] = -1\n\tparams['seed'] = 1\n\n\tcv_result = lgb.cv(\n\t\tparams,\n\t\tdtrain,\n\t\tnum_boost_round=params['num_boost_round'],\n\t\tmetrics='binary_logloss',\n\t\tnfold=3,\n\t\tearly_stopping_rounds=20,\n\t\tstratified=False)\n\tearly_stop_dict[objective.i] = len(cv_result['binary_logloss-mean'])\n\terror = round(cv_result['binary_logloss-mean'][-1], 4)\n\tobjective.i+=1\n\treturn error", "def decision_function(self, X):\n X = atleast2d_or_csr(X)\n scores = safe_sparse_dot(X, self.coef_.T) + self.intercept_\n if self.classes.shape[0] == 2:\n return np.ravel(scores)\n else:\n return scores", "def score(self, word):\n assert self.words is not None, \"You need to train first.\"\n if word in self.words:\n return np.log(1 - self.a) + np.log(self.words[word] / self.nwords)\n else:\n logprob = 0\n for l in word:\n # this calculates add+1-smoothed probabilities to make\n # sure that unknown letters are treated correctly.\n # not required, using simply the relative\n # frequency is sufficient.\n logprob += np.log(self.letters.get(l, 1) /\n (self.nletters + len(self.letters)))\n return np.log(self.a) + logprob", 
"def score(self, X, y):\n\n stuff = self._vectorizer.transform(X)\n result = self._classifier.score(stuff,y)\n\n return result\n pass", "def loss(self, y_pred=None, y_true=None):\n ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. / self.sigma_y ** 2) * ((y_pred - y_true) ** 2)\n return -ll.sum(dim=0)", "def score(self, X, y):\n X_pp = self.preprocessor.transform(X)\n # Score the model on the data here\n return(self.estimator.score(X_pp, y))", "def log_loss(network, model_indices, sim_data, lambd=1.0):\n\n # Compute evidences\n alpha = network(sim_data)\n\n # Obtain probs\n model_probs = alpha / tf.reduce_sum(alpha, axis=1, keepdims=True)\n\n # Numerical stability\n model_probs = tf.clip_by_value(model_probs, 1e-15, 1 - 1e-15)\n\n # Actual loss + regularization (if given)\n loss = -tf.reduce_mean(tf.reduce_sum(model_indices * tf.math.log(model_probs), axis=1))\n if lambd > 0:\n kl = kl_dirichlet(model_indices, alpha)\n loss = loss + lambd * kl\n return loss", "def score(self, y_true, y_pred):\r\n pass", "def loss(self, rng_key, param_map, model, guide, *args, **kwargs):\n def single_particle_elbo(rng_key):\n model_seed, guide_seed = random.split(rng_key)\n seeded_model = seed(model, model_seed)\n seeded_guide = seed(guide, guide_seed)\n guide_log_density, guide_trace = log_density(seeded_guide, args, kwargs, param_map)\n seeded_model = replay(seeded_model, guide_trace)\n model_log_density, _ = log_density(seeded_model, args, kwargs, param_map)\n\n # log p(z) - log q(z)\n elbo = model_log_density - guide_log_density\n return elbo\n\n # Return (-elbo) since by convention we do gradient descent on a loss and\n # the ELBO is a lower bound that needs to be maximized.\n if self.num_particles == 1:\n return - single_particle_elbo(rng_key)\n else:\n rng_keys = random.split(rng_key, self.num_particles)\n return - jnp.mean(vmap(single_particle_elbo)(rng_keys))", "def eml_add_smooth(yi, xi, eqml):\n return (eqml[yi][xi] + 1) / (sum(eqml[yi].values()) + train_set_size)", "def real_path_score(self, logits, label):\n score = torch.zeros(1, device=device)\n label = torch.cat([torch.tensor([self.tag_map[self.start_tag]], dtype=torch.long, device=device), label.to(torch.long)])\n\n for index, logit in enumerate(logits):\n emission_score = logit[label[index + 1]]\n transition_score = self.transitions[label[index], label[index + 1]]\n # transition_score = self.transitions[label[index + 1], label[index]]\n score += emission_score + transition_score\n\n # Add the final Stop Tag, the final transition score\n score += self.transitions[label[-1], self.tag_map[self.stop_tag]]\n # score += self.transitions[self.tag_map[self.stop_tag], label[-1]]\n return score", "def compute_loss(self, obs, returns):", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. 
Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def logloss_mc(y_true, y_prob, epsilon=1e-15):\n # normalize\n y_prob = y_prob / y_prob.sum(axis=1).reshape(-1, 1)\n y_prob = np.maximum(epsilon, y_prob)\n y_prob = np.minimum(1 - epsilon, y_prob)\n # get probabilities\n y = [y_prob[i, j] for (i, j) in enumerate(y_true)]\n ll = - np.mean(np.log(y))\n return ll", "def fit(self,\n obs: np.ndarray,\n discounted_returns: np.ndarray\n ) -> np.ndarray:\n\n with tf.GradientTape() as tape:\n # predicted value is calcualted by subbing in\n pred_value = self.model(obs, training=True)\n # fit value function by regression on mean-squared error (Pseudocode line 8)\n # minimizing the difference between the predicted value and the actual value\n critic_loss = self.loss_func(pred_value, tf.stop_gradient(discounted_returns))\n\n gradients = tape.gradient(critic_loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n\n return critic_loss", "def scores_(self):\n return self.predictor.scores_", "def forward(ctx, scores):\n size = scores.size()\n prob = F.softmax(scores, dim=-1)\n idx = multinomial(prob.view(-1, size[-1]), num_samples=1, replacement=False).view(size[:-1])\n scores_net = eye(scores.size(-1), device=scores.device)\n return scores_net[idx]", "def loss(self,A2,label):\r\n m = label.shape[0]\r\n\r\n log_likelihood = -np.log(A2[label,range(m)])\r\n loss = np.sum(log_likelihood) / m\r\n return loss", "def _loss_gradient(x0, x1, b, w, lam, weights=None):\n nvars = len(w)\n\n # initialize + regularization term\n loss = 0.5 * lam * np.sum(w ** 2)\n gradient = np.zeros(nvars + 1) # first position is b\n gradient[1:] = lam * w\n\n # we need prediction for x\n pred_x_0_1 = [LogisticRegression._sigmoid(x0, b, w), LogisticRegression._sigmoid(x1, b, w)]\n\n # the log likelihood\n log_like_x_0_1 = [np.log(1.0 - pred_x_0_1[0]),\n np.log(pred_x_0_1[1])]\n\n # also need the error for gradient.\n error = [pred_x_0_1[0],\n pred_x_0_1[1] - 1]\n\n if weights is None:\n loss += -np.sum(log_like_x_0_1[1]) - np.sum(log_like_x_0_1[0])\n gradient[0] += np.sum(error[0]) + np.sum(error[1]) # * 1 for bias term \n for k in range(nvars):\n gradient[k + 1] += np.sum(error[0] * x0[:, k]) + np.sum(error[1] * x1[:, k])\n else:\n loss += -np.sum(weights[1] * log_like_x_0_1[1]) - np.sum(weights[0] * log_like_x_0_1[0])\n gradient[0] += np.sum(error[0] * weights[0]) + np.sum(error[1] * weights[1])\n for k in range(nvars):\n 
gradient[k + 1] += ( np.sum(weights[0] * error[0] * x0[:, k]) +\n np.sum(weights[1] * error[1] * x1[:, k]) )\n return loss, gradient", "def score(self, X, y=...):\n ...", "def multiclass_log_loss(y_true, y_pred, eps=1e-15):\n clip = np.clip(y_pred, eps, 1 - eps)\n actual = np.zeros(y_pred.shape)\n rows = actual.shape[0]\n print rows\n print np.arange(rows)\n print (y_true.astype(int))\n actual[np.arange(rows), y_true.astype(int)] = 1\n print actual\n vsota = np.sum(actual * np.log(clip))\n print vsota\n return -1.0 / rows * vsota", "def loss(self, scores, true_pos, lamb=1e-7):\n loss = F.multi_margin_loss(scores, true_pos, margin=self.config[\"margin\"])\n if self.config[\"use_local_only\"]:\n return loss\n\n # regularization\n X = F.normalize(self.rel_embs)\n diff = (\n (\n X.view(self.config[\"n_rels\"], 1, -1)\n - X.view(1, self.config[\"n_rels\"], -1)\n )\n .pow(2)\n .sum(dim=2)\n .add_(1e-5)\n .sqrt()\n )\n diff = diff * (diff < 1).float()\n loss -= torch.sum(diff).mul(lamb)\n\n X = F.normalize(self.ew_embs)\n diff = (\n (\n X.view(self.config[\"n_rels\"], 1, -1)\n - X.view(1, self.config[\"n_rels\"], -1)\n )\n .pow(2)\n .sum(dim=2)\n .add_(1e-5)\n .sqrt()\n )\n diff = diff * (diff < 1).float()\n loss -= torch.sum(diff).mul(lamb)\n return loss", "def log_sum_exp(vec):\r\n\r\n\r\n max_score, idx = torch.max(vec, -1, keepdim = True) # ( B, to_target, 1)\r\n # max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M\r\n # max_score.expand_as(vec)\r\n # to_target = vec.size(1)\r\n\r\n return max_score.squeeze(-1) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), -1)) # B * to_target\r", "def linear_regression(features, values):\n clf = SGDRegressor(n_iter=100)\n clf.fit(features,values)\n print(clf.score(features,values))\n intercept = clf.intercept_ \n params = clf.coef_\n \n return intercept, params", "def score(self, y_hat, y_true=None, verbose=True):\n nan_score = float(9999)\n act = y_true if y_true is not None else self.actual[self.y_cols]\n if hasattr(act, 'values'):\n act = act.values\n if hasattr(y_hat, 'values'):\n y_hat = y_hat.values\n assert act.shape == y_hat.shape, f'shape mismatch in DM.score(): ' \\\n f'{act.shape} != {y_hat.shape}'\n sklearn_metrics = {'mse': mse, 'msle': msle}\n lossf = sklearn_metrics[cfg.data_cfg['loss']]\n try:\n score = lossf(act, y_hat)\n except ValueError as err:\n print(err)\n print(f'NaNs in prediction. Setting score to {nan_score}.')\n score = nan_score\n return score, act", "def function(self, scores, multilabel):\n\n # Output functions\n # pylint: disable=C3001\n identity = lambda x: x\n sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))\n softmax = lambda x: np.exp(x) / np.sum(np.exp(x))\n function = identity if multilabel is None else sigmoid if multilabel else softmax\n\n # Apply output function\n return function(np.array(scores))", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n num_classes = W.shape[1]\n #print('num_classes = ', num_classes)\n num_train = X.shape[0]\n #print('num_train = ', num_train)\n \n min_score = 0.0\n shifted_scores = np.zeros(W.shape[1])\n #max_score = np.zeros(W.shape[1])\n max_score = 0.0\n \n loss_array = np.zeros(y.shape[0])\n for i in range(num_train):\n scores = X[i].dot(W)\n #print('scores dimensions = ', scores.shape)\n #print('scores = ', scores)\n #print('i =', i, 'y = ', y[i])\n min_score = np.min(scores)\n max_score = np.max(scores)\n #print(min_score,max_score)\n shifted_scores = np.multiply(-1,scores + abs(min_score))\n #print(scores)\n #print(shifted_scores)\n exp_scores = np.exp(shifted_scores)\n norm = np.amax(exp_scores)\n norm_scores = np.divide(exp_scores,norm)\n loss_array[i] = np.multiply(-1,np.log(norm_scores[y[i]]/(np.sum(norm_scores)-norm_scores[y[i]])))\n #print(loss_array)\n for j in range(num_classes): \n\t\n if j == y[i]: \n dW[:,j] = np.multiply(norm_scores[y[i]],1-norm_scores[y[i]])\n else:\n dW[:,j] = np.multiply(-1,np.multiply(norm_scores[y[i]],norm_scores[y[j]]))\n\t\t\t\n\t\t\t\n loss = np.amax(loss_array)\n\n # Add regularization to the loss.\n loss = 0.5 * reg * np.sum(W * W) + loss\n \n \n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def __call__(self, score_map, one_hot_label) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n assert not one_hot_label.requires_grad\n pi = one_hot_label.to(torch.float)\n\n assert not torch.any(torch.isinf(score_map))\n assert not torch.any(torch.isnan(score_map))\n\n log_qi = torch.log(self.clamp_softmax(score_map))\n\n assert not torch.any(torch.isnan(log_qi))\n\n log_fg_qi = log_qi[:, 1:, :, :]\n fg_pi = pi[:, 1:, :, :]\n fg_count = torch.sum(fg_pi, dim=(1, 2, 3)) + self.eps\n\n log_bg_qi = log_qi[:, 0:1, :, :]\n bg_pi = pi[:, 0:1, :, :]\n bg_count = torch.sum(bg_pi, dim=(1, 2, 3)) + self.eps\n\n fg_loss_ = torch.sum(fg_pi * log_fg_qi, dim=(1, 2, 3))\n fg_loss = -1 * torch.mean(fg_loss_ / fg_count) # mean reduce on batch\n\n bg_loss_ = torch.sum(bg_pi * log_bg_qi, dim=(1, 2, 3))\n bg_loss = -1 * torch.mean(bg_loss_ / bg_count) # mean reduce on batch\n\n total_loss = bg_loss + fg_loss\n assert not torch.any(torch.isnan(total_loss)), \\\n \"fg_loss: {} fg_count: {} bg_loss: {} bg_count: {}\".format(fg_loss, fg_count, bg_loss, bg_count)\n\n return total_loss, bg_loss, fg_loss" ]
[ "0.6385058", "0.6369366", "0.6337341", "0.62241894", "0.6202223", "0.6085212", "0.6069365", "0.5963406", "0.59437096", "0.59082556", "0.5902111", "0.58229357", "0.5803482", "0.57961464", "0.5775284", "0.5754475", "0.5751942", "0.5720772", "0.5715828", "0.57079184", "0.56972706", "0.56924284", "0.56900287", "0.5684798", "0.56828547", "0.5678644", "0.5676018", "0.5663591", "0.56587905", "0.56492484", "0.5648787", "0.56481785", "0.56374395", "0.5629737", "0.5618622", "0.56112176", "0.55889016", "0.5584572", "0.55768627", "0.5565633", "0.5564364", "0.5564364", "0.55539227", "0.55504405", "0.5550231", "0.5541401", "0.55369365", "0.5520265", "0.5516783", "0.5515999", "0.55155027", "0.5511123", "0.5509981", "0.55050623", "0.5502061", "0.54968494", "0.54931635", "0.5480789", "0.5477338", "0.54744023", "0.54708344", "0.5462712", "0.5453526", "0.54526544", "0.54509056", "0.5449664", "0.544792", "0.54381615", "0.5432927", "0.54307073", "0.5429782", "0.54242986", "0.54225075", "0.54199594", "0.54187393", "0.54067624", "0.5405294", "0.54005265", "0.53963625", "0.5395284", "0.5392596", "0.53923595", "0.5389547", "0.538912", "0.53888845", "0.5385502", "0.5383773", "0.5382428", "0.5381121", "0.53808665", "0.5380264", "0.53795123", "0.5376071", "0.53754514", "0.5373938", "0.5373181", "0.5370708", "0.53638506", "0.53604347", "0.5359435" ]
0.6393054
0
Evaluate the size expected from the FP relation for a given velocity dispersion and Vband apparent magnitude
def get_effective_radius(self, vel_disp, m_V):
    log_vel_disp = np.log10(vel_disp)
    log_R_eff = self.a*log_vel_disp + self.b*m_V + self.c + np.random.randn()*self.intrinsic_scatter
    R_eff = 10**log_R_eff
    return R_eff
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fluxRatio_fromVmag(Vmag):\n fluxRatio = 10.**(-0.4*Vmag)\n return fluxRatio", "def width_v_a(model: SingleRhNeutrinoModel) -> float:\n u = 0.5 * np.tan(2 * model.theta)\n return 9 * ALPHA_EM * GF**2 / (256 * np.pi**4) * model.mx**5 * u**2", "def testCalspecMags(self):\n std = MKIDStd.MKIDStd()\n bFilter = std.filters['B']\n vFilter = std.filters['V']\n\n # BD17\n bd17Flux = std.load(\"bd17\")\n B = std.getVegaMag(bd17Flux, bFilter)\n V = std.getVegaMag(bd17Flux, vFilter)\n self.assertAlmostEqual(B-V, 0.44, places=1, msg=\"value=%f\"%B)\n self.assertAlmostEqual(B, 9.47, places=0, msg=\"value=%f\"%B)", "def computeMagnitudeErr(instFluxErr, instFlux, calibrationErr, calibration, flux):\n return 2.5/np.log(10)*computeMaggiesErr(instFluxErr, instFlux, calibrationErr, calibration, flux) / flux", "def magnitude(v: Vector) -> float:\n return math.sqrt(sum_of_squares(v)) #math.sqrt() is a square root function", "def fl_over_avfl(self, searcher, docnum, fieldnum):\n return searcher.doc_field_length(docnum, fieldnum) / self.avg_field_length(searcher, fieldnum)", "def freq_optimization(self):\n index = identify_scale(self.vz, True)\n # In case the patient is limping\n if index > 35:\n index = index / 2\n print(f\"Scale used is {index}\")", "def magnitude(v):\n if len(v.arr) != 4 or v[3] != 0.0:\n raise ValueError(\"Only use this function with vectors.\")\n return np.sqrt(np.sum(np.square(v.arr)))", "def get_vcond(lambdam, taum):\n return 2 * lambdam / taum", "def aliveness(self, physics):\n return 0.", "def test_spectral_density_vega_wf(wf, fluxd, to):\n v = fluxd.to(to.unit, spectral_density_vega(wf))\n assert v.unit == to.unit\n if to.unit in (VEGAmag, JMmag):\n assert np.isclose(v.value, to.value, atol=0.001)\n else:\n assert np.isclose(v.value, to.value, rtol=0.001)", "def calculate_magnitude(self, band, system='AB'):\n\n if system not in ('AB', 'Vega'):\n raise ValueError('`system` must be one of `AB` or `Vega`')\n\n f1 = self.calculate_flux(band)\n\n if f1 > 0:\n magnitude = -2.5 * log10(f1 / band.flux[system])\n\n if system == 'Vega':\n # Add 0.026 because Vega has V = 0.026:\n magnitude += 0.026\n\n else:\n magnitude = np.inf\n\n return magnitude", "def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)", "def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n 
comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)", "def magnitude(v: Vector) -> float:\n return math.sqrt(sum_of_squares(v))", "def velocity(n_core, q, beta_invariant, material_dispersion=None):\n c = scipy.constants.speed_of_light\n if material_dispersion is None:\n A = 2 / c / (2 + q)\n B = q * n_core**2 / c / (2 + q)\n else:\n N1 = n_core + material_dispersion\n y = 2 * n_core / N1\n A = 2 * N1 / n_core * (1 + 0.25 * y) / c / (q + 2)\n B = q * n_core**2 * A - 1 / 4 / c * N1 * n_core * y\n\n return A * beta_invariant + B / beta_invariant", "def get_fermi_velocities():\n\n vr = Vasprun('vasprun.xml')\n # eigenvalues = vr.eigenvalues\n bs = vr.get_band_structure()\n bands = bs.bands\n kpoints = bs.kpoints\n efermi = bs.efermi\n h_bar = 6.582e-16 # eV*s\n\n fermi_bands = []\n for spin in bands:\n for i in range(len(bands[spin])):\n if max(bands[spin][i]) > efermi > min(bands[spin][i]):\n fermi_bands.append(bands[spin][i])\n\n fermi_velocities = []\n for band in fermi_bands:\n for i in range(len(band)-1):\n if (band[i] < efermi < band[i+1]) or (band[i] > efermi > band[i+1]):\n dk = np.sqrt((kpoints[i+1].cart_coords[0]\n - kpoints[i].cart_coords[0])**2\n + (kpoints[i+1].cart_coords[1]\n - kpoints[i].cart_coords[1])**2)\n v_f = abs((band[i+1] - band[i]) / (h_bar * dk))\n fermi_velocities.append(v_f)\n\n return fermi_velocities # Values are in Angst./s", "def VarianceOfAbsAcceleration(self):\n H = []\n for i in range(len(self.omega_range)):\n \"\"\"Calculation of the Transmission matrix H\"\"\"\n H.append(np.linalg.inv((-self.omega_range[i] ** 2 * self.M\n - 1j * self.omega_range[i] * self.C\n + self.K)))\n \"\"\"squared absolute of the transmission matrix H multiplied with the diagonal of the mass matrix M (M*I)\"\"\"\n FRFacc = [H[wincr].dot(np.diagonal(self.M)) * self.omega_range[wincr] ** 2 for wincr in\n range(len(self.spectrum))]\n Habs2 = [(np.abs(np.ones(len(vector), dtype=float) - vector) ** 2) for vector in FRFacc]\n PSDexc = self.spectrum\n \"\"\"Response of all DOFs as PSD\"\"\"\n RespPSD = [Habs2[wincr] * PSDexc[wincr] for wincr in range(len(self.spectrum))]\n AccPSD = [abs(RespPSD[wincr] + 0*PSDexc[wincr]) for wincr in range(len(self.spectrum))]\n \"\"\"The variance of the response can be obtained with the integral of the response PSD. 
\n integral(PSD_response)\"\"\"\n variance = (np.trapz(AccPSD, self.omega_range, axis=0))\n return variance", "def magnitude(self):\n\t\treturn sqrt(self.dot(self))", "def magnitude(v):\n\treturn math.sqrt(sum_squares(v))", "def __q2v_cf(self, w, rhom, q):\n return float(q / (rhom + q / w))", "def calculate_magnitude(east, north, vertical):\n\n if not Ensemble.is_bad_velocity(east) and not Ensemble.is_bad_velocity(north) and not Ensemble.is_bad_velocity(vertical):\n return math.sqrt((east*east) + (north*north) + (vertical*vertical))\n else:\n return Ensemble.BadVelocity", "def CalcForce_aeroframe_DEP(V, CoefMatrix, x, rho, g):\r\n\r\n #Compute aero forces\r\n # here x must be of the form (alpha, beta, p, q, r, da, dr, de) (last one punctualy used)\r\n # set non dim for p,q,r\r\n nonDim=np.ones(7)\r\n nonDim[2]=g.b/(2*V)\r\n nonDim[3]=g.c/(2*V)\r\n nonDim[4]=g.b/(2*V)\r\n # F=np.dot(CoefMatrix,x[0:7]) # commented form, modification to account for symmetric drag increase of side slip\r\n F=np.zeros((3))\r\n M=np.zeros((3))\r\n xsym=np.copy(x[0:-1])\r\n xsym[1]=abs(xsym[1]) # make beta always positive since derivatives have already correct sign for drag and lift only\r\n xsym[-3]=abs(xsym[-3]) # make ailerons deflection always positive for drag increase and lift decrease\r\n xsym[-1]=abs(xsym[-1]) # make rudder deflection always positive for drag increase and lift decrease\r\n F[0]=np.dot(CoefMatrix[0],xsym)\r\n F[1]=np.dot(CoefMatrix[1],x[0:-1]) #side force\r\n F[2]=np.dot(CoefMatrix[2],xsym)\r\n M=np.dot(CoefMatrix[3:6,:],x[0:-1])\r\n# print(\"Printing moment coeff\")\r\n# print(M)\r\n\r\n \r\n #No need to project\r\n# alpha=x[0]\r\n# beta=x[1]\r\n# H=np.array([[math.cos(alpha)*math.sin(beta), -math.cos(alpha)*math.sin(beta), -math.sin(alpha)],[math.sin(beta), math.cos(beta), 0],[math.sin(alpha)*math.cos(beta), -math.sin(alpha)*math.sin(beta), math.cos(alpha)]])\r\n if V<=71 :\r\n Fbody=np.array([-F[0]-g.Cd0_fl,F[1],-F[2]-g.CL0_fl]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0_fl,0])\r\n else:\r\n Fbody=np.array([-F[0]-g.Cd0,F[1],-F[2]-g.CL0]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0,0])\r\n \r\n\r\n Fbody=0.5*V**2.0*rho*g.S*Fbody\r\n Moment=0.5*V**2.0*rho*g.S*g.b*Moment\r\n \r\n return np.append(Fbody, Moment)", "def test_mag_form_fac_case1():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac()[0], ion.calc_mag_form_fac()[1:]\n del _temp\n assert (abs(np.sum(formfac) - 74.155233575216599) < 1e-12)", "def width_h_invis(self):\n if m_higgs > 2.0 * self.mx:\n coupling = self.gsxx * self.stheta / np.sqrt(1 - self.stheta**2)\n\n val = (\n (coupling**2 * (m_higgs**2 - 4 * self.mx**2) ** 1.5)\n / (8.0 * m_higgs**2 * np.pi)\n ).real\n\n assert val >= 0\n\n return val\n else:\n return 0.0", "def approximate_volume(pdb_filenames, n_molecules_list, box_scaleup_factor=2.0):\n volume = 0.0 # in cubic angstroms\n for k, (pdb_file) in enumerate(pdb_filenames):\n molecule_volume = 0.0\n molecule_trj = md.load(pdb_filenames[k])\n for atom in molecule_trj.topology.atoms:\n if atom.element.symbol == 'H':\n molecule_volume += 5.0 # approximated from bondi radius = 1.06 angstroms\n else:\n molecule_volume += 15.0 # approximated from bondi radius of carbon = 1.53 angstroms\n volume += molecule_volume * n_molecules_list[k]\n box_size = volume**(1.0/3.0) * box_scaleup_factor\n return box_size", "def test_mag_form_fac_case2():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac(qrange=[0, 2])[0], 
ion.calc_mag_form_fac(qrange=[0, 2])[1:]\n del _temp\n assert (abs(np.sum(formfac) - 74.155233575216599) < 1e-12)", "def calc(self):\n\n # the following if query ensures that volume- and interaction-terms\n # are only calculated if tau > 0.\n # (to avoid nan-values from invalid function-evaluations)\n\n if self.V.tau.shape == (1,):\n Isurf = self.surface()\n # differentiation for non-existing canopy, as otherwise NAN values\n if self.V.tau > 0.:\n Ivol = self.volume()\n if self.int_Q is True:\n Iint = self.interaction()\n else:\n Iint = np.array([0.])\n else:\n Ivol = np.array([0.])\n Iint = np.array([0.])\n else:\n # calculate surface-term (valid for any tau-value)\n Isurf = self.surface()\n\n # store initial parameter-values\n old_t_0 = self.t_0\n old_p_0 = self.p_0\n old_t_ex = self.t_ex\n old_p_ex = self.p_ex\n\n old_tau = self.V._get_tau()\n old_omega = self.V._get_omega()\n old_NN = self.SRF._get_NormBRDF()\n\n # set mask for tau > 0.\n mask = old_tau > 0.\n valid_index = np.where(mask)\n inval_index = np.where(~mask)\n\n # set parameter-values to valid values for calculation\n self.t_0 = old_t_0[valid_index[0]]\n self.p_0 = old_p_0[valid_index[0]]\n self.t_ex = old_t_ex[valid_index[0]]\n self.p_ex = old_p_ex[valid_index[0]]\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically adds an axis to the arrays!\n self.V.tau = np.squeeze(old_tau[valid_index[0]])\n if np.array(self.V.omega).size != 1:\n self.V.omega = np.squeeze(old_omega[valid_index[0]])\n if np.array(self.SRF.NormBRDF).size != 1:\n self.SRF.NormBRDF = np.squeeze(old_NN[valid_index[0]])\n\n # calculate volume and interaction term where tau-values are valid\n _Ivol = self.volume()\n if self.int_Q is True:\n _Iint = self.interaction()\n else:\n _Iint = np.full_like(self.t_0, 0.)\n\n # reset parameter values to old values\n self.t_0 = old_t_0\n self.p_0 = old_p_0\n self.t_ex = old_t_ex\n self.p_ex = old_p_ex\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically add an axis to the arrays!\n self.V.tau = np.squeeze(old_tau)\n self.V.omega = np.squeeze(old_omega)\n self.SRF.NormBRDF = np.squeeze(old_NN)\n\n # combine calculated volume-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n Ivol = np.ones_like(self.t_0)\n Ivol[valid_index[0]] = _Ivol\n Ivol[inval_index[0]] = np.ones_like(Ivol[inval_index[0]]) * 0.\n\n # combine calculated interaction-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n if self.int_Q is True:\n Iint = np.ones_like(self.t_0)\n Iint[valid_index[0]] = _Iint\n Iint[inval_index[0]] = np.ones_like(Iint[inval_index[0]]) * 0.\n else:\n Iint = np.full_like(self.t_0, 0.)\n\n return Isurf + Ivol + Iint, Isurf, Ivol, Iint", "def magnitude(v):\n return math.sqrt(sum_of_squares(v))", "def testGetVegaMag(self):\n std = MKIDStd.MKIDStd()\n vegaFlux = std.load(\"vega\")\n bd17Flux = std.load(\"bd17\")\n for filter in ['U','B','V','R','I']:\n aFilter = std.filters[filter] \n mag = std.getVegaMag(vegaFlux, aFilter)\n self.assertAlmostEqual(0.03, mag, msg=\"filter=%s mag=%f\"%(filter,mag))", "def get_axis_ratio(self, vel_disp):\n\t\tscale = self.a*vel_disp + self.b\n\t\tq = 0.0\n\t\twhile q < self.lower:\n\t\t\tq = 1.0 - np.random.rayleigh(scale, size=None)\n\t\treturn q", "def test_spectral_density_vega_bp(filename, fluxd, to, tol):\n fn = get_pkg_data_filename(os.path.join(\n '..', '..', 'photometry', 'data', filename))\n bp = 
synphot.SpectralElement.from_file(fn)\n\n v = fluxd.to(to.unit, spectral_density_vega(bp))\n assert v.unit == to.unit\n if to.unit in (VEGAmag, JMmag):\n assert np.isclose(v.value, to.value, atol=tol)\n else:\n assert np.isclose(v.value, to.value, rtol=tol)", "def fraction_of_infectious_virus(self) -> _VectorisedFloat:\n return self.virus.viable_to_RNA_ratio * (1 - self.host_immunity)", "def magnitude(self):\n return self.real ** 2 + numpy.inner(self.pure, self.pure)", "def test_mag_form_fac():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac(q=1.)[0], ion.calc_mag_form_fac(q=1.)[1:]\n del _temp\n assert (abs(formfac - 0.932565) < 1e-6)", "def calculate_rf_size(rf_size, downsample):\n h = 61 # 24\" monitor\n d = 10 # 10cm from the right eye\n r = 1080 / downsample # Vertical resolution\n d_px = np.degrees(math.atan2(h / 2, d)) / (r / 2)\n return rf_size * d_px", "def calculate_ft(self):\n \n # Create a function which is able to evaluate B**2\n ffunc = scipy.interpolate.interp1d(self.psigrid, self.e.getF()[self.tind])\n def b2_func(R, Z, psi):\n bt = ffunc(psi)/R\n br = -self.psifunc.ev(R, Z, dy=1)/R\n bz = self.psifunc.ev(R, Z, dx=1)/R\n \n return bt**2 + br**2 + bz**2\n \n\n def b_bmax2(R,Z,psi):\n b2 = b2_func(R,Z,psi)\n return b2 / np.max(b2)\n \n def b_bmax(R,Z,psi):\n return np.sqrt(b_bmax2(R,Z,psi))\n \n # Evaluate the flux-surface averaged h^2 and h, as required\n fsa_h2 = self.fs_average(b_bmax2)\n fsa_h = self.fs_average(b_bmax)\n \n # This is the function which gets flux-surface averaged in equation (7)\n def ftl_func(R,Z,psi):\n h = b_bmax(R,Z,psi)\n h2 = b_bmax2(R,Z,psi)\n \n return (1 - (np.sqrt(1 - h) * (1 + 0.5 * h)))/h2\n \n \n # Equation 6, 7 in Lin-Liu\n fs_ftu = 1 - fsa_h2 / fsa_h**2 * (1 - np.sqrt(1 - fsa_h) * (1 + 0.5 * fsa_h))\n fs_ftl = 1 - fsa_h2 * self.fs_average(ftl_func)\n # Equation 18, 19 \n om = 0.75\n self.fs_ft = om*fs_ftu + (1-om)*fs_ftl", "def EDD_VDIF_Frame_Size(sampling_rate):\n bw_GHz = sampling_rate / 2E9\n\n rate_Msps = bw_GHz*2000\n rate_Mbps = 2*rate_Msps # % 2-bit\n log.debug('Bandwidth {:.3f} GHz --> {:.3f} Msps --> {:.3f} Mbit/sec'.format(bw_GHz, rate_Msps, rate_Mbps))\n\n vdifGranularity = 8 # % VDIF specs, granularity of payload size\n\n num = np.arange(1024, 9001, vdifGranularity) * 8*1000 # % bits per frame, various payload sizes\n den = rate_Mbps #; % datarate bits per sec\n fitting_payloads = num[num % den == 0]/(8*1000); # % corresponding frame payloads in byte\n\n rate_Bps = rate_Mbps*1e6/8 #;\n final_payloads = fitting_payloads[rate_Bps % fitting_payloads == 0] #;\n final_fpss = rate_Bps / final_payloads;\n final_framens = final_payloads*4*1e3 / rate_Msps;\n\n return final_payloads, final_fpss, final_framens", "def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag", "def width_v_v_v(model: SingleRhNeutrinoModel, genv: Generation):\n mx = model.mx\n u = 0.5 * np.tan(2 * model.theta)\n w = parameters.GF**2 * mx**5 / (768 * np.pi**3) * u**2\n pre = 2 if genv == model.gen else 1.0\n return pre * w", "def Sizes(self, with_sign=False):\n\n self.__do_essential_memebers_exist__()\n\n try:\n from Florence import DisplacementFormulation\n except ImportError:\n raise ValueError(\"This functionality requires Florence's support\")\n\n if self.element_type != \"line\":\n # FOR LINE ELEMENTS THIS APPROACH DOES NOT WORK AS JACOBIAN IS NOT WELL DEFINED\n formulation = DisplacementFormulation(self)\n sizes = np.zeros(self.nelem)\n if not with_sign:\n for elem in range(self.nelem):\n 
LagrangeElemCoords = self.points[self.elements[elem,:],:]\n sizes[elem] = formulation.GetVolume(formulation.function_spaces[0],\n LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)\n else:\n for elem in range(self.nelem):\n LagrangeElemCoords = self.points[self.elements[elem,:],:]\n sizes[elem] = formulation.GetSignedVolume(formulation.function_spaces[0],\n LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)\n return sizes\n\n else:\n warn(\"Sizes of line elements could be incorrect if the mesh is curvilinear\")\n return self.Lengths()", "def _get_rupture_dimensions(src, mag, nodal_plane):\n area = src.magnitude_scaling_relationship.get_median_area(\n mag, nodal_plane.rake)\n rup_length = math.sqrt(area * src.rupture_aspect_ratio)\n rup_width = area / rup_length\n seismogenic_layer_width = (src.lower_seismogenic_depth\n - src.upper_seismogenic_depth)\n max_width = (seismogenic_layer_width\n / math.sin(math.radians(nodal_plane.dip)))\n if rup_width > max_width:\n rup_width = max_width\n rup_length = area / rup_width\n return rup_length, rup_width", "def vFrmE(E):\n Ej=E*1.6021*10**-22\n m=1.674929*10**-27\n v=np.sqrt((2.*Ej)/m)\n return(v)", "def compute_volume(self) -> float:\n return (\n (1 if self.clockwise else -1)\n * np.sum(\n np.linalg.det(\n np.dstack(\n (\n self.vertices[self._faces[:, 0]],\n self.vertices[self._faces[:, 1]],\n self.vertices[self._faces[:, 2]],\n )\n )\n )\n )\n / 6\n )", "def ret_vol_ratio(self) -> float:\n return self.geo_ret / self.vol", "def read_voltage_stepsize(self):\n function_string = 'DELTAV' + self.output + '?'\n return self.scpi_comm(function_string)", "def magnetisation(field):\n norm_field = df.Field(field.mesh, dim=1, value=(field.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n return df.integral(field * df.dV / volume, direction='xyz')", "def test_magnitude(self):\n\n # test small magnitudes with regular unit vectors\n u1 = (1,)\n u2 = (0, 1/2, 0, 1/2, 1/2, 0, 0, 0, 1/2)\n u3 = (12/13, 4/13, 3/13)\n for k in (0, -1, 1):\n s = space(fake_curvature=k)\n for d in (0, 1, 1/3, 3/2):\n for n in (u1, u2, u3):\n p = s.make_point(n, d)\n self.assertTrue(isclose(\n abs(p),\n d\n ))\n self.assertTrue(isclose(\n s.distance_between(p, s.make_origin(len(n))),\n d\n ))\n\n # test direction vector normalization\n v1 = (73733,)\n v2 = tuple(range(30))\n v3 = (-11, 1, 0, -1, 11, 1/11)\n for k in (0, -1, 1):\n s = space(fake_curvature=k)\n for d in (0, 1, 1/3, 3/2):\n for n in (v1, v2, v3):\n p = s.make_point(n, d, normalize=True)\n self.assertTrue(isclose(\n abs(p),\n d\n ))\n self.assertTrue(isclose(\n s.distance_between(p, s.make_origin(len(n))),\n d\n ))\n \n # test elliptic space looping property\n pi_ref = 3.14159265358979323846264338327933\n for r in (1, 2, 3, 1/3):\n k = 1/r\n s = space(fake_curvature=k)\n for j, d in ((2, pi_ref - 2), (pi_ref, 0)):\n j *= r\n d *= r\n for n in (u1, u2, u3):\n p = s.make_point(n, j)\n self.assertTrue(isclose(\n abs(p),\n d,\n abs_tol = 1e-15\n ))\n self.assertTrue(isclose(\n s.distance_between(p, s.make_origin(len(n))),\n d,\n abs_tol = 1e-15\n ))", "def effectivedb_size_percentage(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n retout = out.get(get_key(zonekeys.EFFDB_PER, self._SW_VER), None)\n if retout is not None:\n return str(retout) + \"%\"\n return None", "def effectivedb_size(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + 
str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n retout = out.get(get_key(zonekeys.EFFDB_SIZE, self._SW_VER), None)\n if retout is not None:\n return int(retout)\n return None", "def v_multiplier(self):\n return (4./3)*np.pi*(self.bins[:, 1]/2)**3", "def normalized_effect_size(self):\n mus = self.mu + self.absolute_effects\n pop_mu = (mus * self.test_splits).sum()\n sigma2_m = (self.test_splits * np.square(mus - pop_mu)).sum()\n f = np.sqrt(sigma2_m) / self.sigma\n return f", "def velocity_field(xt,yt,x0,y0,Vinf,dia,rot,chord,B,param=None,veltype='all',integration='simp',m=220,n=200):\n rad = dia/2.\n tsr = rad*fabs(rot)/Vinf\n solidity = (chord*B)/rad\n\n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n\n coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9 = coef_val()\n\n # Calculating EMG distribution parameters (based on polynomial surface fitting)\n if param is None:\n loc1 = _parameterval(tsr,solidity,coef0)\n loc2 = _parameterval(tsr,solidity,coef1)\n loc3 = _parameterval(tsr,solidity,coef2)\n spr1 = _parameterval(tsr,solidity,coef3)\n spr2 = _parameterval(tsr,solidity,coef4)\n skw1 = _parameterval(tsr,solidity,coef5)\n skw2 = _parameterval(tsr,solidity,coef6)\n scl1 = _parameterval(tsr,solidity,coef7)\n scl2 = _parameterval(tsr,solidity,coef8)\n scl3 = _parameterval(tsr,solidity,coef9)\n\n else:\n # Reading in EMG distribution parameters\n loc1 = param[0]\n loc2 = param[1]\n loc3 = param[2]\n spr1 = param[3]\n spr2 = param[4]\n skw1 = param[5]\n skw2 = param[6]\n scl1 = param[7]\n scl2 = param[8]\n scl3 = param[9]\n\n ###################################\n if veltype == 'vort':\n # VORTICITY CALCULATION (NO INTEGRATION)\n if x0t < 0.:\n vel = 0.\n else:\n vel = _vawtwake.vorticitystrength(x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)/rot\n ###################################\n else:\n # Integration of the vorticity profile to calculate velocity\n if integration == 'simp':\n # SIMPSON'S RULE INTEGRATION (must use polynomial surface coefficients from VAWTPolySurfaceCoef.csv)\n inte = 1 # Simpson's Rule\n # inte = 2 # Trapezoidal Rule (optional ability of the code-- faster but less accurate)\n\n if param is not None:\n print \"**** Using polynomial surface coefficients from VAWTPolySurfaceCoef.csv for Simpson's rule integration ****\"\n\n vel_xs,vel_ys = _vawtwake.vel_field(xt,yt,x0,y0,dia,rot,chord,B,Vinf,coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9,m,n,inte)\n\n if veltype == 'all':\n vel = sqrt((vel_xs*Vinf + Vinf)**2 + (vel_ys*Vinf)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs*Vinf + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])\n ###################################\n elif integration == 'gskr':\n # 21-POINT GAUSS-KRONROD RULE QUADRATURE INTEGRATION\n xbound = (scl3+5.)*dia\n argval = (x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)\n if veltype == 'all' or veltype == 'x' or veltype == 'ind':\n vel_x = _dblquad(_vawtwake.integrandx,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_xs = (vel_x[0]*fabs(rot))/(2.*pi)\n if veltype == 'all' or veltype == 'y' or veltype == 'ind':\n vel_y = _dblquad(_vawtwake.integrandy,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_ys = (vel_y[0]*fabs(rot))/(2.*pi)\n\n if veltype == 'all':\n vel = sqrt((vel_xs + Vinf)**2 + (vel_ys)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys/Vinf\n 
elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])/Vinf\n ###################################\n\n return vel", "def vector_length(self, x: float, y: float, z: float) -> float:\n A = 2.0 * (x * y * self.aga + x * z * self.bbe + y * z * self.cal)\n return sqrt(x ** 2 * self.asq + y ** 2 * self.bsq + z ** 2 * self.csq + A)", "def calc_mag(self):\n mag = np.sum(self.box)\n return mag", "def _calc_energy( self, V_a, eos_d ):\n pass", "def magnitude(frame):\n sobelx = lambda im: cv2.Sobel(im, cv2.CV_64F, 1, 0, ksize=3)\n sobely = lambda im: cv2.Sobel(im, cv2.CV_64F, 0, 1, ksize=3)\n dxabs = cv2.convertScaleAbs(sobelx(frame))\n dyabs = cv2.convertScaleAbs(sobely(frame))\n\n return cv2.addWeighted(dxabs, 0.5, dyabs, 0.5, 0)", "def magnitude(p):\n return sqrt((p**2).sum())", "def mag_length(B, q=q_e):\n\n return np.sqrt(hbar/(q * B))", "def estimate_focal_length(self):\n fl = (self.fiber_diameter / 2) / np.tan(np.deg2rad(self.fov / 2))\n\n return fl", "def calc_eta_FC(Q_load_W, Q_design_W, phi_threshold, approach_call):\n phi = 0.0\n\n ## Approach A - NREL Approach\n if approach_call == \"A\":\n\n phi = float(Q_load_W) / float(Q_design_W)\n eta_max = 0.425 # from energy.gov\n\n if phi >= phi_threshold: # from NREL-Shape\n eta_el = eta_max - ((1 / 6.0 * eta_max) / (1.0 - phi_threshold)) * abs(phi - phi_threshold)\n\n if phi < phi_threshold:\n if phi <= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3 * (phi / (phi_threshold * 118 / 520.0))\n\n if phi < 0.5 * phi_threshold and phi >= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3.0 + \\\n eta_max * 0.25 * (phi - phi_threshold * 118 / 520.0) / (phi_threshold * (0.5 - 118 / 520.0))\n\n if phi > 0.5 * phi_threshold and phi < phi_threshold:\n eta_el = eta_max * (2 / 3.0 + 0.25) + \\\n 1 / 12.0 * eta_max * (phi - phi_threshold * 0.5) / (phi_threshold * (1 - 0.5))\n\n eta_therm_max = 0.45 # constant, after energy.gov\n\n if phi < phi_threshold:\n eta_therm = 0.5 * eta_therm_max * (phi / phi_threshold)\n\n else:\n eta_therm = 0.5 * eta_therm_max * (1 + eta_therm_max * ((phi - phi_threshold) / (1 - phi_threshold)))\n\n ## Approach B - Empiric Approach\n if approach_call == \"B\":\n\n if Q_design_W > 0:\n phi = float(Q_load_W) / float(Q_design_W)\n\n else:\n phi = 0\n\n eta_el_max = 0.39\n eta_therm_max = 0.58 # * 1.11 as this source gives eff. 
of HHV\n eta_el_score = -0.220 + 5.277 * phi - 9.127 * phi ** 2 + 7.172 * phi ** 3 - 2.103 * phi ** 4\n eta_therm_score = 0.9 - 0.07 * phi + 0.17 * phi ** 2\n\n eta_el = eta_el_max * eta_el_score\n eta_therm = eta_therm_max * eta_therm_score\n\n if phi < 0.2:\n eta_el = 0\n\n return eta_el, eta_therm", "def element_size(self):\n vecs = (\n self.nodes[self.elements[:, :4], :][:, 1:, :]\n - self.nodes[self.elements[:, :4], :][:, 0, None, :]\n )\n return np.abs(np.linalg.det(vecs)) / 6", "def betP(self, element):\n if element.is_empty():\n return 0\n\n if self.is_empty():\n return 0\n\n if not element.is_compatible(next(iter(self.focals))):\n return 0\n\n result = 0\n for focal, value in self.items():\n if not focal.is_empty():\n result += value * focal.conjunction_unsafe(element).cardinal / focal.cardinal\n return round(result, 6)", "def test_velocity(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.velocity[0], 144000.0)", "def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)", "def magnitude(self):\n return sqrt(self & self)", "def effective_width(self, intrinsic_width, dm, bandwidth, freq):\n a = sqrt(pow(intrinsic_width, 2) + pow((8.3e6 * fabs(dm) * (bandwidth / pow(freq, 3))), 2))\n return a", "def compute_Flocal(config):\n \n vlow = config['vlow']\n vhigh = config['vhigh']\n vdef = config['vdef']\n lo_restfreq = config[\"DOPPLERTRACKFREQ\"]\n\n velocity = (vlow + vhigh) * 0.5\n vd = Vdef()\n vd.compute_local_frame_with_vdef(vdef, velocity,\n lo_restfreq, velocity)\n # this better be the same as vlow since i sent in the avg\n cur_vhigh = vd.get_vhigh()\n cur_vlow = vd.get_vlow()\n if cur_vhigh != cur_vlow:\n \"PANIC: How can the avg velocities differ!!!!!\"\n \n return cur_vhigh", "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def fD(self, vpd):\n\t if vpd < 0.1:\n\t return 1.\n\t else:\n\t return 3/13./sqrt(vpd/1000.)", "def mag(field):\n return np.sqrt(np.sum(field**2, axis=0, keepdims=True))", "def volume_unit_ball(d_dimensions: int, norm=2) -> float:\n\n # get ball\n if norm == 0:\n b = float(\"inf\")\n elif norm == 1:\n b = 1.0\n elif norm == 2:\n b = 2.0\n else:\n raise ValueError(f\"Unrecognized norm: {norm}\")\n\n return (np.pi ** (0.5 * d_dimensions)) ** d_dimensions / gamma(b / d_dimensions + 1)", "def testscfenergydim(self):\r\n count_scfenergies = 
self.data.scfenergies.shape[0] - self.extrascfs\r\n count_atomcoords = self.data.atomcoords.shape[0] - self.extracoords\r\n assert count_scfenergies == count_atomcoords", "def getFluxSize(self,flux=0.5,frac=True,mode='radial',cen=(0,0),v0=1,\n minfunc='fmin',intkwargs=None,**kwargs):\n import scipy.optimize\n\n fmin = getattr(scipy.optimize,minfunc)\n\n if intkwargs is None:\n intkwargs = {}\n\n if mode == 'radial':\n if cen != (0,0):\n raise NotImplementedError('radial profiles must be centered on (0,0)')\n if frac:\n total = self.integrateCircular(np.inf,**intkwargs)\n flux = flux * total\n def f(r):\n intres = self.integrateCircular(r,**intkwargs)-flux\n return intres*intres\n\n if np.isscalar(v0):\n v0 = (v0,)\n elif mode == 'square':\n x0,y0 = cen\n if frac:\n total = self.integrateCartesian(-np.inf,np.inf,-np.inf,np.inf,**intkwargs)\n flux = flux * total\n def f(l):\n intres = self.integrateCartesian(x0-l,x0+l,x0-l,x0+l,**intkwargs)-flux\n return intres*intres\n\n if np.isscalar(v0):\n v0 = (v0,)\n elif mode == 'rectangular':\n x0,y0 = cen\n if frac:\n total = self.integrateCartesian(-np.inf,np.inf,-np.inf,np.inf,**intkwargs)\n flux = flux * total\n def f(ls):\n lx,ly = ls\n intres = self.integrateCartesian(x0-lx,x0+lx,y0-ly,y0+ly,**intkwargs)-flux\n return intres*intres\n\n if np.isscalar(v0):\n v0 = (v0,v0)\n else:\n raise ValueError('unrecognized mode')\n\n if minfunc!='brent':\n res = fmin(f,v0,full_output=1,**kwargs)\n else:\n res = fmin(f,full_output=1,**kwargs)\n self.lastfluxsize = res\n val = res[0]\n\n return val.ravel()[0] if val.size == 1 else tuple(val)", "def V_magEarth(alpha,a_p,d):\n V = 5.*np.log10(a_p*d) - 3.99 - 1.060e-3*alpha + 2.054e-4*alpha**2.\n return V", "def calculations():\r\n\t\r\n\tpayload, avionics, booster = weight_input()\r\n\r\n\tdrogue_size, drogue_force = drogue_calc()\r\n\tmain_size, main_force = main_calc(avionics, booster, drogue_force) #total mass, payload detaches\r\n\r\n\tprint(\"Drogue is diameter is \" + str(drogue_size) + \" inches\")\r\n\tprint(\"Main is diameter is \" + str(main_size) + \" inches\")", "def test_jam_axi_rms():\n np.random.seed(123)\n xbin, ybin = np.random.uniform(low=[-55, -40], high=[55, 40], size=[1000, 2]).T\n\n inc = 60. # Assumed galaxy inclination\n r = np.sqrt(xbin**2 + (ybin/np.cos(np.radians(inc)))**2) # Radius in the plane of the disk\n a = 40 # Scale length in arcsec\n vr = 2000*np.sqrt(r)/(r+a) # Assumed velocity profile\n vel = vr * np.sin(np.radians(inc))*xbin/r # Projected velocity field\n sig = 8700/(r+a) # Assumed velocity dispersion profile\n rms = np.sqrt(vel**2 + sig**2) # Vrms field in km/s\n\n surf = np.array([39483., 37158., 30646., 17759., 5955.1, 1203.5, 174.36, 21.105, 2.3599, 0.25493])\n sigma = np.array([0.153, 0.515, 1.58, 4.22, 10, 22.4, 48.8, 105, 227, 525])\n qObs = np.full_like(sigma, 0.57)\n\n distance = 16.5 # Assume Virgo distance in Mpc (Mei et al. 
2007)\n mbh = 1e8 # Black hole mass in solar masses\n beta = np.full_like(surf, 0.3)\n\n surf_lum = surf # Assume self-consistency\n sigma_lum = sigma\n qobs_lum = qObs\n surf_pot = surf\n sigma_pot = sigma\n qobs_pot = qObs\n\n sigmapsf = 0.6\n pixsize = 0.8\n goodbins = r > 10 # Arbitrarily exclude the center to illustrate how to use goodbins\n\n # The model is similar but not identical to the adopted kinematics!\n rmsModel, ml, chi2, flux = jam_axi_rms(\n surf_lum, sigma_lum, qobs_lum, surf_pot, sigma_pot, qobs_pot,\n inc, mbh, distance, xbin, ybin, plot=True, rms=rms, sigmapsf=sigmapsf,\n beta=beta, pixsize=pixsize, tensor='zz', goodbins=goodbins)\n plt.pause(0.01)", "def calc_wvf(pedestrian, key, face, mesh_area,radius): \n normal = envuo.py3dmodel.construct.make_vector((0,0,0),envuo.py3dmodel.calculate.face_normal(face))\n surf2ped = envuo.py3dmodel.construct.make_vector(key,pedestrian)\n sa_ped = 4.0*np.pi*radius**2\n theta = normal.Angle(surf2ped)\n h = surf2ped.Magnitude()/radius \n phi = np.arctan(1/h)\n threshold = np.pi/2.0 - phi\n\n if abs(h*np.cos(theta)) > 1:\n F = abs(np.cos(theta))/h**2; \n else:\n x = np.sqrt(h**2-1)/np.tan(theta) #np.sqrt(h**2-1)\n y = np.sqrt(1-x**2) #-x/np.tan(theta) #\n F = (np.pi - abs(np.cos(x)) - x*y*np.tan(theta)**2)*abs(np.cos(theta))/(np.pi*h**2) + np.arctan(y*abs(np.cos(theta))/x)/np.pi; \n print pedestrian,' passes threshold'\n return mesh_area*F/sa_ped", "def __q2v_ff(self, vm, beta, q):\n return float((vm * beta - np.sqrt(np.power(vm * beta, 2) - 4 * vm * beta * q)) / (2 * vm))", "def ve(self) -> float:\n a = np.sum(np.abs(self.predicted - self.true))\n b = np.sum(self.true)\n return float(1 - (a / b))", "def fraction_of_infectious_virus(self) -> _VectorisedFloat:\n return 1.", "def _pred_mag(self,params: ndarray, times: ndarray) -> ndarray:\n tE = np.exp(params[0])\n A0 = np.exp(params[1])\n deltaT = np.exp(params[2])\n fbl = params[3]\n mb = params[4]\n\n u0 = np.sqrt((2*A0/np.sqrt(A0**2-1))-2)\n u = np.sqrt(u0**2+((times-deltaT-self.alert_time)/tE)**2)\n Amp = (u**2+2) / (u*np.sqrt(u**2+4))\n pred_mag = mb - 2.5*np.log10(fbl*(Amp-1)+1)\n\n return pred_mag", "def flux_ratio(self):\n return self._flux_ratio", "def _feet_2_meter(item_in_feet):\n # vfunc_model = np.vectorize(spherical)\n try:\n return item_in_feet / 3.28084\n except TypeError:\n return float(item_in_feet) / 3.28084", "def calc_vol_vfrac(self, r_cool, PD, c):\n # core and reflector volume required for depletion calculation\n self.core_vol = self.r**2 * math.pi * self.z\n self.refl_vol = ((self.r + self.refl_t)**2 - self.r**2)*math.pi * self.z\n \n pitch = 2*r_cool*PD\n # calculate 'volumes' for fixed length\n v_cool = (r_cool ** 2 * math.pi)\n # clad volume fraction\n v_clad = ((r_cool + c)**2 - r_cool**2)*math.pi\n # fuel volume fraction\n v_cermet = (math.sqrt(3)*pitch**2 / 2.0) - (r_cool + c) ** 2 * math.pi \n\n self.cell_vol = v_cool + v_clad + v_cermet\n # calculate normalized vfracs from total cell volume\n self.vfrac_cool = v_cool / self.cell_vol\n self.vfrac_clad = v_clad / self.cell_vol\n self.vfrac_cermet = v_cermet / self.cell_vol", "def aperture_phot(self,data,x,y,v):\n r = np.sqrt((x-self.avg_map_fits['Values'][1])**2 + (y-self.avg_map_fits['Values'][3])**2)\n \n inner = (r < 8./60.) & np.isfinite(data) \n outer = (r > 8.5/60.) & (r < 12./60.) 
& np.isfinite(data)\n\n annu = np.nanmedian(data[outer])\n annu_rms = np.nanstd(data[outer])\n flux = np.sum(data[inner]) - annu*np.sum(inner)\n\n c = 3e8\n kb=1.38e-23\n beam = (1./60.*np.pi/180.)**2\n factor = 2*kb*(v*1e9/c)**2 * beam * 1e26\n return flux*factor, annu_rms*np.sqrt(np.sum(inner))*factor", "def Q(self):\n return np.array(list(self.center_frequencies)) \\\n / np.array(list(self.bandwidths))", "def magnitude(self): # @todo @caution check: something wrong?\n\n return (math.sqrt(reduce(lambda x, y: x+y,\n [x**2 for x in self.vector])))", "def _calc_bvf(self, points, bead_radius, shell_radius):\n dists = distance.cdist(points, points, 'euclidean')\n\n '''\n Check for intersection. If three spheres intersect we cannot\n (easily) accurately determine the shared volume and therefore\n cannot calculate the bead volume fraction.\n '''\n for i, point_distances in enumerate(dists):\n overlaps = np.where(np.logical_and(np.less(point_distances,\n bead_radius * 2), np.not_equal(point_distances, 0)))[0]\n for combo in itertools.combinations(overlaps, 2):\n positions = [points[idx] for idx in combo + (i,)]\n if self._intersected(positions, bead_radius):\n return 1.0\n\n dists = dists[np.nonzero(dists)]\n\n r_min = shell_radius - bead_radius\n r_max = shell_radius + bead_radius\n vol_shell = (4/3) * np.pi * (r_max**3 - r_min**3)\n vol_beads = len(points) * (4/3) * np.pi * bead_radius**3\n '''\n The total volume taken up by beads is the volume of all of the beads\n minus the volume of the bead intersections. Since each bead-bead\n distance will be present twice within `dists` we divide by two here\n to compensate.\n '''\n vol_overlap = np.sum([self._overlap_volume(bead_radius, dist)\n for dist in dists]) / 2\n\n return (vol_beads - vol_overlap) / vol_shell", "def magnitude_of_vector(v):\n return math.sqrt(sum_of_squares(v))", "def velocity_field(xt,yt,x0,y0,velf,dia,tsr,solidity):\n rad = dia/2.\n rot = tsr*velf/rad\n\n # Calculating EMG distribution parameters\n loc,spr,skw,scl = vorticity(tsr,solidity)\n \n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n \n # Integration of the vorticity profile using Fortran code (vorticity.f90; _vortrun.so)\n vel_vs = dblquad(_vortmodel.integrand,0.,35.*dia,lambda x: -4.*dia,lambda x: 4.*dia, args=(x0t,y0t,dia,loc[0],loc[1],loc[2],spr[0],spr[1],skw[0],skw[1],scl[0],scl[1],scl[2]))\n \n # Calculating velocity deficit\n vel = (vel_vs[0]*(rot))/(2.*pi)\n vel = (vel + velf)/velf # normalization of velocity\n \n return vel", "def _Fqt_comp(vh,q):\n r_scale = 6.45/60\n edges,count,x_lim = vh\n # make sure that vh is normalized\n count = count/np.sum(count)\n\n return np.sum(count * np.exp(1j*q*edges*r_scale))", "def computeMaggiesErr(instFluxErr, instFlux, calibrationErr, calibration, flux):\n return flux*np.hypot(instFluxErr/instFlux, calibrationErr/calibration)", "def finite_size_scale(standard, ssize, primordial, fsize, psize=[1,1,1],writefile=True):\n \n # Check if the input sizes work out with the desired final size\n padding = [0,0,0]\n srcon = [0,0,0]\n for i in range(3):\n diff = fsize[i] - ssize[i]\n if diff < 0:\n raise RuntimeError('Desired final size of the structure must be larger than existing defect structure size. Defect Size = '+repr(ssize)+' Final Size = '+repr(fsize))\n elif diff >= 0:\n if math.fmod(diff,psize[i]):\n raise RuntimeError('Primordial structure and defect structure sizes cannot be used to form desired final size. Reduce size of primordial structure. 
Defect Size = '+repr(ssize)+' Final Size = '+repr(fsize)+' Primordial size = '+repr(psize))\n else:\n padding[i] = diff/psize[i]\n \n # Load the defect structure and primordial structure\n defst = read(standard)\n pst = read(primordial)\n \n # Pad the structure\n positions = pst.get_positions()\n syms = pst.get_chemical_symbols()\n final = defst.copy()\n lv = [one/ssize for one in defst.get_cell()]\n vect = []\n for m0 in range(padding[0]):\n for m1 in numpy.arange(0,fsize[1],psize[1]):\n for m2 in numpy.arange(0,fsize[2],psize[2]):\n vect.append([ssize[0]+m0*psize[0],m1,m2])\n\n for m1 in range(padding[1]):\n for m0 in numpy.arange(0,ssize[0],psize[0]):\n for m2 in numpy.arange(0,fsize[2],psize[2]):\n vect.append([m0,ssize[1]+m1*psize[1],m2])\n\n for m2 in range(padding[2]):\n for m0 in numpy.arange(0,ssize[0],psize[0]):\n for m1 in numpy.arange(0,ssize[1],psize[1]):\n vect.append([m0,m1,ssize[2]+m2*psize[2]])\n\n for m0,m1,m2 in vect:\n npos = positions + numpy.dot((m0, m1, m2), lv)\n for i in range(len(npos)):\n final.append(Atom(symbol=syms[i],position=npos[i]))\n \n final.set_cell(numpy.array([fsize[c] * lv[c] for c in range(3)]))\n \n # Write output as POSCAR\n if writefile:\n write('POSCAR_Final', final)\n \n return final", "def get_vqvae_top_resolution_n() -> int:\n global vqvae\n assert vqvae is not None\n global transformer_top\n assert transformer_top is not None\n global spectrograms_helper\n assert spectrograms_helper is not None\n global DEVICE\n assert DEVICE is not None\n dummy_codes_top = torch.zeros(transformer_top.shape,\n dtype=torch.long).to(DEVICE).unsqueeze(0)\n dummy_codes_bottom = torch.zeros(transformer_bottom.shape,\n dtype=torch.long).to(DEVICE).unsqueeze(0)\n decoded_audio = spectrograms_helper.to_audio(\n vqvae.decode_code(dummy_codes_top, dummy_codes_bottom))\n _, duration_top = transformer_top.shape\n return decoded_audio.shape[-1] // duration_top", "def test_9(self):\n for _ in range(1000):\n num_free = np.random.randint(1, 100)\n values = np.random.uniform(-1000.0, 1000.0, size=num_free)\n py = get_scales_magnitudes(values)\n f90 = fort_debug.wrapper_get_scales_magnitude(values, num_free)\n assert_almost_equal(py, f90)", "def aperture_fields(horn_width, eplane_effective_length, hplane_effective_length, frequency, x, y):\n # Calculate the wavenumber\n k = 2.0 * pi * frequency / c\n\n # Calculate the wave impedance\n eta = sqrt(mu_0 / epsilon_0)\n\n # Define the x-component of the electric field\n e_x = 0.0\n\n # Define the y-component of the electric field\n e_y = cos(pi * x / horn_width) * exp(-1j * k * 0.5 * (x ** 2 / hplane_effective_length +\n y ** 2 / eplane_effective_length))\n\n # Define the z-component of the electric field\n e_z = 0.0\n\n # Define the x-component of the magnetic field\n h_x = -cos(pi * x / horn_width) / eta * exp(-1j * k * 0.5 * (x ** 2 / hplane_effective_length +\n y ** 2 / eplane_effective_length))\n\n # Define the y-component of the magnetic field\n h_y = 0.0\n\n # Define the z-component of the magnetic field\n h_z = 0.0\n\n # Return all six components of the aperture field\n return e_x, e_y, e_z, h_x, h_y, h_z", "def _osLen(self):\n return int(np.ceil(self.minOverscan * self.sampleRate / self.downsample) * self.downsample)\n\n #osv = self.osVector\n #return np.ceil(np.linalg.norm(osv) / self.pixelWidth)", "def getMagBoundary(self):\n\n # Get the boundary of magnitude based on the filter\n lowMagnitude = nan\n highMagnitude = nan\n if (self.filter == self.FilterU):\n lowMagnitude = 7.94\n highMagnitude = 14.80\n\n elif 
(self.filter == self.FilterG):\n lowMagnitude = 9.74\n highMagnitude = 16.17\n\n elif (self.filter == self.FilterR):\n lowMagnitude = 9.56\n highMagnitude = 15.73\n\n elif (self.filter == self.FilterI):\n lowMagnitude = 9.22\n highMagnitude = 15.26\n\n elif (self.filter == self.FilterZ):\n lowMagnitude = 8.83\n highMagnitude = 14.68\n \n elif (self.filter == self.FilterY):\n lowMagnitude = 8.02\n highMagnitude = 13.76\n\n return lowMagnitude, highMagnitude", "def get_ptf10iuv(colorplt = False):\n z = 0.0251485\n ebv = 0.0371 # SFD\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n print (\"adopt g band t_max estimated by myself\")\n t_max = 55357.387 \n tb = pd.read_csv('../data/otherSN/Kasliwal2012/PTF10iuv', sep='\\t')\n tb = tb.drop(columns=[\"Unnamed: 4\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Mag'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Mag'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Mag'].values])\n tb = tb.drop(columns=[\"Mag\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n ixz = tb['filter'].values == \"z\"\n ixB = tb['filter'].values == \"B\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb['wave'].values[ixz] = 9670\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n tb = tb.sort_values(by = \"mjd\")\n if colorplt==False:\n return tb\n \n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g', 'r', 'i']))\n tb = tb[ix]\n tb = tb[tb.mjd > 55352.5]\n tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - 
imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def q_div(self, PFC, MHD, q):\n psi = PFC.psimin\n xyz = PFC.centers\n\n R_div,Z_div,phi_div = tools.xyz2cyl(xyz[:,0],xyz[:,1],xyz[:,2])\n\n R_omp = self.map_R_psi(psi,PFC)\n Z_omp = np.zeros(R_omp.shape)\n # Dot product between surface normal and B field\n #self.HFincidentAngle(PFC, MHD)\n # Calculate Magnitude of B at Divertor\n Bp_div = PFC.ep.BpFunc.ev(R_div,Z_div)\n Bt_div = PFC.ep.BtFunc.ev(R_div,Z_div)\n B_div = np.sqrt(Bp_div**2 + Bt_div**2)\n # Evaluate B at outboard midplane\n Bp_omp = PFC.ep.BpFunc.ev(R_omp,Z_omp)\n Bt_omp = PFC.ep.BtFunc.ev(R_omp,Z_omp)\n B_omp = np.sqrt(Bp_omp**2 + Bt_omp**2)\n\n# Bt_omp = MHD.ep.BtFunc.ev(R_omp,Z_omp)\n# BR_omp = MHD.ep.BRFunc.ev(R_omp,Z_omp)\n# BZ_omp = MHD.ep.BZFunc.ev(R_omp,Z_omp)\n# B_omp = np.sqrt(Bt_omp**2 + BR_omp**2 + BZ_omp**2)\n#\n# Bt_div = MHD.ep.BtFunc.ev(R_div,Z_div)\n# BR_div = MHD.ep.BRFunc.ev(R_div,Z_div)\n# BZ_div = MHD.ep.BZFunc.ev(R_div,Z_div)\n# B_div = np.sqrt(Bt_div**2 + BR_div**2 + BZ_div**2)\n\n\n #For Debugging, plot Bfield Ratio\n #import matplotlib.pyplot as plt\n #testB_div = B_div.reshape(self.grid['Nphi'],self.grid['Nswall']).T\n #testB_omp = B_omp.reshape(self.grid['Nphi'],self.grid['Nswall']).T\n #B_ratio = testB_div/testB_omp\n #CS = plt.contourf(self.grid['phi'], self.grid['Swall'],B_ratio,levels=30,cmap=plt.cm.cool)\n #plt.colorbar(CS, label=r'$B Ratio$')\n #plt.show()\n #Divertor heat flux\n q_div = np.zeros((len(xyz)))\n use = np.where(PFC.shadowed_mask == 0)[0]\n\n #Matt's method\n# q_div[use] = q[use] * B_div[use]/B_omp * PFC.bdotn[use]\n #Menard's Method\n q_div[use] = q[use] * B_div[use] * PFC.bdotn[use]\n\n #for i in range(len(q_div)):\n #\tif q_div[i] > 8.0: q_div[i] = 0.0\n #Plot q|| and qdiv\n #import matplotlib.pyplot as plt\n #plt.scatter(self.grid['Swall'][:,0], q_div[0:self.grid['Nswall']], label='qdiv')\n #plt.scatter(self.grid['Swall'][:,0], q[0:self.grid['Nswall']], label='q||')\n #plt.legend()\n #plt.show()\n return np.abs(q_div)" ]
[ "0.6291265", "0.5848869", "0.5809261", "0.5802869", "0.57336414", "0.5729062", "0.569543", "0.56734866", "0.5662906", "0.56628376", "0.56435436", "0.56359833", "0.5614053", "0.5611046", "0.55810857", "0.55808634", "0.5556087", "0.55491066", "0.55242413", "0.5521659", "0.55142677", "0.5494072", "0.5483656", "0.5480256", "0.5479738", "0.5459442", "0.5450414", "0.54472464", "0.5447242", "0.54394233", "0.5439184", "0.5437157", "0.5435416", "0.5420653", "0.5419178", "0.54076374", "0.5406825", "0.5406578", "0.5367682", "0.5362832", "0.5360413", "0.5342483", "0.53373945", "0.5329687", "0.53285646", "0.5324419", "0.532067", "0.5311715", "0.5307666", "0.5306026", "0.5296108", "0.52959025", "0.52913", "0.5285341", "0.5284511", "0.52835536", "0.527975", "0.5277954", "0.5276111", "0.5266926", "0.5266647", "0.52586734", "0.5255235", "0.525495", "0.5254056", "0.5247706", "0.5247305", "0.5245982", "0.5244649", "0.5242518", "0.5241943", "0.5240536", "0.52401316", "0.5232331", "0.52309704", "0.5222893", "0.52148956", "0.52144456", "0.5212836", "0.52127105", "0.52109665", "0.52105427", "0.5208686", "0.5206257", "0.52050334", "0.52041185", "0.5203874", "0.52024287", "0.52021873", "0.5194008", "0.5193397", "0.51898986", "0.51889235", "0.5187252", "0.518399", "0.5182107", "0.5175448", "0.51717776", "0.5167737", "0.51635885", "0.51635134" ]
0.0
-1
Parse name and seed for uci regression data. E.g. yacht_2 is the yacht dataset with seed 2.
def _parse_uci_regression_dataset(name_str):
  pattern_string = "(?P<name>[a-z]+)_(?P<seed>[0-9]+)"
  pattern = re.compile(pattern_string)
  matched = pattern.match(name_str)
  if matched:
    name = matched.group("name")
    seed = matched.group("seed")
    return name, seed
  return None, None
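A minimal usage sketch for the parser above, assuming the Python standard library re module is imported alongside it; the yacht_2 name comes from the query text, while the non-matching input is a hypothetical example.

# Illustrative only: shows what the parser returns for a matching and a
# non-matching dataset name string.
name, seed = _parse_uci_regression_dataset("yacht_2")
print(name, seed)  # yacht 2 -- note the seed is returned as a string, not an int

name, seed = _parse_uci_regression_dataset("not-a-valid-name")
print(name, seed)  # None None -- the pattern does not match, so both values are None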
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def load_uci_regression_dataset(name,\n split_seed,\n train_fraction=0.9,\n data_dir=\"uci_datasets\"):\n path = os.path.join(data_dir,\n _UCI_REGRESSION_FILENAMES[UCIRegressionDatasets(name)])\n data_arr = onp.load(path)\n x, y = data_arr[\"x\"], data_arr[\"y\"]\n\n indices = jax.random.permutation(jax.random.PRNGKey(split_seed), len(x))\n indices = onp.asarray(indices)\n x, y = x[indices], y[indices]\n\n n_train = int(train_fraction * len(x))\n x_train, y_train = x[:n_train], y[:n_train]\n x_test, y_test = x[n_train:], y[n_train:]\n\n def normalize_with_stats(arr, arr_mean=None, arr_std=None):\n return (arr - arr_mean) / arr_std\n\n def normalize(arr):\n eps = 1e-6\n arr_mean = arr.mean(axis=0, keepdims=True)\n arr_std = arr.std(axis=0, keepdims=True) + eps\n return normalize_with_stats(arr, arr_mean, arr_std), arr_mean, arr_std\n\n x_train, x_mean, x_std = normalize(x_train)\n y_train, y_mean, y_std = normalize(y_train)\n x_test = normalize_with_stats(x_test, x_mean, x_std)\n y_test = normalize_with_stats(y_test, y_mean, y_std)\n\n data_info = {\"y_scale\": float(y_std)}\n\n return (x_train, y_train), (x_test, y_test), data_info", "def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "def dataset(name):\n t = \"unknown\"\n if name ==\"boston\":\n # regression (506x13feat)\n from sklearn.datasets import load_boston\n X, y = load_boston(return_X_y=True)\n t = \"R\"\n #X,y = shap.datasets.boston()\n #return X,y\n elif name == \"iris\":\n # classification (150x4featx3classes)\n from sklearn.datasets import load_iris\n data = load_iris()\n X = data.data\n y = data.target\n t = \"C\"\n elif name == \"diabetes\":\n # regression (442x10feat)\n from sklearn.datasets import load_diabetes\n X, y = load_diabetes(return_X_y=True)\n t = \"R\"\n elif name == \"digits\":\n # classification (1797x64featx10classes)\n from sklearn.datasets import load_digits\n X, y = load_digits(return_X_y=True)\n t = \"C\"\n elif name == \"wine\":\n # classification (178x13featuresx3classes)\n from sklearn.datasets import load_wine\n X, y = load_wine(return_X_y=True)\n t = \"C\"\n elif name == \"breast_cancer\":\n # classification (569x30featx2classes)\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n t = \"C\"\n elif name ==\"nhanesi\":\n X,y = shap.datasets.nhanesi()\n t = \"R\"\n elif name == \"segments\":\n X,y = make_led()\n t = \"C\"\n elif name == \"segments_sampled\":\n X,y = make_led_sample()\n t = \"C\"\n elif name == \"friedman1\":\n from sklearn.datasets import make_friedman1\n X,y= make_friedman1(n_samples=500, random_state=0)\n print('Done')\n X = pd.DataFrame(X, columns=list(range(X.shape[1])))\n t = 'R'\n elif name == \"friedman2\":\n from sklearn.datasets import make_friedman2\n X,y= make_friedman2(random_state=0)\n t = 'R'\n elif name == 'linear':\n X, y, t = draw_linear_function()\n elif name == \"linear2\":\n importlib.reload(lreg)\n X,y,t = lreg.lf_dataset(nsamples=5000, with_vimp=False)\n elif name == 'friendman3':\n X, y, t = friedman_modified()\n else:\n raise ValueError(\"dataset `{}` not implemented\".format(name))\n 
return X,y,t", "def dataset_preparation():\r\n with open('../data/patterns_num.txt', 'r') as f:\r\n data = f.readlines()\r\n X, Y = [], []\r\n for line in data:\r\n x, y = line.split('\\t')\r\n if len(x) > 5 and x not in X: # better results are achieved excluding short query patterns\r\n X.append(x.replace(\"X\", \"\").replace(\"Y\", \"\").lower())\r\n Y.append(int(y.replace('\\n', '')))\r\n test_size = 0.2\r\n # print('Test size:', test_size, '\\nWrong classifications:\\n')\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42, stratify=Y)\r\n return X_train, y_train, X_test, y_test", "def test_load_UCR_UEA_dataset():\n X, y = load_UCR_UEA_dataset(name=\"UnitTest\")\n assert isinstance(X, pd.DataFrame) and isinstance(y, np.ndarray)\n assert X.shape == (42, 1) and y.shape == (42,)", "def load_data():\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y", "def data(dataset=\"bio_eventrelated_100hz\"):\n # TODO: one could further improve this function with like\n # selectors 'ecg=True, eda=True, restingstate=True' that would\n # find the most appropriate dataset\n\n dataset = dataset.lower()\n\n # TODO: change this path back to \"master\"\n path = \"https://raw.githubusercontent.com/neuropsychology/NeuroKit/dev/data/\"\n\n # Signals as vectors =======================\n if dataset in [\"eeg\", \"eeg_150hz\", \"eeg.txt\"]:\n return pd.read_csv(path + \"eeg.txt\").values[:, 0]\n\n if dataset in [\"rsp\", \"rsp_1000hz\", \"rsp_1000hz.txt\"]:\n return pd.read_csv(path + \"rsp_1000hz.txt\", header=None).values[:, 0]\n\n if dataset in [\"ecg\", \"ecg_1000hz\", \"ecg_1000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"ecg_3000hz\", \"ecg_3000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"eog\", \"veog\", \"eog_100hz\", \"eog_100hz.csv\"]:\n return pd.read_csv(path + \"eog_100hz.csv\")[\"vEOG\"].values\n\n # Dataframes ===============================\n if dataset == \"iris\":\n info = sklearn_datasets.load_iris()\n data = pd.DataFrame(\n info.data, columns=[\"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\", \"Petal.Width\"]\n )\n data[\"Species\"] = info.target_names[info.target]\n return data\n\n if dataset in [\"eogs\", \"eogs_200hz\", \"eog_200hz\", \"eog_200hz.csv\"]:\n return pd.read_csv(path + \"eog_200hz.csv\")\n\n # Add extension\n if dataset in [\"bio_resting_8min_200hz\"]:\n dataset += \".json\"\n\n # Specific case for json file\n if dataset.endswith(\".json\"):\n if \"https\" not in dataset:\n data = pd.read_json(path + dataset, orient=\"index\")\n else:\n data = pd.read_json(dataset, orient=\"index\")\n df = {}\n for participant, row in data.iterrows():\n for _, data_string in row.items():\n data_list = json.loads(data_string)\n data_pd = pd.DataFrame(data_list)\n df[participant] = data_pd\n\n return df\n\n # TODO: Add more EEG (fif and edf datasets)\n if dataset in [\"eeg_1min_200hz\"]:\n\n return pickle.load(\n urllib.request.urlopen(\n \"https://github.com/neuropsychology/NeuroKit/blob/dev/data/eeg_1min_200hz.pickle?raw=true\"\n )\n )\n\n # General case\n file, ext = os.path.splitext(dataset) # pylint: disable=unused-variable\n if ext == \"\":\n df = pd.read_csv(path + dataset + 
\".csv\")\n else:\n if \"https\" not in dataset:\n df = pd.read_csv(path + dataset)\n else:\n df = pd.read_csv(dataset)\n return df", "def parse_input(giant_string):\r\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\r\n\r\n X_train_row_strings = X_train_part.split(\"S\")\r\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\r\n X_train = np.array(X_train_rows)\r\n\r\n Y_train = concatenated_string_to_array(Y_train_part)\r\n\r\n X_test_row_strings = X_test_part.split(\"S\")\r\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\r\n X_test = np.array(X_test_rows)\r\n\r\n return X_train, Y_train, X_test", "def load_data():\n data = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\", header=None)\n\n # utiliza somente as duas primeiras classes\n data = data[:100]\n # transforma as classes em 0 e 1\n data[4] = np.where(data.iloc[:, -1] == 'Iris-setosa', 0, 1)\n data = np.asmatrix(data, dtype='float64')\n return data", "def __parse_sample_name(self):\n pattern = '(.*)(P53)(XR|NT)(\\d+)([A-Z]?|Ctr)?.*'\n vals = re.findall(pattern, self.sample_name.replace('_', ''))[0]\n self.cell_type = vals[0]\n self.treatment_type = vals[2]\n self.treatment_time = vals[3]\n if vals[3]:\n self.treatment_repeat = vals[4]", "def parse_input(giant_string):\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\n\n X_train_row_strings = X_train_part.split(\"S\")\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\n X_train = np.array(X_train_rows)\n\n Y_train = concatenated_string_to_array(Y_train_part)\n\n X_test_row_strings = X_test_part.split(\"S\")\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\n X_test = np.array(X_test_rows)\n\n return X_train, Y_train, X_test", "def load_data(): \n\tdf = pandas.read_csv('data/iris.data', header=None)\n\ty = df.iloc[0:df.shape[0], 4].values\n\ty = np.where(y == 'Iris-setosa', 0, y)\n\ty = np.where(y == 'Iris-versicolor', 1, y)\n\ty = np.where(y == 'Iris-virginica', 2, y)\n\tx = df.iloc[0:df.shape[0], 0:4].values\n\tx = tuple(x)\n\ty = tuple(y)\n\ttraining_inputs = x[0:40] + x[50:90] + x[100:140]\n\ttraining_results = y[0:40] + y[50:90] + y[100:140]\n\ttraining_data = (training_inputs, training_results)\n\ttest_inputs = x[40:50] + x[90:100] + x[140:150]\n\ttest_results = y[40:50] + y[90:1000] + y[140:150]\n\ttest_data = (test_inputs, test_results)\n\treturn (training_data, test_data)", "def build_data(seed):\n rs = np.random.RandomState(seed)\n\n def y(x):\n \"\"\" y(x) = 1 + 0.3 * x_1 - 0.6 * x_2^2 - 0.2 * x_3^3 + 0.5 x_4^4 \"\"\"\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4\n\n xtrain = rs.rand(10000, 4)\n xtest = rs.rand(1000, 4)\n ytrain = y(xtrain) + rs.rand(10000) / 10\n ytest = y(xtest) + rs.rand(1000) / 10\n return xtrain, xtest, ytrain, ytest", "def esm1_t6_43M_UR50S():\n return load_model_and_alphabet_hub(\"esm1_t6_43M_UR50S\")", "def _load_vowel_test():\n vowel_data = np.loadtxt(_VOWEL_TEST_PATH, delimiter=',', skiprows=1)\n X = vowel_data[:, -10:]\n y = vowel_data[:, 1].astype(int)\n return (X, y)", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = 
array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def esm1_t34_670M_UR100():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR100\")", "def feature_extraction(_data):\n # Find the digits in the given string Example - data='18-20' digits = '1820'\n digits = str(''.join(c for c in _data if c.isdigit()))\n # calculate the length of the string\n len_digits = len(digits)\n # splitting digits in to values example - digits = '1820' ages = [18, 20]\n ages = [int(digits[i:i + 2]) for i in range(0, len_digits, 2)]\n # checking for special character in the given data\n special_character = '.+-<>?'\n spl_char = ''.join([c for c in list(special_character) if c in _data])\n # handling decimal age data\n if len_digits == 3:\n spl_char = '.'\n age = \"\".join([str(ages[0]), '.', str(ages[1])])\n # normalizing\n age = int(float(age) - 0.5)\n ages = [age]\n # Finding the maximum, minimum, average age values\n max_age = 0\n min_age = 0\n mean_age = 0\n if len(ages):\n max_age = max(ages)\n min_age = min(ages)\n if len(ages) == 2:\n mean_age = int((max_age + min_age) / 2)\n else:\n mean_age = max_age\n # specially added for 18 years cases\n only_18 = 0\n is_y = 0\n if ages == [18]:\n only_18 = 1\n if 'y' in _data or 'Y' in _data:\n is_y = 1\n under_18 = 0\n if 1 < max_age < 18:\n under_18 = 1\n above_65 = 0\n if mean_age >= 65:\n above_65 = 1\n # verifying whether digit is found in the given string or not.\n # Example - data='18-20' digits_found=True data='????' digits_found=False\n digits_found = 1\n if len_digits == 1:\n digits_found = 1\n max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = 0, 0, 0, 0, 0, 0, 0\n elif len_digits == 0:\n digits_found, max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = -1, -1, -1, -1, -1, -1, -1, -1\n \n feature = {\n 'ages': tuple(ages),\n 'len(ages)': len(ages),\n 'spl_chr': spl_char,\n 'is_digit': digits_found,\n 'max_age': max_age,\n 'mean_age': mean_age,\n 'only_18': only_18,\n 'is_y': is_y,\n 'above_65': above_65,\n 'under_18': under_18\n }\n\n return feature", "def import_data(seed: object = 42) -> object:\n\n # Read input data\n df = pd.read_csv(\"x_train_gr_smpl.csv\").astype(int)\n\n # label data-frame rows based on sample data\n for x in range(10):\n index = ~pd.read_csv(\"y_train_smpl_%s.csv\" % x, squeeze=True).astype(bool) # reversed flags (~)\n df.loc[index, 'label'] = str(x)\n\n input_data_ordered = df.iloc[:, 0:2304].to_numpy()\n output_data_ordered = df.iloc[:, 2304].to_numpy()\n\n # Randomise instance order (forcing the same result each time)\n np.random.seed(seed)\n permutation = np.random.permutation(df.shape[0])\n\n # Create base input and output arrays\n input_data = input_data_ordered[permutation]\n output_data = output_data_ordered[permutation]\n\n return input_data, output_data, df, input_data_ordered, output_data_ordered", "def load_data():\n data = pd.read_csv('datasets/housing.csv')\n prices = data['MEDV']\n features = data.drop(['MEDV'], axis=1) # remove it from data as we need to predict it\n print(data.head()) # prints top columns 5 for ex\n return [features, prices]", "def reading_data(fname,goal):\n \n #Reading of the EEG data\n data = pd.read_csv(fname)\n events_fname = fname.replace('_data','_events')\n labels= pd.read_csv(events_fname)\n\n if goal==\"training\":\n 
data=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n elif goal==\"testing\":\n labels=labels.drop(['id' ], axis=1)\n else:\n raise SystemExit(\"The goal variable is unknown for the function\")\n\n return data, labels", "def parse_IAU_name(name):\n # First see if there is a source type acronym\n if diag:\n print \"parse_IAU_name: received\",name\n parts = name.split()\n if len(parts) == 1:\n designation = parts[0]\n elif len(parts) == 2:\n acronym, designation = parts\n else:\n raise(\"Invalid format: \"+name)\n # Now process the designation\n flag = designation[0].upper()\n if flag == \"G\":\n # Galactic coordinates\n longitude,latitude,sign = split_on_sign(name[1:])\n X = parse_decimal_angle(longitude)\n Y = parse_decimal_angle(latitude)\n elif flag == \"J\":\n # Julian epoch celestial coordinates\n ra,dec,sign = split_on_sign(name[1:])\n X = parse_sexagesimal_angle(ra)\n Y = parse_sexagesimal_angle(dec)\n elif flag == \"B\":\n # Besselian epoch celestial coordinates\n ra,dec,sign = split_on_sign(name[1:])\n X = parse_sexagesimal_angle(ra)\n Y = parse_sexagesimal_angle(dec)\n elif designation[0].isdigit():\n # This should be Besselian but who knows?\n # If it is Besselian there should be at least four digits in RA\n # otherwise it could be galactic\n x,y,sign = split_on_sign(name)\n if len(x) > 3:\n X = parse_sexagesimal_angle(x)\n Y = parse_sexagesimal_angle(y)\n flag = \"B\"\n else:\n X = parse_decimal_angle(x)\n Y = parse_decimal_angle(y)\n flag = \"G\"\n else:\n return \"?\",None,None\n if sign == \"-\":\n Y = -Y\n return flag,X,Y", "def _process_input_seed(self):\n\n Tcmb = 2.72548 * u.K # 0.00057 K\n Tfir = 70 * u.K\n ufir = 0.2 * u.eV / u.cm ** 3\n Tnir = 5000 * u.K\n unir = 0.2 * u.eV / u.cm ** 3\n\n # Allow for seed_photon_fields definitions of the type 'CMB-NIR-FIR' or 'CMB'\n if type(self.seed_photon_fields) != list:\n self.seed_photon_fields = self.seed_photon_fields.split('-')\n\n self.seeduf = {}\n self.seedT = {}\n self.seedisotropic = {}\n self.seedtheta = {}\n for idx, inseed in enumerate(self.seed_photon_fields):\n if isinstance(inseed, six.string_types):\n if inseed == 'CMB':\n self.seedT[inseed] = Tcmb\n self.seeduf[inseed] = 1.0\n self.seedisotropic[inseed] = True\n elif inseed == 'FIR':\n self.seedT[inseed] = Tfir\n self.seeduf[inseed] = (ufir / (ar * Tfir ** 4)).decompose()\n self.seedisotropic[inseed] = True\n elif inseed == 'NIR':\n self.seedT[inseed] = Tnir\n self.seeduf[inseed] = (unir / (ar * Tnir ** 4)).decompose()\n self.seedisotropic[inseed] = True\n else:\n log.warning('Will not use seed {0} because it is not '\n 'CMB, FIR or NIR'.format(inseed))\n raise TypeError\n elif type(inseed) == list and (len(inseed) == 3 or len(inseed) == 4):\n isotropic = len(inseed) == 3\n\n if isotropic:\n name, T, uu = inseed\n self.seedisotropic[name] = True\n else:\n name, T, uu, theta = inseed\n self.seedisotropic[name] = False\n self.seedtheta[name] = validate_scalar('{0}-theta'.format(name),\n theta, physical_type='angle')\n\n validate_scalar('{0}-T'.format(name), T, domain='positive',\n physical_type='temperature')\n self.seed_photon_fields[idx] = name\n self.seedT[name] = T\n if uu == 0:\n self.seeduf[name] = 1.0\n else:\n # pressure has same physical type as energy density\n validate_scalar('{0}-u'.format(name), uu,\n domain='positive', physical_type='pressure')\n self.seeduf[name] = (uu / (ar * T ** 4)).decompose()\n else:\n log.warning(\n 'Unable to process seed photon field: {0}'.format(inseed))\n raise TypeError", "def 
mk_test(input_data):\r\n\r\n\ttrend, h, p, z, Tau, s, var_s, slope, intercept = mk.original_test(input_data)\r\n\r\n\treturn trend, h, p, z, Tau, s, var_s, slope, intercept", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def load_demo():\n\tprint(\"\"\"\n\tBreast Cancer Wisconsin dataset. It contains a total of 569 samples of tumor and malignant cells. \n\tData labeled 1 corresponds to malignant cells, while data labeled 0 corresponds to benign cells. \n\tThe 30 characteristics contain real values obtained from images of cell nuclei. For more information:\n\n\t\t\thttp://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+(diagnostic)\n\n\n\tThe returned value is a dictionary where 'x_data' are the predictor variables, 'y_data' the class \n\tlabels and 'features' the name of the characteristics.\n\t\"\"\")\n\tpath = '/'.join(os.path.abspath(pywinEA.__file__).split('/')[:-1])\n\t\n\tdata = pd.read_csv(path+'/dataset/data/BreastCancerWisconsin.csv', index_col=0)\n\tx_data = data.iloc[:, 1:].values\n\ty_data = data.iloc[:, 0].values\n\tfeatures = data.columns[1:].values\n\n\t# Transform labels\n\ty_data[np.where(y_data == 'M')] = 1\n\ty_data[np.where(y_data == 'B')] = 0\n\ty_data = y_data.astype(int)\n\n\treturn {'x_data': x_data, 'y_data': y_data, 'features': features}", "def parse_modelname(string,labellist,ensemblesfolder):\n ## We need to account for two different prefixes now. 
\n split_ens_temp = ensemble_template.split(\"{f}\")\n template_prefix = split_ens_temp[0]\n\n template_seedind = split_ens_temp[1].split(\"{s}\")[0]\n if string.startswith(template_prefix): ## TODO or other prefix\n frames,seedext = string.split(template_prefix)[-1].split(template_seedind)\n seed=seedext.split(\"results.json\")[0]\n return {\"name\":string,\n \"frames\":int(frames),\n \"seed\":int(seed),\n \"template\":ensemble_template,\n \"outliers\":determine_outliers(labellist,int(seed),int(frames)),\n }", "def fixture_microbial_sample_name():\n return \"microbial_name_test\"", "def get_uci_datasets(\n name, split_seed=0, test_fraction=0.10, train_frac=1.0, combine_val_train=False\n):\n # load full dataset\n load_funs = {\n \"naval\": _load_naval,\n \"protein\": _load_protein,\n \"crime\": _load_crime,\n \"energy\": _load_app_energy,\n }\n print(\"Loading dataset {}....\".format(name))\n if name == \"depth\":\n (X_train, y_train), (X_test, y_test) = load_funs[name]()\n y_scale = np.array([[1.0]])\n return (X_train, y_train), (X_test, y_test), y_scale\n\n X, y = load_funs[name]()\n X = X.astype(np.float32)\n y = y.astype(np.float32)\n\n # We create the train and test sets with 90% and 10% of the data\n\n if split_seed == -1: # Do not shuffle!\n permutation = range(X.shape[0])\n else:\n rs = np.random.RandomState(split_seed)\n permutation = rs.permutation(X.shape[0])\n\n size_train = int(np.round(X.shape[0] * (1 - test_fraction)))\n index_train = permutation[0:size_train]\n index_test = permutation[size_train:]\n\n X_train = X[index_train, :]\n X_test = X[index_test, :]\n if name == \"depth\":\n y_train = y[index_train]\n y_test = y[index_test]\n else:\n y_train = y[index_train, None]\n y_test = y[index_test, None]\n\n if train_frac != 1.0:\n rs = np.random.RandomState(split_seed)\n permutation = rs.permutation(X_train.shape[0])\n n_train = int(train_frac * len(X_train))\n X_train = X_train[:n_train]\n y_train = y_train[:n_train]\n\n if split_seed == -1: # Do not shuffle!\n permutation = range(X_train.shape[0])\n else:\n rs = np.random.RandomState(split_seed)\n permutation = rs.permutation(X_train.shape[0])\n\n if combine_val_train:\n val_fraction = 0.0\n else:\n val_fraction = 0.10\n size_train = int(np.round(X_train.shape[0] * (1 - val_fraction)))\n index_train = permutation[0:size_train]\n index_val = permutation[size_train:]\n\n X_new_train = X_train[index_train, :]\n X_val = X_train[index_val, :]\n\n y_new_train = y_train[index_train]\n y_val = y_train[index_val]\n\n print(\"Done loading dataset {}\".format(name))\n\n def standardize(data):\n mu = data.mean(axis=0, keepdims=1)\n scale = data.std(axis=0, keepdims=1)\n scale[scale < 1e-10] = 1.0\n\n data = (data - mu) / scale\n return data, mu, scale\n\n # Standardize\n X_new_train, x_train_mu, x_train_scale = standardize(X_new_train)\n X_test = (X_test - x_train_mu) / x_train_scale\n y_new_train, y_train_mu, y_train_scale = standardize(y_new_train)\n y_test = (y_test - y_train_mu) / y_train_scale\n X_val = (X_val - x_train_mu) / x_train_scale\n y_val = (y_val - y_train_mu) / y_train_scale\n\n train = TensorDataset(\n torch.Tensor(X_new_train).type(torch.float64),\n torch.Tensor(y_new_train).type(torch.float64),\n )\n\n val = TensorDataset(\n torch.Tensor(X_val).type(torch.float64),\n torch.Tensor(y_val).type(torch.float64),\n )\n\n test = TensorDataset(\n torch.Tensor(X_test).type(torch.float64),\n torch.Tensor(y_test).type(torch.float64),\n )\n in_size = X_train[0].shape\n target_size = y_train[0].shape\n\n return train, val, test, 
in_size, target_size, y_train_scale", "def setup(self):\n (self.X, self.Y) = load_iris(problem=\"label_ranking\")", "def autogen_dataset():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n seed=42,\n sep=',')", "def load_UCR_dataset(path, dataset):\n train_file = os.path.join(path, dataset, dataset + \"_TRAIN.tsv\")\n test_file = os.path.join(path, dataset, dataset + \"_TEST.tsv\")\n train_df = pandas.read_csv(train_file, sep='\\t', header=None)\n test_df = pandas.read_csv(test_file, sep='\\t', header=None)\n train_array = numpy.array(train_df)\n test_array = numpy.array(test_df)\n\n # Move the labels to {0, ..., L-1}\n labels = numpy.unique(train_array[:, 0])\n transform = {}\n for i, l in enumerate(labels):\n transform[l] = i\n\n train = numpy.expand_dims(train_array[:, 1:], 1).astype(numpy.float64)\n train_labels = numpy.vectorize(transform.get)(train_array[:, 0])\n test = numpy.expand_dims(test_array[:, 1:], 1).astype(numpy.float64)\n test_labels = numpy.vectorize(transform.get)(test_array[:, 0])\n\n # Normalization for non-normalized datasets\n # To keep the amplitude information, we do not normalize values over\n # individual time series, but on the whole dataset\n if dataset not in [\n 'AllGestureWiimoteX',\n 'AllGestureWiimoteY',\n 'AllGestureWiimoteZ',\n 'BME',\n 'Chinatown',\n 'Crop',\n 'EOGHorizontalSignal',\n 'EOGVerticalSignal',\n 'Fungi',\n 'GestureMidAirD1',\n 'GestureMidAirD2',\n 'GestureMidAirD3',\n 'GesturePebbleZ1',\n 'GesturePebbleZ2',\n 'GunPointAgeSpan',\n 'GunPointMaleVersusFemale',\n 'GunPointOldVersusYoung',\n 'HouseTwenty',\n 'InsectEPGRegularTrain',\n 'InsectEPGSmallTrain',\n 'MelbournePedestrian',\n 'PickupGestureWiimoteZ',\n 'PigAirwayPressure',\n 'PigArtPressure',\n 'PigCVP',\n 'PLAID',\n 'PowerCons',\n 'Rock',\n 'SemgHandGenderCh2',\n 'SemgHandMovementCh2',\n 'SemgHandSubjectCh2',\n 'ShakeGestureWiimoteZ',\n 'SmoothSubspace',\n 'UMD'\n ]:\n return train, train_labels, test, test_labels\n mean = numpy.nanmean(numpy.concatenate([train, test]))\n var = numpy.nanvar(numpy.concatenate([train, test]))\n train = (train - mean) / math.sqrt(var)\n test = (test - mean) / math.sqrt(var)\n return train, train_labels, test, test_labels", "def load_unicef_data():\n fname = 'SOWC_combined_simple.csv'\n \n \n # Uses pandas to help with string-NaN-numeric data.\n data = pd.read_csv(fname, na_values='_', encoding='latin1')\n \n \n # Strip countries title from feature names.\n features = data.axes[1][1:]\n # Separate country names from feature values.\n countries = data.values[:,0]\n values = data.values[:,1:]\n # Convert to numpy matrix for real.\n values = np.asmatrix(values,dtype='float64')\n # Modify NaN values (missing values).\n mean_vals = nanmean(values, axis=0)\n inds = np.where(np.isnan(values))\n values[inds] = np.take(mean_vals, inds[1])\n return (countries, features, values)", "def parse_user_selections(self):\n if \"model2\" in sys.argv:\n self.model_choice = \"model2\"\n else:\n self.model_choice = \"model1\"\n\n if \"Virginia\" in sys.argv:\n self.region = \"Virginia\"\n self.region_name = 'us-east-1'\n elif \"California\" in sys.argv:\n self.region = \"California\"\n self.region_name = 'us-west-1'\n else:\n self.region = \"Oregon\"\n self.region_name = 'us-west-2'\n\n if self.verbose_mode:\n print \"** will run the Machine Learning %s\" % self.model_choice\n print \"\\n** Running on %s Elastic Map Reduce server\" % self.region", "def main(self, data):\n\t\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", 
do_lower_case=True)\n\t\teval_features = self.get_features(data, self.labels, tokenizer, self.max_seq_length)\n\t\tlabel, prob = self.predict(eval_features)\n\t\treturn label, prob", "def data_prep(data, y, dropna=False):\n\n rand_state = 10 # Setting random state for later cv\n df = pd.read_pickle(data) # Reading in data\n if dropna is True:\n df.dropna(axis=0, inplace = True)\n else:\n pass\n X = df.drop(y, axis=1) # Assigning the feature space to X\n y = df[y] # Class labels to predict\n\n return X, y, rand_state", "def __init__(self, seed = None):\n self.data_dir = pkg_resources.resource_filename('logistic_control_variate', 'data/')\n self.generate_data(seed)\n # Holds logistic regression object for this example\n self.lr = None", "def _parse_input(self):\n #temperature\n regex = re.compile(\"TEMP=(\\d+\\.\\d*|\\d+)\")\n r = regex.search(self.file_dic['input'])\n if r:\n self.temperature = r.groups()[0]\n else:\n self.temperature = 298.15\n #theory\n regex = re.compile('(\\$contrl.+\\$end|\\$basis.+ \\$end)')\n temp_theory = regex.findall(self.file_dic['input'])\n contrl = temp_theory[0][:-4][7:].strip()\n basis = temp_theory[1][:-4][6:].strip()\n self.theory = contrl + ' ' + basis", "def esm1_t34_670M_UR50D():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR50D\")", "def easydatagen():\n\n # Reading in the training file\n data = pd.read_json('train.json')\n\n # The set of different cuisines\n cuisines = data.cuisine.unique()\n\n # To find the different ingredients, we need to clean them up a little.\n def clean(string) :\n s = string.replace('-',' ') # read low-fat the same as low fat\n s = string.replace('&', 'and') # read & and and as the same\n s = re.sub('\\((.*?)\\)', '', s) # remove everythin g in brackets\n s = re.sub('\\d{1,2}\\%', '', s) # remove things of the form d% or dd%, where d is a digit\n s = ' '.join(s.split()) # remove extra white spaces\n\n return s\n\n ing_list = data.ingredients.values.tolist()\n raw_ingredients = [clean(x) for ing in ing_list for x in ing]\n\n ingredients = sorted(set(raw_ingredients))\n\n # build a dictionary that to each ingredient assigns its index\n ingredient_index = {}\n for i in range(0,len(ingredients)) :\n ingredient_index[ingredients[i]] = i\n\n # the same for cuisines\n cuisine_index = {}\n for i in range(0, len(cuisines)) :\n cuisine_index[cuisines[i]] = i\n\n def ingredients_to_vector(ings) :\n vect = np.zeros(len(ingredients))\n for ing in ings :\n vect[ingredient_index[clean(ing)]] = 1\n\n return vect\n\n def cuisine_to_vector(cus) :\n vect = np.zeros(20)\n vect[cuisine_index[cus]] = 1\n return vect\n\n vect_list = [ingredients_to_vector(ing) for ing in ing_list]\n target_list = [cuisine_to_vector(cus) for cus in data.cuisine.values.tolist()]\n\n # Define training data\n X = np.c_[vect_list]\n Y = np.c_[target_list]\n\n Y_num = np.zeros((Y.shape[0]))\n for i in range(Y.shape[0]):\n Y_num[i] = np.argmax(Y[i])\n\n x_train, x_test, y_train, y_test = train_test_split(X, Y_num, test_size = 0.2)\n\n return x_train, x_test, y_train, y_test", "def esm1_t34_670M_UR50S():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR50S\")", "def get_emulator_training_info(filename):\n seed_found, ntr_found = None, None\n ret_str = \"\"\n # search for seed\n match = search(r\"seed_[0-9]*\", filename)\n if match:\n seed_found = match.group()\n ret_str += seed_found\n # search for number of training points\n match = search(r\"ntr_[0-9]*\", filename)\n if match:\n match_found = match.group()\n ret_str += \"_\" + match_found\n\n return 
ret_str", "def __prepareDataSet(fileName):\n\n labels = []\n utterances = []\n\n with open(fileName) as f:\n lines = f.readlines()\n\n for line in lines:\n try:\n act = line[:line.index(\" \")]\n utterance = line[line.index(\" \"):line.index(\"\\n\")]\n\n try:\n labels.append(act.strip())\n utterances.append(utterance.strip())\n\n except KeyError:\n pass\n\n except ValueError:\n pass\n\n return labels, utterances", "def main():\n\n clues_file = \"data/part1-clues.txt\"\n parsed_clues_file = \"data/part1-parsedclues.txt\"\n cp = ClueParser()\n\n clues = loadList(clues_file)\n gold_parsed_clues = loadList(parsed_clues_file)\n assert(len(clues) == len(gold_parsed_clues))\n\n cp.train(clues, gold_parsed_clues)\n parsed_clues = cp.parseClues(clues)\n cp.evaluate(parsed_clues, gold_parsed_clues)", "def nnRegression(data):", "def learn(filePath):\n filename = filePath.stem\n processedJAFFE = load(str(filePath))\n processedDF = pd.DataFrame(processedJAFFE)\n processedDF.columns = ['name', 'data', 'emotion']\n processedDF = processedDF.sort_values(by=['name', 'emotion'])\n grouped = processedDF.groupby(['name', 'emotion'])\n train = grouped.nth([0, 1])\n test = grouped.nth([2, 3, 4])\n\n yTrain = train.index.get_level_values(1).tolist()\n xTrain = train.values.ravel().tolist()\n yTest = test.index.get_level_values(1).tolist()\n xTest = test.values.ravel().tolist()\n\n parameters = {\n 'C': [\n 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08, 1.00E-07, 1.00E-06,\n 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02, 1.00E-01, 1.00,\n 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05\n ],\n 'gamma': [\n 1.00E00,\n 1.00E-01,\n 1.00E-02,\n 1.00E-03,\n 5.00E-04, 2.00E-04, 1.50E-04, 1.10E-04, 1.05E-04, 1.00E-04,\n 9.50E-05, 9.00E-05, 7.00E-05, 5.00E-05, 1.90E-05, 1.00E-05,\n 1.00E-06,\n 1.00E-07,\n ],\n }\n\n models = []\n models.append(['gamma \\\\ C', 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08,\n 1.00E-07, 1.00E-06, 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02,\n 1.00E-01, 1.00, 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05 ])\n gridTimeStart = time()\n numIteration = len(parameters['gamma']) * len(parameters['C'])\n iteration = 0\n meanTime = 0\n for gamma in parameters['gamma']:\n row = [gamma]\n for C in parameters['C']:\n print('C = %s \\t gamma = %s'%(C, gamma))\n timeStart = time()\n svc = OneVsRestClassifier(SVC(random_state=0, decision_function_shape='ovr',\n C=C, kernel='rbf', gamma=gamma), n_jobs=4)\n svc.fit(xTrain, yTrain)\n yTrue, yPred = yTest, svc.predict(xTest)\n yTrue = np.array(yTrue, dtype=np.unicode_)\n yPred = np.array(yPred, dtype=np.unicode_)\n correct = np.sum(yTrue == yPred)\n \n print(\"accuracy: %d/%d = \"%(correct, len(yTrue)),\n D('%.2f'%(correct/len(yTrue)*100)))\n row.append(D('%.2f'%(correct/len(yTrue)*100)))\n \n iterTime = time()-timeStart\n iteration = iteration + 1\n meanTime = meanTime * (iteration-1)/iteration + iterTime/iteration\n remainingTime = (numIteration-iteration)*meanTime\n print('--------------------------(%d sec)--remaining: %s'%\n (iterTime, str(timedelta(seconds=int(remainingTime)))))\n models.append(row)\n gridTime = time() - gridTimeStart\n gridTime = timedelta(seconds=int(gridTime))\n print('time: %s'%str(gridTime))\n print('saving file: %s.csv'%filename)\n with open('../csv/%s.csv'%filename, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerows(models)", "def setup(self):\n (self.X, self.Y) = load_iris(problem=\"partial_label_ranking\")", "def get_karate_club_data():\n\n # Edge list of Zachary's karate club.\n edge_list = 
[\n (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8),\n (0, 10), (0, 11), (0, 12), (0, 13), (0, 17), (0, 19), (0, 21), (0, 31),\n (1, 2), (1, 3), (1, 7), (1, 13), (1, 17), (1, 19), (1, 21), (1, 30),\n (2, 3), (2, 7), (2, 8), (2, 9), (2, 13), (2, 27), (2, 28), (2, 32),\n (3, 7), (3, 12), (3, 13), (4, 6), (4, 10), (5, 6), (5, 10), (5, 16),\n (6, 16), (8, 30), (8, 32), (8, 33), (9, 33), (13, 33), (14, 32), (14, 33),\n (15, 32), (15, 33), (18, 32), (18, 33), (19, 33), (20, 32), (20, 33),\n (22, 32), (22, 33), (23, 25), (23, 27), (23, 29), (23, 32), (23, 33),\n (24, 25), (24, 27), (24, 31), (25, 31), (26, 29), (26, 33), (27, 33),\n (28, 31), (28, 33), (29, 32), (29, 33), (30, 32), (30, 33), (31, 32),\n (31, 33), (32, 33)\n ]\n\n # Student-teacher assignment (before split) as in Zachary (1977).\n # Part-time karate instructor: Mr. Hi, node 0 (labeled as 0).\n # President: John A., node 33 (labeled as 1).\n node_labels = jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0,\n 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n \n return create_graph_data(edge_list=edge_list, node_labels=node_labels)", "def generate_data(groups):\n # get path list for the intended classification problem\n input_paths = generate_input_list(groups) \n X_lst = []\n y = []\n for p in input_paths:\n dp = pd.read_csv(p, sep = '\\t') #datapoint\n # Normalization \n # norm = lambda x: (x - x.mean()) / x.std()\n # dp = dp.apply(norm)\n # Min-Max scaling \n #dp_norm = (dp - dp.min()) / (dp.max() - dp.min())\n #dp = dp_norm.values\n if dp.isnull().sum().sum()>0:\n# print(p, dp.isnull().sum().sum())\n continue\n dp = dp.drop(['time'], axis = 1) \n dp = dp.iloc[:1600:4]\n\n if dp.isnull().sum().sum()>0:\n# print('after norm',p, dp.isnull().sum().sum())\n continue\n dp = dp.values\n\n X_lst.append(dp)\n sample_y = get_target(p, text= True)\n y.append(sample_y)\n X = np.stack(X_lst, axis=0)\n \n # convert y into int 0 and 1\n encoder = LabelEncoder()\n encoder.fit(y)\n y = encoder.transform(y)\n y_dummy = y\n # convert y into one-hot encoding\n if len(groups)>2:\n y_dummy = pd.get_dummies(y)\n y_dummy = y_dummy.values\n return X, y , y_dummy", "def read_random_data_from_csv(\n file_name, training_set_size, unlabeled_set_size, holdout_set_size, validation_set_size):\n data = samp_file_to_arr(\n file_name, training_set_size + unlabeled_set_size + holdout_set_size + validation_set_size)\n y_raw = np.array([x[0] for x in data])\n x_all = np.array([x[1:] for x in data])\n # Now transform so that the lower label is -1, always. \n uq = np.unique(y_raw) # Assumed to be only two unique labels!\n y_all = np.zeros(len(y_raw))\n y_all[np.where(y_raw == uq[0])[0]] = -1\n y_all[np.where(y_raw == uq[1])[0]] = 1\n xtrhoval, x_unl, ytrhoval, y_unl = sklearn.model_selection.train_test_split(\n x_all, y_all, test_size=unlabeled_set_size)\n x_trho, x_validate, y_trte, y_validate = sklearn.model_selection.train_test_split(\n xtrhoval, ytrhoval, test_size=validation_set_size)\n x_train, x_out, y_train, y_out = sklearn.model_selection.train_test_split(\n x_trho, y_trte, test_size=holdout_set_size)\n return (x_train, y_train, x_unl, y_unl, x_out, y_out, x_validate, y_validate)", "def test_dataset_from_file(train_dataset):\n dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. 
Proin sed\"\n assert train_dataset[0][0] == dummy\n assert train_dataset[0][1] == '6'", "def load_yield_data():\n data = pd.read_csv(\"soybean_model_data_2017.csv\",dtype={'FIPS':str})\n \n data['soybean_percent'] = data['area']/data['land_area']\n \n # Add logical filter to the yield Data\n area_con = data['area'].notnull()\n data = data[area_con]\n \n # Add Rainfed yield\n # rainfed_con: counties without irrigation, the yield is rainfed\n rainfed_con = ~data['FIPS'].isin(data.loc[data['yield_irr'].notnull(),'FIPS'].unique())\n data['yield_rainfed'] = data['yield_noirr']\n data['area_rainfed'] = data['area_noirr']\n \n \n # For counties with irrigation, only the rainfed yield is added to irrigated yield\n data.loc[rainfed_con, 'yield_rainfed'] = data.loc[rainfed_con, 'yield']\n data.loc[rainfed_con, 'area_rainfed'] = data.loc[rainfed_con, 'area']\n\n # add growing season\n data['tave56789']= data.loc[:,'tave5':'tave9'].mean(axis=1)\n data['vpdave56789']= data.loc[:,'vpdave5':'vpdave8'].mean(axis=1)\n data['precip56789']= data.loc[:,'precip5':'precip9'].sum(axis=1)\n \n \n # Add z-score\n county_std = data.groupby('FIPS').std()['precip56789'].to_frame('precip_gs_std').reset_index()\n county_mean = data.groupby('FIPS').mean()['precip56789'].to_frame('precip_gs_mean').reset_index()\n \n data = data.merge(county_mean, on='FIPS').merge(county_std, on='FIPS')\n \n data['precip_gs_z'] = (data['precip56789'] - data['precip_gs_mean'])/data['precip_gs_std']\n\n # The 12 core states \n data_12 = data[data['State'].isin(data.loc[data['evi6'].notnull(),'State'].unique())]\n\n # Detrend yield\n global trend_rainfed, trend_irrigated, trend_all\n trend_rainfed = yield_trend(data_12, yield_type='rainfed')\n trend_irrigated = yield_trend(data_12, yield_type='irrigated')\n trend_all = yield_trend(data_12, yield_type='all')\n \n data_12.loc[:,'yield_ana'] = (data_12['yield'] - trend_all.predict(data_12[['year','yield']]))\n data_12.loc[:,'yield_rainfed_ana'] = (data_12['yield_rainfed'] - trend_rainfed.predict(data_12[['year','yield_rainfed']])) \n data_12.loc[:,'yield_irr_ana'] = (data_12['yield_irr'] - trend_irrigated.predict(data_12[['year','yield_irr']])) \n \n return data_12", "def parseDataUniform(line):\n if b'(' in line:\n return np.array([float(x) for x in line.split(b'(')[1].split(b')')[0].split()])\n return float(line.split(b'uniform')[1].split(b';')[0])", "def setup(measurement_uncertainty):\n\tdata = import_data('dataset.txt', measurement_uncertainty)\n\tdesign = get_design_matrix(data['x'])\n\tA = design / measurement_uncertainty\n\tlikelihood_fisher = get_likelihood_fisher_matrix(A)\n\tprior_fisher = get_prior_fisher_matrix()\n\tposterior_fisher = get_posterior_fisher_matrix(likelihood_fisher, prior_fisher)\n\tb = data['y'] / measurement_uncertainty\n\tmle = get_mle(likelihood_fisher, A, b)\n\tposterior_mean = get_posterior_mean(likelihood_fisher, posterior_fisher, mle)\n\n\tcovariance = np.linalg.inv(posterior_fisher)\n\n\tposterior_stats = {'fisher': posterior_fisher, 'mean': posterior_mean, 'covar': covariance}\n\n\treturn data, posterior_stats", "def random_forest_test_Data(strat_test_set):\n logging.info(\"Random forest.....\")\n X_test = strat_test_set.drop(\"median_house_value\", axis=1)\n y_test = strat_test_set[\"median_house_value\"].copy()\n X_test_num = X_test.drop(\"ocean_proximity\", axis=1)\n imputer = SimpleImputer(strategy=\"median\")\n imputer.fit(X_test_num)\n X_test_prepared = imputer.transform(X_test_num)\n X_test_prepared = pd.DataFrame(\n X_test_prepared, 
columns=X_test_num.columns, index=X_test.index\n )\n X_test_prepared = feature_eng2(X_test_prepared, X_test)\n return X_test_prepared, y_test", "def define_gender(name_input):\n if not os.path.isfile('train_set.txt') and not os.path.isfile('test_set'):\n \"\"\"\n We take a sample of male and female names and mix\n them in order to create a training set and testing set\n \"\"\"\n labeled_names = ([(name, 'male') for name in names.words('male.txt')] +\n [(name, 'female') for name in names.words(\n 'female.txt')])\n random.shuffle(labeled_names)\n\n \"\"\"\n We train the classifier and return the gender of the name\n \"\"\"\n featuresets = [(gender_features(n), gender) for (n, gender)\n in labeled_names]\n train_set, test_set = featuresets[-500:], featuresets[:500]\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n with open('train_set.txt', 'wb') as handle:\n pickle.dump(train_set, handle)\n with open('test_set.txt', 'wb') as handle:\n pickle.dump(test_set, handle)\n with open('classifier.txt', 'wb') as handle:\n pickle.dump(classifier, handle)\n\n with open('train_set.txt', 'rb') as handle:\n train_set = pickle.load(handle)\n with open('test_set.txt', 'rb') as handle:\n test_set = pickle.load(handle)\n with open('classifier.txt', 'rb') as handle:\n classifier = pickle.load(handle)\n\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n# accuracy = nltk.classify.accuracy(classifier, test_set)\n# classifier.show_most_informative_features(10)\n# print accuracy\n\n \"\"\"\n Accuracy: .804\n Most Informative Features\n last_letter = u'a' female : male = 44.0 : 1.0\n last_letter = u'd' male : female = 23.7 : 1.0\n last_two_letters = u'on' male : female = 11.0 : 1.0\n first_two_letters = u'ha' male : female = 7.8 : 1.0\n last_two_letters = u'ta' female : male = 7.0 : 1.0\n last_letter = u't' male : female = 6.7 : 1.0\n last_letter = u'o' male : female = 6.0 : 1.0\n last_two_letters = u'll' male : female = 4.7 : 1.0\n first_two_letters = u'te' male : female = 4.7 : 1.0\n last_two_letters = u'an' male : female = 4.1 : 1.0\n \"\"\"\n\n return classifier.classify(gender_features(name_input))", "def load_data(url: str, target_var: str) -> tuple:\n # link = \"http://archive.ics.uci.edu/ml/machine-learning-databases/00519/heart_failure_clinical_records_dataset.csv\"\n data = pd.read_csv(url)\n\n X = data.copy()\n y = X.pop(target_var)\n columns = X.columns\n return X.values, y.values, columns", "def test_from_inchi_name(self):\n mol = Molecule.from_inchi(\"InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3\")\n assert mol.name == \"\"\n mol = Molecule.from_inchi(\"InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3\", name=\"bob\")\n assert mol.name == \"bob\"", "def feature_engineering(data):\n ft, nt, pruefung, training, version, vt, zt = get_testposition(data[\"Testposition\"])\n HA, Self, HA_nt, HA_vt, HA_zt = get_HA(data[\"HA\"])\n wochentag, ist_schulzeit = get_datetime_fields()\n sex_m, sex_w = get_sex(data[\"Sex\"])\n jahredabei = get_jahre_dabei(data[\"UserID\"])\n beendet = get_beendet(data[\"beendet\"])\n klassenstufe = get_klassenstufe(data[\"Klassenstufe\"])\n\n dataset = [\n [\n data[\"UserID\"],\n data[\"UebungsID\"],\n data[\"satzID\"],\n data[\"Erstloesung\"],\n data[\"Schwierigkeit\"],\n data[\"Art\"],\n data[\"AufgabenID\"],\n wochentag,\n ist_schulzeit,\n data[\"MehrfachFalsch\"],\n ft,\n nt,\n pruefung,\n training,\n version,\n vt,\n zt,\n beendet,\n data[\"Fehler\"],\n HA,\n Self,\n HA_nt,\n HA_vt,\n HA_zt,\n klassenstufe,\n jahredabei,\n sex_m,\n sex_w,\n ]\n ]\n\n df = pd.DataFrame(\n dataset,\n 
columns=[\n \"UserID\",\n \"UebungsID\",\n \"satzID\",\n \"Erstloesung\",\n \"Schwierigkeit\",\n \"Art\",\n \"AufgabenID\",\n \"Wochentag\",\n \"ist_Schulzeit\",\n \"MehrfachFalsch\",\n \"Testposition__FT\",\n \"Testposition__nt\",\n \"Testposition__pruefung\",\n \"Testposition__training\",\n \"Testposition__version\",\n \"Testposition__vt\",\n \"Testposition__zt\",\n \"beendet\",\n \"Fehler\",\n \"HA__HA\",\n \"HA__Self\",\n \"HA__nt\",\n \"HA__vt\",\n \"HA__zt\",\n \"Klassenstufe\",\n \"Jahredabei\",\n \"Sex__m\",\n \"Sex__w\",\n ],\n )\n\n # merge data with historical data\n global df_hisotorical\n result = pd.merge(df, df_hisotorical, on=\"UserID\")\n result = result.drop(columns=[\"UserID\", \"UebungsID\", \"satzID\", \"AufgabenID\", \"Art\"])\n return result", "def extract_critic_input(self, data):\n return data[1]", "def get_dataset(dataset_name):\n if dataset_name == \"Iris\":\n data = datasets.load_iris()\n elif dataset_name == \"Breast Cancer\":\n data = datasets.load_breast_cancer()\n else:\n data = datasets.load_wine()\n\n X = data.data\n y = data.target\n return X, y", "def get_name():\n return \"SVMd+ - simplified approach\"", "def initializing():\n data = np.array(pd.read_csv('data.csv'))[:,1:]\n\n X = data[:,1:-1].astype(int)\n y = data[:,-1].astype(int)\n y_binary = (y == 1).astype(int)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, \n y_binary, \n test_size=0.25, \n )\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n return (X_train, X_test, y_train, y_test, X, y_binary)", "def parse_dataset(self, data):\n pass", "def read_datasets(data_string):\n if type(data_string) is dict:\n features_file = data_string[\"features\"]\n target_file = data_string[\"meta\"]\n if data_string.get(\"target_col\"):\n target_col = data_string.get(\"target_col\")\n else:\n target_col = \"target\"\n if data_string.get(\"train_test_col\"):\n train_test_col = data_string.get(\"train_test_col\")\n else:\n train_test_col = \"group\"\n elif type(data_string) is tuple:\n features_file = data_string[0]\n target_file = data_string[1]\n target_col = \"target\"\n train_test_col = \"group\"\n\n else:\n raise Exception(\n \"Data has to be expressed in either a tuple (features,target) or dictionary {\\\"features\\\":\\\"your_features\\\",\" +\n \"\\\"target\\\":\\\"your_target\\\"\")\n # opening data\n data_directory = os.path.join(project_dir,\"data/processed/\")\n try:\n X = pd.read_csv(data_directory + features_file, index_col=0)\n y = pd.read_csv(data_directory + target_file, index_col=0, encoding=\"ISO-8859-1\")\n except FileNotFoundError:\n print(\"Files not in data/preprocessed, searching for them in the application's directory. You should run the\" +\n \" program from its directory: python program.py instead of python /somewhere/else/program.py\")\n X = pd.read_csv(features_file, index_col=0)\n y = pd.read_csv(target_file, index_col=0, encoding=\"ISO-8859-1\")\n except pd.errors.ParserError as e:\n print(\"Pandas seams to be unable to read this file. 
Make sure it's a csv\")\n raise e\n except UnicodeDecodeError as e:\n print(\"The encoding of either the features or the targets is not encoded using UTF-8 or ISO-8859-1\")\n raise e\n # Check to see if columns exist and return them\n target_col = checking_columns(y, target_col, x=target_col)\n\n # Get group column\n train_test_col = checking_columns(y, train_test_col, x=train_test_col, handle=lambda x: target_col)\n\n return features_file, target_file, X, y, target_col, train_test_col", "def load_unicef_data():\n fname = 'SOWC_combined_simple.csv'\n\n # Uses pandas to help with string-NaN-numeric data.\n data = pd.read_csv(fname, na_values='_')\n # Strip countries title from feature names.\n features = data.axes[1][1:]\n # Separate country names from feature values.\n countries = data.values[:,0]\n values = data.values[:,1:]\n # Convert to numpy matrix for real.\n values = np.asmatrix(values,dtype='float64')\n\n # Modify NaN values (missing values).\n mean_vals = np.nanmean(values, axis=0)\n inds = np.where(np.isnan(values))\n values[inds] = np.take(mean_vals, inds[1])\n return (countries, features, values)", "def main():\n df_titanic = pd.read_csv('train.csv', header=None)\n print df_titanic.describe()", "def baseline(x_data, y_data, stra = \"uniform\"):\r\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2)\r\n dummy = DummyClassifier(strategy= stra)\r\n dummy.fit(x_train, y_train)\r\n y_pred = dummy.predict(x_test)\r\n accu = accuracy_score(y_test, y_pred)\r\n return accu", "def iris_data():\n X, y = load_iris()['data'], load_iris()['target']\n y[y == 2.] = 0 # N.B. make binary, TODO simulate a competition dataset\n return BasicExamplesProvider(X, y)", "def _read_txt(self, expected_col_names):\n\n try:\n # Read data\n data = pd.read_csv(self.source)\n\n # Check number of columns\n if data.shape[1] != len(expected_col_names):\n raise ValueError(\n \"Unexpected number of columns. Expected {}.\".format(\n len(expected_col_names)))\n # Check column names\n for item in data.columns:\n if item not in expected_col_names:\n raise ValueError(\"Unexpected column name. 
Expected:{}\"\\\n .format(expected_col_names))\n\n # Convert data\n for column in data.columns:\n data[column] = pd.to_numeric(data[column])\n\n # Generate output\n if self.coordinate_system == CoordinateSystem.GEOGRAPHIC:\n def generate_utm(row):\n return UtmCoordinate.create_from_geographic(\n row['latitude'],\n row['longitude'],\n row['elevation'])\n data['UTM'] = data.apply(generate_utm, axis=1)\n data['easting'] = data.apply(lambda row: row['UTM'].easting,\n axis=1)\n data['northing'] = data.apply(lambda row: row['UTM'].northing,\n axis=1)\n data['x'] = data['easting'] - data['easting'].min()\n data['y'] = data['northing'] - data['northing'].min()\n data['z'] = data['elevation'] - data['elevation'].min()\n\n elif self.coordinate_system == CoordinateSystem.UTM:\n data['x'] = data['easting'] - data['easting'].min()\n data['y'] = data['northing'] - data['northing'].min()\n data['z'] = data['elevation'] - data['elevation'].min()\n\n elif self.coordinate_system == CoordinateSystem.CARTESIAN:\n data['elevation'] = data['z'] # keeping return values consitent\n data['z'] = data['elevation'] - data['elevation'].min()\n\n else:\n raise ValueError('Unknown coordinate system.')\n\n selection = ['x', 'y', 'z', 'elevation']\n return data[selection]\n except Exception as exception:\n raise exception", "def test_excalibur_name():\n assert I07Nexus.excalibur_detector_2021 == \"excroi\"\n assert I07Nexus.excalibur_04_2022 == \"exr\"", "def _read_samples(self):\n\n logging.debug(\"Start file parsing.\")\n data = pd.read_csv(self._source_file, header=None)\n \n data = pd.read_csv(self._source_file, header=None)\n header = pd.read_csv(self._header_file, delimiter=':', skiprows=1, header=None)\n header.columns = ['column', 'column_type']\n\n data.columns = header.column.tolist() + ['attack']\n data['attack'] = data['attack'].str.replace('.', '')\n data['label'] = 1\n data.loc[data['attack'] == 'normal', 'label'] = 0\n\n symbolic_columns = header.loc[header.column_type == ' symbolic.'].column.tolist()\n # print(symbolic_columns)\n\n for scol in symbolic_columns:\n data[scol] = pd.Categorical(data[scol])\n one_hot_cols = pd.get_dummies(data[scol], prefix=scol)\n data = pd.concat([data, one_hot_cols], axis=1)\n\n data = data.drop(columns=symbolic_columns)\n data = data.drop(columns=['attack'])\n\n # data.loc[data.attack != 'normal' , ['attack', 'label']].head(20)\n\n data_normal = data.loc[data['label'] == 0]\n data_abnormal = data.loc[data['label'] == 1]\n\n data_normal_train = data_normal.sample(frac=0.7)\n data_normal_test = data_normal.loc[~data_normal.index.isin(data_normal_train.index)]\n\n data_normal_train = data_normal_train.drop(columns=['label']).values\n data_normal_test = data_normal_test.drop(columns=['label']).values\n data_abnormal = data_abnormal.drop(columns=['label']).values\n \n scaler = MinMaxScaler()\n _ = scaler.fit(data_normal_train)\n data_normal_train = scaler.transform(data_normal_train)\n data_normal_test = scaler.transform(data_normal_test)\n data_abnormal = scaler.transform(data_abnormal)\n \n logging.debug('Normal {}; Train {}; Test{}'.format(data_normal.shape, data_normal_train.shape, data_normal_test.shape))\n logging.debug('Abnormal {}'.format(data_abnormal.shape))\n\n samples = {}\n samples['NORMAL'] = data_normal_train\n samples['NORMAL_TEST'] = data_normal_test\n samples['ABNORMAL_TEST'] = data_abnormal\n\n logging.debug(\"End file parsing.\")\n\n return samples", "def split_data(name, is_train = True):\r\n data = pd.read_csv(name, header = 0, encoding = 'ISO-8859-1')\r\n 
X = data['text']\r\n if is_train:\r\n Y = data['polarity']\r\n return X, Y\r\n return X", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def produce_init(filename):\n training_dataset = pd.read_csv(f'../Modified Data/{filename}')\n test_dataset = pd.read_csv(f'../Raw Data/test.csv')\n features = list(training_dataset.columns)\n features.remove('SalePrice')\n predict_feature = ['SalePrice']\n\n # Produce Test Data\n test_X = test_dataset.loc[:, features]\n ids_test = test_dataset.loc[:, 'Id']\n\n for column in features:\n if str(training_dataset.loc[:, column].dtype) == 'object':\n # Initialize encoder\n labelencoder = LabelEncoder()\n # Encode Train Data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna('Missing')\n training_dataset.loc[:, column] = pd.Series(labelencoder.fit_transform(training_dataset.loc[:, column]))\n # Encode Test Data\n test_X.loc[:, column] = test_X.loc[:, column].fillna('Missing')\n test_X.loc[:, column] = pd.Series(labelencoder.fit_transform(test_X.loc[:, column]))\n else:\n # Fix missing values for train data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna(int(training_dataset.loc[:, column].mean()))\n # Fix missing values for test data\n test_X.loc[:, column] = test_X.loc[:, column].fillna(int(test_X.loc[:, column].mean()))\n\n return training_dataset, test_X, ids_test", "def setUp(self):\r\n # Single sample, 6 observations, one of which isn't observed in sample.\r\n self.biom_table1 = parse_biom_table(biom_table_str1)\r\n self.estimator1 = ObservationRichnessEstimator(self.biom_table1,\r\n Chao1MultinomialPointEstimator)", "def read_iris_data():\n\n # Tomamos los datos del dataset\n # Esta es la parte en la que copio codigo de la fuente mencionada\n iris_dataset = datasets.load_iris()\n\n # Separamos caracteristicas de las clases\n data = iris_dataset.data\n classes = iris_dataset.target\n feature_names = iris_dataset.feature_names # Para saber el nombre de las caracteristicas\n target_names = iris_dataset.target_names # Los nombres de las flores que consideramos:\n # Son los nombres de las clases\n\n # Nos quedamos solo con la primera y tercera caracteristica que corresponden\n # a los indices 0 y 2\n data = [data[indx][0:3:2] for indx in range(len(data))]\n\n # Del mismo modo solo me quedo con los nombres de las caracteristicas con\n # las que me quedo en el paso anterior\n feature_names = [feature_names[0], feature_names[1]]\n\n return data, classes, feature_names, target_names", "def load_crime():\n\n # LOAD DATA FROM FILE.\n # filename = \"resources\\CommViolPredUnnormalizedData.csv\"\n filename = os.path.join('resources', 'CommViolPredUnnormalizedData.csv')\n data = pd.read_csv(filename, header=0, sep=';', na_values='?', skipinitialspace=True)\n data 
= data.sample(frac=1, random_state=42)\n\n targets = ['violentPerPop']\n pfeatures = ['race']\n\n # Drop rows with no associated attribute to be predicted.\n dataset = data.dropna(subset=targets, axis=0).reset_index(drop=True)\n\n # Keep only features that have more than 95% of points with associated value.\n features_to_drop = list()\n n_points = len(dataset)\n acc_rate = 0.95\n\n for c in dataset.columns:\n tot_values = np.sum(dataset[c].isna())\n if tot_values >= (1 - acc_rate) * n_points:\n features_to_drop.append(c)\n\n dataset = dataset.drop(features_to_drop, axis=1)\n\n # Remove features that are either correlated with the target or useless.\n feat_to_remove = [\n 'fold',\n 'communityname',\n 'state',\n 'murders',\n 'murdPerPop',\n 'rapes',\n 'rapesPerPop',\n 'robberies',\n 'robbbPerPop',\n 'assaults',\n 'assaultPerPop',\n 'burglaries',\n 'burglPerPop',\n 'larcenies',\n 'larcPerPop',\n 'autoTheft',\n 'autoTheftPerPop',\n 'arsons',\n 'arsonsPerPop',\n 'nonViolPerPop'\n ]\n\n feat_to_remove += targets + pfeatures\n\n # Prepare the feature dataset.\n features = [f for f in dataset.columns if f not in feat_to_remove]\n dataset = dataset[features + pfeatures + targets]\n\n # Last check on Nan values.\n dataset = dataset.dropna(axis=0).reset_index(drop=True)\n\n # Force all types to float.\n for c in dataset.columns:\n dataset[c] = dataset[c].astype(float)\n\n # Features selection.\n top_features = utils.get_top_features(dataset[features], dataset[targets], n=15)\n\n for pfeat in pfeatures:\n if pfeat in top_features:\n print(\"Protected feature \" + pfeat + \" in top features!\")\n\n x, xp, y = dataset[top_features].values, dataset[pfeatures].values, dataset[targets].values\n\n return x, xp, y", "def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None):\n self.scorer = UniEvaluator(\n model_name_or_path='MingZhong/unieval-sum' if model_name_or_path == \"\" else model_name_or_path,\n max_length=max_length,\n device=device,\n cache_dir=cache_dir)\n self.task = 'data2text'\n self.dimensions = ['naturalness', 'informativeness']", "def fetchAndCleanDataframe(self):\n\n df = pd.read_csv('/Users/apple4u/Desktop/goksel tez/results_with_scenarios.csv')\n df.insider_label.fillna(0, inplace=True) # replaces null fields with 0\n df = df.drop(columns=['employee_name', 'scenario', 'role'])\n df = df.rename(columns={'insider_label':'label'})\n #df['label'] = df['insider_label'].astype('int64')\n #df.drop(columns='insider_label', inplace=True)\n df.set_index('user_id', inplace=True)\n X = df.iloc[:, :5].values #fetch all records first 5 columns\n y = df.label.values\n print(df.head())\n return X, y", "def get_test_data():\n\n # test set\n test = pd.read_csv(\"test.csv\")\n\n return test", "def fixture_name(self):\n return \"coding_dna_substitution\"", "def test_intro_model_n_amd():\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n n=100\n prep.prepare(n_components=n, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl')\n features = [\n\n u'days_since_start',\n u'vote_required',\n u'nterms', u'success_rate',\n u'n_amd', u'session_type',\n u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', u'party_COM',\n u'urgency_No', u'urgency_Yes',\n u'appropriation_No', u'appropriation_Yes',\n u'taxlevy_No', u'taxlevy_Yes',\n u'fiscal_committee_No', u'fiscal_committee_Yes']\n topic_features = [\"topic_\"+str(k) for k in range(n)]\n features += topic_features\n X_train, y_train = 
prep.subset(features, dep_var='n_amd')\n\n baseline = DummyRegressor()\n\n gb = GradientBoostingRegressor()\n\n mc = ModelChooser([baseline, gb])\n mc.fit_predict(X_train, y_train, regressor=True)\n mc.print_results(regressor=True)", "def get_dataset(dataset_name):\n if dataset_name == \"Iris\":\n data = datasets.load_iris()\n\n elif dataset_name == \"Breast Cancer\":\n data = datasets.load_breast_cancer()\n\n elif dataset_name == \"Wine Dataset\":\n data = datasets.load_wine()\n\n elif dataset_name == \"MNIST\":\n data = datasets.load_digits()\n\n #elif dataset_name == \"Boston Housing Price\":\n # data = datasets.load_boston()\n\n X = data.data\n y = data.target\n\n return X, y", "def parse_data_uniform(line):\n if b'(' in line:\n return np.array([float(x) for x in line.split(b'(')[1].split(b')')[0].split()])\n return float(line.split(b'uniform')[1].split(b';')[0])", "def create_dataset():\n x_old, y_old = clean_scores_version1()\n\n # delete duplicates\n x_old = np.unique(x_old, axis=0)\n\n file = open('/Users/kira/Desktop/uni/Connect4/agents/agent_supervised_ml/unlabeled2.txt', \"a\")\n\n for row in x_old:\n string = ''\n move_seq = row[row != 0]\n for move in move_seq:\n string = string + str(move)\n for i in range(1, 8):\n file.write(string + str(i) + '\\n')\n\n file.close()", "def esm1v_t33_650M_UR90S_2():\n return load_model_and_alphabet_hub(\"esm1v_t33_650M_UR90S_2\")", "def prepare_data():\n #data, label = load_ta_data(), load_ta_target()\n data, label = load_own_data(), load_own_target()\n tra_x, tst_x = split_samples(data)\n tra_y, tst_y = split_samples(label)\n return (tra_x, tst_x, tra_y, tst_y)", "def load_dataset_test():\n df_test = load_csv_file(\"31_test.csv\")\n return df_test.values", "def prepare_data_test(fname):\n # Read data\n data = pd.read_csv(fname)\n return data", "def load_cup_data(train=True):\n type = \"TR\" if train else \"TS\"\n csv_file = path_data / Path(f\"ML_CUP/ML-CUP20-{type}.csv\")\n return pd.read_csv(csv_file, skiprows=7, header=None, index_col=0)", "def generate_data(self,seed):\n X, y = make_classification( n_samples = 250, random_state = seed )\n # Add bias term\n X = np.concatenate( ( np.ones( ( 250, 1 ) ), X ), axis = 1 )\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split( \n X, y, test_size = 50, random_state = seed )", "def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. 
Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)", "def prep_data(df):\n y = df.target\n X = df.drop([\"target\"], axis=1)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n return X_train, X_test, y_train, y_test", "def load_seed(self) -> np.ndarray:\n return np.loadtxt(CONFIG_DIR / self.name_seed).view(complex).reshape(-1, 1)", "def get_train_data():\n # train set\n train = pd.read_csv(\"train.csv\")\n\n return train", "def create_random_human_identity_from_dataset(self):\n # Set the identity seed. this is used to sample the indentity that generates\n # the human gender, texture, and body shape\n identity_rng = np.random.RandomState(randint(1, 1000))\n # Collecting Humanav dataset\n dataset = HumanAppearance.dataset\n if(dataset is None):\n print('\\033[31m', \"ERROR: can't find Surreal Dataset\", '\\033[0m')\n exit(1) # Failure condition\n # Using the SBPD dataset to generate a random gender, texture, and body shape\n human_gender, human_texture, body_shape = \\\n dataset.get_random_human_gender_texture_and_body_shape(\n identity_rng)\n return human_gender, human_texture, body_shape", "def test_dataset_autogen(autogen_dataset):\n train_dummy = \"eget, venenatis a, magna. Lorem ipsum dolor sit amet, consectetuer\"\n val_dummy = \"leo. Vivamus nibh dolor, nonummy ac, feugiat non, lobortis quis,\"\n test_dummy = \"turpis egestas. Aliquam fringilla cursus purus. Nullam scelerisque neque sed\"\n\n assert autogen_dataset.train[0][0] == train_dummy\n assert autogen_dataset.train[0][1] == '8'\n assert len(autogen_dataset.train) == 64\n\n assert autogen_dataset.val[0][0] == val_dummy\n assert autogen_dataset.val[0][1] == '1'\n assert len(autogen_dataset.val) == 16\n\n assert autogen_dataset.test[0][0] == test_dummy\n assert autogen_dataset.test[0][1] == '6'\n assert len(autogen_dataset.test) == 20" ]
[ "0.6082497", "0.5352037", "0.5263292", "0.5021193", "0.49601898", "0.48798177", "0.4859456", "0.48388806", "0.48280886", "0.48230565", "0.48205665", "0.4811698", "0.4808677", "0.4778711", "0.477406", "0.4759555", "0.47592923", "0.47398236", "0.47390524", "0.4733845", "0.47287226", "0.46976715", "0.468585", "0.46762756", "0.46732804", "0.46732804", "0.46732804", "0.46675482", "0.46493044", "0.46306705", "0.46248496", "0.4623197", "0.4617823", "0.46141237", "0.46132454", "0.46007624", "0.45884776", "0.4587554", "0.4586943", "0.45773402", "0.4571271", "0.45499396", "0.45422903", "0.4539875", "0.45366368", "0.45321104", "0.45316443", "0.45254108", "0.45239326", "0.45219713", "0.45142153", "0.4514057", "0.4510401", "0.45056033", "0.4503797", "0.44974226", "0.44911733", "0.44893008", "0.44784573", "0.44744673", "0.44706452", "0.44705704", "0.44581494", "0.44528955", "0.4444527", "0.4444217", "0.44430792", "0.4442398", "0.44420296", "0.44414136", "0.44335654", "0.44325536", "0.44291747", "0.44254175", "0.44197193", "0.44181392", "0.441609", "0.44153017", "0.44140187", "0.44007158", "0.43995535", "0.4396111", "0.4394548", "0.43938193", "0.4389231", "0.43891245", "0.43877634", "0.43874934", "0.43805286", "0.43797562", "0.43764937", "0.4372705", "0.43679857", "0.4366206", "0.43641385", "0.43610975", "0.43592668", "0.43526947", "0.4352457", "0.4352281" ]
0.730633
0
Converts a string of text into a numerical vector of features based on the word embedding LTM.
def vectorize(self,text):\n        lv_active = set()\n        words = word_tokenize(text)\n        for word in words:\n            if word in self.tree:\n                ancestors = self.tree.word_ancestors(word)\n                lv_active.update(ancestors)\n        return self.nl.isin(lv_active).values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def to_vector(texto,model,idf):\n tokens = normalizer(texto).split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in tokens: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def tokenize(text):\n tokens = nltk.word_tokenize(text)\n lemmatizer = nltk.WordNetLemmatizer()\n \n lemmatized_words = []\n for word in tokens:\n lemmatized_words.append(lemmatizer.lemmatize(word).lower().strip())\n \n return lemmatized_words", "def txt2vec(self, text: str) -> List[int]:\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr", "def lemitization(text_vector):\n\n text_vector = postag_doc(text_vector)\n global lemmatizer\n tokenised_document = [lemmatizer.lemmatize(word, pos=map_postags(\n postag)) for word, postag in text_vector]\n return tokenised_document", "def get_text_features(text, word_features):\n words = word_tokenize(text)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features", "def tokenize(text):\n \n text.lower() # convert to lowercase\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text) #remove punctuation\n words = word_tokenize(text) # tokenize by individual word\n words = [w for w in words if w not in stopwords.words(\"english\")] #remove stop words\n lemmed = [WordNetLemmatizer().lemmatize(w) for w in words] #lemminization\n \n return words", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n return [lemmatizer.lemmatize(token).lower().strip() for token in tokens]", "def tokenize(text):\n tokens = word_tokenize(text)\n words = [token for token in tokens if re.match(\"[a-zA-Z0-9]\", token)]\n no_stopwords = [word for word in words if word not in stopwords.words(\"english\")]\n lowercase_words = [word.lower() for word in no_stopwords]\n pos_tagged_words = pos_tag(lowercase_words)\n lemmatized_words = [WordNetLemmatizer().lemmatize(word, pos=convert_pos_tag(pos)) for word, pos in pos_tagged_words]\n return lemmatized_words", "def get_text_features() -> np.array:\r\n # Universal sentence encoder model\r\n # Original model by Google could be loaded from: https://tfhub.dev/google/universal-sentence-encoder/4\r\n # In this notebook the model is loaded from a public dataset on Kaggle\r\n # at https://www.kaggle.com/dimitreoliveira/universalsentenceencodermodels\r\n text_model = tf.keras.Sequential(\r\n [KerasLayer(txt_model_path, input_shape=[], dtype=tf.string, # Pretrained model\r\n output_shape=[512], trainable=False),\r\n tf.keras.layers.Layer(512, dtype='float16')] # This layer reduces precision of float numbers\r\n 
)\r\n\r\n # Convert all texts to vectors\r\n features = text_model.predict(data['title'],\r\n batch_size=BATCH_SIZE,\r\n use_multiprocessing=True,\r\n workers=-1)\r\n print('Text features extracted. Shape:', features.shape)\r\n\r\n return features", "def txt2vectors(self, txt, is_html):\n words = txt2words(txt)\n words = [w for w in words if w in self._model]\n if len(words) != 0:\n for w in words:\n yield self._model[w]", "def get_dataset_features(text):\n return model.extract(text)", "def normalize_func(text: str) -> List[str]:\n tokens = nltk.word_tokenize(text) # need to be consistent with the basic tokenize used in other functions\n return [lemmatizer.lemmatize(w.lower(), get_wordnet_pos(w.lower())) for w in tokens]", "def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec", "def pre_process(text):\n # replace (,.'\") with ''\n text = text.replace(',', '')\n text = text.replace('.', '')\n text = text.replace(\"'\", '')\n text = text.replace(\"\\\"\", '')\n\n # tokenize into words\n tokens = [word for sent in sent_tokenize(text) for word in word_tokenize(sent)]\n\n # remove stopwords\n stop = stopwords.words('english')\n tokens = [token for token in tokens if token not in stop]\n\n # remove words less than three letters\n tokens = [word for word in tokens if len(word) >= 3]\n\n # lower capitalization\n tokens = [word.lower() for word in tokens]\n\n # lemmatize\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(word) for word in tokens]\n\n return tokens", "def parse(self, text):\n return self.dict.txt2vec(text)", "def tokenize(text):\n #Clean data, remove all character except character and number,such as punctuation etc.\n text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())\n tokens = word_tokenize(text)\n tokens = [WordNetLemmatizer().lemmatize(word) for word in tokens if word not in ST_english]\n return tokens", "def lstm_infer_vector(lstm_model, txt, stopwords,word_indices, maxlen=10, taillemax=300) :\n \n txt_prep = gensim.utils.simple_preprocess(txt, deacc=True)\n txt_wo_uw = remove_unknown_words(txt_prep, word_indices)\n txt_wo_ws = remove_stopwords(txt_wo_uw, stopwords)\n \n if len(txt_wo_ws)>taillemax:\n sentence = txt_wo_ws[-taillemax:]\n \n if len(txt_wo_ws)<maxlen :\n #cas du texte trop court\n sentence = txt_wo_ws\n X = np.zeros((1, maxlen, len(word_indices)), dtype=np.bool)\n y = np.zeros((1, len(word_indices)), dtype=np.bool)\n for t, word in enumerate(sentence):\n X[0, t, word_indices[word]] = 1\n preds = lstm_model.predict(X, verbose=0)[0]\n else :\n \n for current_part in range(len(txt_wo_ws)/maxlen):\n sentence = txt_wo_ws[current_part*maxlen:(current_part+1)*maxlen]\n X = np.zeros((1, maxlen, len(word_indices)), dtype=np.bool)\n y = np.zeros((1, len(word_indices)), dtype=np.bool)\n for t, word in enumerate(sentence):\n X[0, t, word_indices[word]] = 1\n preds = lstm_model.predict(X, verbose=0)[0]\n \n\n return preds", "def doc2vec(self, text: str) -> np.array:\n # tfidf_matrix = self.tfidf.transform([text])\n # vectors = []\n # for token in self.tokenize(text):\n # if token in self.word2vec and token in self.feature_names:\n # tfidf_score = tfidf_matrix[0, self.feature_names.index(token)]\n # vectors.append(self.word2vec[token] * tfidf_score)\n vectors = [self.word2vec[token] 
for token in self.tokenize(text) if token in self.word2vec]\n if not vectors:\n return np.zeros(300)\n return np.mean(vectors, axis=0)", "def transform(self, strings):\n\n logger.debug(\"Converting {} strings into lists of \"\n \"sentences.\".format(len(strings)))\n\n tokenized_strings = []\n for text in strings:\n tokenized_strings.append(text_to_wordlist(text, remove_stopwords=True))\n\n # Pre-allocate a 2D numpy array, for speed\n feature_vecs = np.zeros((len(tokenized_strings), self.num_features),\n dtype=\"float32\")\n\n # Loop through the strings\n for counter, word_list in enumerate(tokenized_strings):\n\n # Call the function (defined above) that makes average feature vectors\n feature_vecs[counter] = self._make_feature_vec(word_list)\n\n # For DEBUG only\n if np.isnan(feature_vecs[counter][0]):\n import ipdb;ipdb.set_trace()\n\n\n return feature_vecs", "def lem_and_stem(text, stopwords):\n\n\tlemmatizer = WordNetLemmatizer()\n\tstemmer = PorterStemmer()\n\tprocessed_text = []\n\tfor token, pos in text:\n\t\tpos = map_pos_tag(pos)\n\t\tif not (pos == wn.NOUN):\n\t\t\tcontinue\n\t\tif token not in stopwords and len(token) > 3:\n\t\t\tprocessed_token = stemmer.stem(lemmatizer.lemmatize(token, pos=pos))\n\t\t\tif processed_token not in stopwords:\n\t\t\t\tprocessed_text.append(processed_token)\n\treturn processed_text", "def word_to_vector_list(self, word, numeric=False, xsampa=False, normalize=True):\n if xsampa:\n word = self.xsampa.convert(word)\n segs = self.word_fts(word, normalize or xsampa)\n if numeric:\n tensor = [x.numeric() for x in segs]\n else:\n tensor = [x.strings() for x in segs]\n return tensor", "def tokenize(text):\n tokens=word_tokenize(text)\n lemmatizer=WordNetLemmatizer()\n \n clean_tokens=[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens\n pass", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n \n clean_tokens =[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def makeFeatureVec(words, model, num_features):\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n num_words = 0.\n index2word_set = set(model.wv.index2word)\n for word in words:\n if word in index2word_set:\n num_words += 1\n featureVec = np.add(featureVec,model[word]) \n featureVec = np.divide(featureVec,num_words)\n return featureVec", "def __call__(self, text):\r\n if self.use_pos_tagging:\r\n return [self.wnl.lemmatize(t, self.pos(t)) for t in word_tokenize(self.clean(text))]\r\n else:\r\n return [self.wnl.lemmatize(t) for t in word_tokenize(self.clean(text))]", "def lemmatize(text):\n\n lem = WordNetLemmatizer()\n return ' '.join(list(map(lambda x: lem.lemmatize(x, 'v'),\n text.split())))", "def text_to_vecs(self):\n # convert word strings into word vectors\n sent_vec = []\n for w in self.sentence:\n if w in self.word_vectors.getVocab():\n sent_vec.append( self.word_vectors.getWordVectors()[w] )\n else:\n sent_vec.append( self.word_vectors.getOOVWordVector() )\n \n assert(len(self.sentence) == len(sent_vec)) \n self.sent_vec = sent_vec", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n \n return clean_tokens", "def preprocess_lines(movie_line):\n\ttokens = tokenizer.tokenize(movie_line)\n\twords = 
[word for word in tokens if word not in stopwords_set]\n\tstemmed_terms = [porter_stemmer.stem(word) for word in words]\n\tlemmatized_terms = [wordnet_lemmatizer.lemmatize(word) for word in stemmed_terms]\n\treturn lemmatized_terms", "def _make_feature_vec(self, word_list):\n\n # Pre-initialize an empty numpy array (for speed)\n feature_vec = np.zeros((self.num_features,), dtype=\"float32\")\n\n # index2word is a list that contains the names of the words in\n # the model's vocabulary. Convert it to a set, for speed.\n index2word_set = set(self.w2v_model.index2word)\n\n # Loop over each word in the word_list and, if it is in the model's\n # vocabulary, add its feature vector to the total\n nwords = 0\n for word in word_list:\n # NOTE: Careful there, if all words are in caps in the article,\n # this function will return nan values and blow up the forest.\n word = word.lower()\n if word in index2word_set:\n nwords += 1\n feature_vec = np.add(feature_vec, self.w2v_model[word])\n\n # Divide the result by the number of words to get the average\n feature_vec = np.divide(feature_vec, nwords)\n return feature_vec", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n\n return clean_tokens", "def test_lemmatization():\n normalizer = TextNormalizer(stopwords=False, lemmatize=True)\n X = normalizer.transform([[\"start running better old friend\"]])\n assert X[\"corpus\"][0] == [\"start\", \"run\", \"well\", \"old\", \"friend\"]", "def feature_extraction(inputFile, text, label):\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df[text].replace(np.nan, '', inplace=True)\r\n for idx, line in df.iterrows():\r\n try:\r\n words = line[text]\r\n newWords = ''.join(words.split())\r\n df.set_value(idx, text, newWords)\r\n except:\r\n pass\r\n tf = TfidfVectorizer(analyzer='char', encoding=\"utf8\", min_df=10)\r\n\r\n x = tf.fit_transform(df[text])\r\n x = x.toarray()\r\n print(x.shape)\r\n y = df[label]\r\n\r\n return x, y", "def tokenize(text):\n text = text.lower()\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n words = word_tokenize(text)\n words = [w for w in words if w not in stopwords.words(\"english\")]\n stemmed = [WordNetLemmatizer().lemmatize(w) for w in words]\n return(stemmed)", "def text_prepare(txt):\n print(txt)\n txt = re.sub(r\"[^\\w\\s]\", \" \", str(txt).lower().strip())\n txt = txt.split()\n nltk.corpus.stopwords.words(\"english\")\n txt = [word for word in txt if word not in nltk.corpus.stopwords.words(\"english\")]\n lem = nltk.stem.wordnet.WordNetLemmatizer()\n txt = [lem.lemmatize(word) for word in txt]\n txt = \" \".join(txt)\n return txt", "def text2vec(doc_tok, model, dim=300):\n doc_embedding = np.zeros(dim)\n valid_words = 0\n for word in doc_tok:\n if word in model:\n valid_words += 1\n doc_embedding += model.query(word)\n else:\n continue\n if valid_words > 0:\n return doc_embedding / valid_words\n else:\n return 
doc_embedding", "def _get_word2vec_features(x, word2vec, all_words_per_tweet, max_tweet_len):\n\n features = np.zeros((len(x), max_tweet_len, word2vec.vector_size))\n\n for i, tweet_words in enumerate(all_words_per_tweet):\n tweet_repr = np.array(\n [word2vec.wv[r] if r in word2vec.wv.vocab else np.zeros(word2vec.vector_size) for r in tweet_words])\n features[i][:len(tweet_repr), :word2vec.vector_size] = tweet_repr\n\n return features", "def tokenize(text):\n # This experiment convinced me to lemmatize only rather than lemmatize and\n # stem. I also got this nifty URL detector there.\n #https://gist.github.com/rajatsharma369007/de1e2024707ad90a73226643c314b118\n\n # initialization\n lemmatizer = WordNetLemmatizer()\n stop = stopwords.words(\"english\")\n\n # Replaced all URLs with 'urlplaceholder'\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|'+\\\n '(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n for url in re.findall(url_regex, text):\n text = text.replace(url, \"urlplaceholder\")\n\n # tokenize and lemmatize\n tokens = word_tokenize(text)\n tokens = [lemmatizer.lemmatize(token).lower().strip() for\n token in tokens if token not in stop]\n\n return tokens", "def makeFeatureVec(words, model, num_features):\n\t# Initialize an empty numpy array (for speed) \n\tfeatureVec = np.zeros((num_features,), dtype=\"float32\")\n\t# Initialize a counter (number of words)\n\tnwords = 0.\n\t \n\t# Index2word is a list that contains the names of the words in the model's vocabulary. \n\tindex2word_set = set(model.index2word)\n\t# \n\t# Loop over each word in the review and, if it is in the model's vocaublary, add \n\t# its feature vector to the total \n\tfor word in words:\n\t\tif word in index2word_set:\n\t\t\tnwords = nwords + 1.\n\t\t\tfeatureVec = np.add(featureVec,model[word])\n\t# \n\t# Divide the result by the number of words to get the average \n\tfeatureVec = np.divide(featureVec,nwords)\n\treturn featureVec", "def token_by_lemma(text):\n lemmatizer = WordNetLemmatizer()\n word_list = word_tokenize(text)\n\n lemmatized_wrds = [lemmatizer.lemmatize(w) for w in word_list]\n return lemmatized_wrds", "def compute_user_input_embedding(txt, model):\r\n embeddings = []\r\n tokens = txt.split(\" \")\r\n for word in tokens:\r\n embeddings.append(model.wv[word])\r\n sentence_embedding = compute_average(embeddings)\r\n return sentence_embedding", "def vectorize_text(text):\n\n def remove_punctuation(text):\n \"\"\"Removes special characters from text.\"\"\"\n return re.sub('[,.?\";:\\-!@#$%^&*()]', '', text)\n\n def remove_common_words(text_vector):\n \"\"\"Removes 50 most common words in the uk english.\n\n source: http://www.bckelk.ukfsn.org/words/uk1000n.html\n\n \"\"\"\n common_words = set(['the', 'and', 'to', 'of', 'a', 'I', 'in', 'was',\n 'he', 'that', 'it', 'his', 'her', 'you', 'as', 'had', 'with',\n 'for', 'she', 'not', 'at', 'but', 'be', 'my', 'on', 'have', 'him',\n 'is', 'said', 'me', 'which', 'by', 'so', 'this', 'all', 'from',\n 'they', 'no', 'were', 'if', 'would', 'or', 'when', 'what', 'there',\n 'been', 'one', 'could', 'very', 'an', 'who'])\n return [word for word in text_vector if word not in common_words]\n\n text = text.lower()\n text = remove_punctuation(text)\n words_list = text.split()\n words_list = remove_common_words(words_list)\n\n return words_list", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return 
clean_tokens", "def embed(text: str) -> np.ndarray:\n n = nlp(text)\n return n.vector", "def _featurize_py_func(text):\n label = np.array(text[-1], dtype=np.int32)\n words = word_tokenize(text[:-2])\n chars = np.zeros([max_sentence_length, max_word_length], dtype=np.int32)\n for i, word in enumerate(words):\n ids = [char_to_int.get(char, -1) for char in word]\n chars[i,:len(ids)] = ids\n return chars", "def get_text_feature(texts,\n labels=None,\n nrow_train=None,\n vec='bow',\n lowercase=False,\n analyzer='word',\n single_token=True,\n ngram_range=(1, 1),\n stop_words=None,\n min_df=2,\n binary=True,\n select_k=None):\n from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n from sklearn.feature_selection import SelectKBest, chi2\n\n # keep single char as word\n if single_token:\n token_pattern = r\"\\b\\w+\\b\"\n else:\n token_pattern = r\"(?u)\\b\\w\\w+\\b\"\n\n # choose vec\n if vec is 'bow':\n vec = CountVectorizer(\n lowercase=lowercase,\n analyzer=analyzer,\n ngram_range=ngram_range,\n stop_words=stop_words,\n min_df=min_df,\n token_pattern=token_pattern,\n binary=binary)\n elif vec is 'tfidf':\n vec = TfidfVectorizer(\n lowercase=lowercase,\n analyzer=analyzer,\n ngram_range=ngram_range,\n stop_words=stop_words,\n min_df=min_df,\n token_pattern=token_pattern,\n sublinear_tf=True)\n else:\n raise ValueError('vec must be bow or tfidf!')\n\n # get word vector\n feature = vec.fit_transform(texts)\n feature_names = vec.get_feature_names()\n\n # feature select\n if (labels is not None) and (select_k is not None):\n if nrow_train is not None:\n x_train = feature[:nrow_train, :]\n x_test = feature[nrow_train:, :]\n y_train = labels[:nrow_train]\n\n feature_selector = SelectKBest(chi2, k=select_k)\n x_train = feature_selector.fit_transform(x_train, y_train)\n feature_names = np.array(feature_names)[feature_selector.get_support()]\n\n x_test = feature_selector.transform(x_test)\n\n # combine train test\n import scipy.sparse as sp\n feature = sp.vstack([x_train, x_test])\n\n else:\n feature_selector = SelectKBest(chi2, k=select_k)\n feature = feature_selector.fit_transform(feature, labels)\n feature_names = np.array(feature_names)[feature_selector.get_support()]\n\n return feature, list(feature_names)", "def featurize(self, data):\n \n features = []\n\n # tokens = data.split()\n\n #Modification 1: Normalization: All lowercase\n #Removing this did not seem to have any performance boost\n #but it did nothing negative either\n data = data.lower()\n\n #Modification 2: Normalization: Tokenizing using NLTK\n #Keep this\n # tokens = word_tokenize(data)\n tokens = data.split()\n\n #Modification 3: Word List: Removing stop words using NLTK\n #Keep this\n stop_words = set(stopwords.words('english'))\n tokens_filtered = []\n\n for t in tokens:\n if t not in stop_words:\n tokens_filtered.append(t)\n\n tokens = tokens_filtered\n\n #Modification 4: Pre-Processing Lemmization using NLTK\n #Surprisingly does not appear to impact performance\n # for t in tokens:\n # t = self.wordnet_lemmatizer.lemmatize(t)\n\n capital = 0\n average_word_length = 5 #It's 4.7, but we'll use 5\n short_words = 0\n long_words = 0\n\n for t in tokens:\n\n #Feature 1: Bag of words\n features.append((t, True))\n\n if(t.isupper()):\n capital += 1\n\n #Feature 3: Long or short word counter, intentionally ignoring length 4\n #and 5 as those are close to average\n #Very important that stop words were removed\n if(len(t) > average_word_length):\n long_words += 1\n elif(len(t) < average_word_length - 1):\n 
short_words += 1\n \n #Feature 2: Lots of capital\n #Remove this. It only appears to be a rough count of sentence number vs.\n #Capturing any sentiment. Does not impact F1 score in given train/dev sets\n # if(capital > 2):\n # features.append((\"LOTS_OF_CAPITAL\", True))\n\n #Feature 3: Long or short words\n # if(long_words > short_words):\n # features.append((\"LOTS_OF_LONG_WORDS\", True))\n\n\n\n return features", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def tokenize(text):\n text = re.sub('[^A-Za-z0-9]', ' ', text)\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def TransformData(text):\n global COUNT_VECTORIZER\n if COUNT_VECTORIZER is None:\n COUNT_VECTORIZER = CountVectorizer(analyzer = 'word', lowercase = True)\n COUNT_VECTORIZER.fit(text)\n features = COUNT_VECTORIZER.transform(text)\n features_nd = features.toarray() # for easy usage\n global TFIDF\n if TFIDF is None:\n TFIDF = TfidfTransformer(use_idf=False)\n TFIDF.fit(features_nd)\n text_tfidf = TFIDF.transform(features_nd)\n return text_tfidf", "def proc_text(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for tokObj in doc:\n if self._remove_punct and tokObj.is_punct:\n continue\n lemma = tokObj.lemma_\n text = tokObj.text\n if self._keep_only_alpha_num and not is_alpha_num(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopwords or tok2 in self._stopwords:\n continue\n\n if self._lower_case:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)", "def preprocessing(raw_text):\n words_list = tokenize(raw_text)\n words_list = remove_stop_words(words_list)\n words_list = remove_punctuations(words_list)\n words_list = 
lemmatization(words_list)\n return words_list", "def extractFeatures(self, data, tf=False):\n tfidf_training_matrix, tfidf_terms = self.useTfidfVectorizer(data)\n \n if tf:\n tf_vectorizer = CountVectorizer(max_df=0.5, min_df=2, max_features=10000,\n stop_words='english')\n \n tf_training_matrix = tf_vectorizer.fit_transform(data)\n tf_terms = tf_vectorizer.get_feature_names()\n \n return tfidf_training_matrix, tfidf_terms, tf_training_matrix, tf_terms\n \n else:\n return tfidf_training_matrix, tfidf_terms", "def create_text_sequence_feature(fl, sentence, sentence_len, vocab):\n sentence_transformed = transform_sentence(sentence, vocab)\n for word_id in sentence_transformed:\n fl.feature.add().int64_list.value.extend([word_id])\n return fl", "def tokenize(text):\n text = re.sub(r\"[^a-zA-Z0-9]+\", \" \", text.lower())\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def process_text(self, text, lemma=False):\n processed_text = TextGraph.nlp(text.lower())\n words = [t.text.strip() if not lemma else t.lemma_ for t in processed_text if not t.is_punct]\n return words", "def process(self, sentence):\n\n # selects onlt alphanumeric words\n words = self.tokenizer.tokenize(sentence)\n\n # lemmatize the words\n words = [self.lemmatizer.lemmatize(word) for word in words]\n\n # lowercase all the words and remove single characters\n words = [word.lower() for word in words if len(word) > 1]\n\n # remove the stopwords using NLTK\n words = [word for word in words if word not in stopwords.words('english')]\n\n return words", "def training_examples_to_vec(test_file, embeddings_file, num_words, word_dim):\n x = []\n ignore_words = stopwords.words('english')\n lemmatizer = WordNetLemmatizer()\n stemmer = SnowballStemmer('english')\n word_idx, word_vectors = hf.create_indices_for_vectors(embeddings_file, return_vectors=True)\n with open(test_file, 'r') as f:\n for line in f:\n stemmedWords = set([])\n long_string = line.split(' ')\n total_words = int(len(long_string) / 2)\n total_example_vec = np.empty([num_words, word_dim], dtype=np.float32)\n if total_words - 1 <= num_words:\n continue\n count = 0\n\n for i in range(1, total_words):\n word = long_string[2 * i].split(\"'\")[0]\n\n if (word in ignore_words) or (len(word) <= 3):\n continue\n\n if not word.isalpha():\n continue\n\n try:\n stem = stemmer.stem(word)\n lemma = lemmatizer.lemmatize(word)\n except UnicodeDecodeError:\n continue\n\n if stem in stemmedWords:\n continue\n\n try:\n idx_num = word_idx[word]\n except KeyError:\n\n try:\n idx_num = word_idx[lemma]\n except KeyError:\n\n try:\n idx_num = word_idx[stem]\n except KeyError:\n continue\n\n word_vec = word_vectors[idx_num]\n total_example_vec[count] = word_vec\n stemmedWords.add(stem)\n count += 1\n if count >= num_words:\n break\n x.append(total_example_vec)\n return x", "def word2vec(self, words):\n with torch.no_grad():\n words = torch.LongTensor(self.doc2token(words))\n result = self.model.embedding(words).numpy()\n return result", "def lemmatize(text, nlp):\n\n return [word.lemma_ for word in nlp(text)]", "def tokenize(text):\n \n #regular expression to avoid pucntuations or any special character\n tokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n \n #tokenizing text\n tokens = tokenizer.tokenize(text)\n \n #initiating lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n #iteratating through each token\n clean_tokens = 
[]\n for tok in tokens:\n \n #stop words are irrelevant in this context of classifying response\n if (tok.lower() not in stopwords.words(\"english\")):\n \n # lemmatizing, normalizing case, and removing leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenize(text):\n \n #regular expression to avoid pucntuations or any special character\n tokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n \n #tokenizing text\n tokens = tokenizer.tokenize(text)\n \n #initiating lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n #iteratating through each token\n clean_tokens = []\n for tok in tokens:\n \n #stop words are irrelevant in this context of classifying response\n if (tok.lower() not in stopwords.words(\"english\")):\n \n # lemmatizing, normalizing case, and removing leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def instance2fv(self, text):\n if isinstance(text, unicode):\n text = text.encode('utf8')\n\n arr = np.zeros((self.n_feats,), dtype='uint32')\n\n # Convert the text to a sequence of ascii values\n ords = map(ord, text)\n\n # Count the number of times we enter each state\n state = 0\n statecount = defaultdict(int)\n for letter in ords:\n state = self.tk_nextmove[(state << 8) + letter]\n statecount[state] += 1\n\n # Update all the productions corresponding to the state\n for state in statecount:\n for index in self.tk_output.get(state, []):\n arr[index] += statecount[state]\n\n # The returned vector is the TFxIDF vector. The IDF for the\n # linguini system is actually the inv-lang-freq, and this is\n # pre-computed from the training data. We also normalize to len 1\n # at this stage.\n retval = arr * self.ilf\n return retval", "def training(string):\n print(\"Training...\")\n vec = create_vector(string)\n print(\"Selecting features...\")\n feature_list = select_features(vec)\n print(\"Done!\")\n return feature_list", "def tokenize(t):\n tweet_tok = TweetTokenizer(strip_handles=True, reduce_len=True)\n tokens = tweet_tok.tokenize(t)\n wnl = WordNetLemmatizer()\n stems = []\n for item in tokens:\n stems.append(wnl.lemmatize(item))\n return stems", "def str_to_nmslib_vect(tokenizer, text):\n lst = unique(get_token_ids(tokenizer, text))\n lst.sort()\n return toks_to_str(lst)", "def preprocess_training_text(text, accented_chars=True, \n convert_num=False, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n remove_html=True, remove_num=True, special_chars=True, \n stop_words=True):\n \n\n \"\"\"preprocess text with default option set to true for all steps\"\"\"\n if remove_html == True: #remove html tags\n text = strip_html_tags(text)\n if extra_whitespace == True: #remove extra whitespaces\n text = remove_whitespace(text)\n if accented_chars == True: #remove accented characters\n text = remove_accented_chars(text)\n if lowercase == True: #convert all characters to lowercase\n text = text.lower()\n \n \n doc = nlp(text) #tokenise text\n\n\n clean_text = []\n for token in doc:\n flag = True\n edit = token.text\n # print(\"Word: \", edit, \" Type: \", token.pos_)\n # remove stop words\n if stop_words == True and token.is_stop and token.pos_ != 'NUM': \n flag = False\n # remove punctuations\n if punctuations == True and (token.pos_ == 'PUNCT') and flag == True: \n flag = False\n \n # remove 'X' characters:\n if token.pos_ == 'X':\n flag = False\n # remove special characters\n if 
special_chars == True and token.pos_ == 'SYM' and flag == True: \n flag = False\n # remove numbers\n if remove_num == True and (token.pos_ == 'NUM' or token.text.isnumeric()) \\\n and flag == True:\n flag = False\n # convert number words to numeric numbers\n if convert_num == True and token.pos_ == 'NUM' and flag == True:\n edit = w2n.word_to_num(token.text)\n # convert tokens to base form\n elif lemmatization == True and token.lemma_ != \"-PRON-\" and flag == True:\n edit = token.lemma_\n # append tokens edited and not removed to list \n if edit != \"\" and flag == True:\n clean_text.append(edit)\n \n # Convert back to string:\n new_text = ' '.join(clean_text)\n regex = re.compile('[^a-zA-Z]')\n new_text = regex.sub(' ', new_text)\n words = re.findall(r'\\w+.', new_text)\n return ' '.join(words)", "def convert_str_list_to_vector(self, string_list: Tuple[str]) -> numpy.ndarray:\n if len(string_list) != 4:\n logger.error(\"convert_str_list_to_vector got a too short or long string list: {}. We return a zero-vector!\",\n string_list)\n return numpy.zeros(shape=(self.word2vec_embedding_size +\n self.word2vec_embedding_size / 2 +\n self.word2vec_embedding_size / 3 +\n self.word2vec_embedding_size / 4,),\n dtype=\"float32\"\n )\n ret = numpy.zeros(shape=(0,), dtype=\"float32\")\n for i, token in enumerate(string_list):\n logger.trace(\"Process the {}. token \\\"{}\\\"\", (i + 1), string_list[i])\n ret = numpy.concatenate([ret,\n numpy.average(\n numpy.reshape(\n self.word2vec_dict.get(string_list[i],\n numpy.negative(\n numpy.ones(\n shape=(self.word2vec_embedding_size,),\n dtype=\"float32\")\n )),\n (int(self.word2vec_embedding_size / (i + 1)), (i + 1))\n ),\n axis=1)],\n axis=0)\n return ret", "def lemmatisation(self, \n text: str\n ) -> Union[str, List[str]]:\n lemmatiser = WordNetLemmatizer()\n\n def lemma_sans_kw(w: str\n ) -> str:\n return (\n lemmatiser.lemmatize(w) if w not in self.target_words else w\n )\n \n if not self.tokenise:\n return ' '.join(\n lemma_sans_kw(w) for w in word_tokenize(text)\n )\n return [lemma_sans_kw(w) for w in text]", "def tokenize(text: str):\n lemmatizer = WordNetLemmatizer()\n stop_words = stopwords.words(\"english\")\n\n # Replace urls\n url_regex = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, 'urlplaceholder')\n\n # Normalize and remove punctuation\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n\n # Tokenize\n tokens = word_tokenize(text)\n\n # Lemmatize and remove stop words\n clean_tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]\n # clean_tokens = [lemmatizer.lemmatize(tok, pos='v').lower().strip() for tok in clean_tokens]\n\n return clean_tokens", "def vectorize(self, source_text, target_text, use_dataset_max_lengths=True):\r\n \r\n data = super().vectorize(source_text, target_text, use_dataset_max_lengths)\r\n \r\n mltm_x_vector = self.mltm_vectorizer.vectorize(source_text.lower())\r\n mltm_x_vector = mltm_x_vector.astype(np.float32)\r\n \r\n data[\"x_source_mltm_vector\"] = mltm_x_vector\r\n return data", "def svmlight_to_vectors(txt):\n\n MAXENT_LOG.info(\"Attempting to convert {} to a vector file.\".format(txt))\n\n ntf = NamedTemporaryFile(mode='w', delete=False)\n ntf.close()\n\n p = ProcessCommunicator('{} import-svmlight --input \"{}\" --output \"{}\"'.format(mallet_bin, txt, ntf.name),\n stdout_func=MAXENT_LOG.info, stderr_func=MAXENT_LOG.warn, shell=True)\n\n\n 
if p.wait() == 0:\n MAXENT_LOG.debug(\"Successfully created temporary vector file {}\".format(ntf.name))\n return ntf.name\n else:\n raise ClassifierException(\"SVMLight Conversion did not complete successfully.\")", "def process_text(text):\n text = text.translate(translator)\n tokens = word_tokenize(text)\n# if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def preprocess(text):\n\tX = []\n\tsent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n\tfor t in text:\n\t\tsents = sent_detector.tokenize(t)\n\t\tresult = ''\n\t\tfor s in sents:\n\t\t\ttokens = word_tokenize(s)\n\t\t\tresult += ' ' + ' '.join(tokens)\n\t\tX.append(result)\n\treturn X", "def tokenize(text):\n \n tokens = word_tokenize(text)\n \n STOPWORDS = list(set(stopwords.words('english')))\n # remove short words\n tokens = [token for token in tokens if len(token) > 2]\n # remove stopwords\n tokens = [token for token in tokens if token not in STOPWORDS]\n \n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def tokenize(text):\n\n # Replace URLs\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n \n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, \"urlplaceholder\")\n\n # Remove non alphanumeric characters\n text = re.sub(pattern=r'[^A-Za-z0-9]+',repl=' ', string=text.lower().strip())\n \n # Tokenize words\n tokens = word_tokenize(text)\n \n # Remove stop words\n stop_words = set(stopwords.words('english'))\n filtered_tokens = [w for w in tokens if not w in stop_words]\n \n lemmatizer = WordNetLemmatizer()\n \n clean_tokens = []\n for token in filtered_tokens:\n new_token = lemmatizer.lemmatize(token)\n clean_tokens.append(new_token)\n \n return clean_tokens", "def tokenize(text):\n text = text.translate(str.maketrans('', '', string.punctuation))\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n s = stopwords.words('english')\n result = []\n for token in clean_tokens:\n if token not in s:\n result.append(token)\n\n return result", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n 
#here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def basic_clean(text):\n wnl = nltk.stem.WordNetLemmatizer()\n stopwords = stopwords.words('english') + ADDITIONAL_STOPWORDS\n text = (unicodedata.normalize('NFKD', text)\n .encode('ascii', 'ignore')\n .decode('utf-8', 'ignore')\n .lower())\n words = re.sub(r'[^\\w\\s]', '', text).split()\n return [wnl.lemmatize(word) for word in words if word not in stopwords]", "def lemmatize_words(text: str, lemmatizer=WordNetLemmatizer()) -> str:\n return ' '.join(lemmatizer.lemmatize(word) for word in text.split())", "def extract_features(sentence, vocabulary):\n n_tokens = len(sentence)\n n_features = n_feature_functions + len(vocabulary)\n X = sp.lil_matrix((n_tokens, n_features), dtype=bool)\n\n for i in xrange(n_tokens):\n for j, f in enumerate(FEATURE_FUNCTIONS):\n X[i, j] = f(sentence, i)\n\n # Vocabulary feature\n try:\n X[i, n_feature_functions + vocabulary[sentence[i][0].lower()]] = 1\n except KeyError:\n pass\n\n return X", "def token2features(sent, i, add_neighs=True):\n \n def add_lexicon_feats(tpl, lookupLexiconDict, usedTags):\n if tpl in lookupLexiconDict:\n for cls in lookupLexiconDict[tpl]:\n if cls not in usedTags:\n ftrs.append(cls) #<--------------------\n usedTags[cls]=1\n else:\n usedTags[cls]+=1\n \n \n ftrs = []\n # bias\n ftrs.append(\"BIAS\")\n # position features\n if i == 0:\n ftrs.append(\"SENT_BEGIN\")\n if i == len(sent)-1:\n ftrs.append(\"SENT_END\")\n\n # the word itself\n word = unicode(sent[i])\n ftrs.append(\"WORD=\" + word)\n word_lcase = word.lower()\n ftrs.append(\"LCASE=\" + word_lcase)\n # some features of the word\n if word.isalnum():\n ftrs.append(\"IS_ALNUM\")\n if word.isnumeric():\n ftrs.append(\"IS_NUMERIC\")\n if word.isdigit():\n ftrs.append(\"IS_DIGIT\")\n if word.isupper():\n ftrs.append(\"IS_UPPER\")\n if word.islower():\n ftrs.append(\"IS_LOWER\")\n\n # USE LEXICONS################################################## !\n maxTries=5\n usedTags = {}\n \n #look front up to 5 places \n if type(sent[0])== str: lSent = map(str.lower, sent)\n else: lSent = map(unicode.lower, sent)\n while(maxTries!=0):\n\n if len(lSent)-i>=maxTries:\n tpl = tuple(lSent[i:maxTries+i])\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n maxTries-=1\n \n #also look backwards: lexicons\n if i>=1:\n tpl 
= tuple(lSent[i-1:i+1]) # size 2\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n if i<len(lSent) : \n tpl = tuple(lSent[i-1:i+2]) # size 3\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n \n #analyze and add bias towards max used classification \n if usedTags:\n usedTags = list(usedTags.iteritems())\n maxused = max(usedTags, key=operator.itemgetter(1))\n minused = min(usedTags, key=operator.itemgetter(1)) \n if minused[1]!=maxused[1]:\n ftrs.append('BIAS='+maxused[0])\n \n\n #R ************************************************\n if len(word) > 15:\n ftrs.append(\"IS_LENGTHY\")\n if word[0].upper():\n ftrs.append(\"IS_FIRST_UPPER\")\n if word.__contains__(\"http\"):\n ftrs.append(\"IS_HYPERLINK\")\n if any(x.isupper() for x in word):\n ftrs.append(\"IS_MIXEDCASE\")\n if word.isupper():\n ftrs.append(\"ALL_UPPERCASE\")\n if word.__contains__(\"@\"):\n ftrs.append(\"IS_TAG\")\n if word.__contains__(\"#\"):\n ftrs.append(\"IS_HASHTAG\")\n if word in stop_words:\n ftrs.append(\"IS_STOPWORD\")\n if word in ['ing','ly','ed','ious','ies','ive','es','s','ment']:\n ftrs.append(\"CONTAINS_SUFFIX\")\n ftrs.append( nltk.pos_tag([word])[0][1] )\n\n # previous/next word feats\n if add_neighs:\n if i > 0:\n for pf in token2features(sent, i-1, add_neighs = False):\n ftrs.append(\"PREV_\" + pf)\n if i < len(sent)-1:\n for pf in token2features(sent, i+1, add_neighs = False):\n ftrs.append(\"NEXT_\" + pf)\n \n \n \n # return it!\n return ftrs", "def readVector(text):\n items = text.split()\n if int(items[0])+1 != len(items):\n raise ValueError(\"Invalid number of items\")\n return [float(v) for v in items[1:]]", "def generate_vector(text, tf=None):\n if not _trained:\n print(\"Make sure to train parameterizer first\")\n exit(1)\n if tf is None:\n tf = term_frequency.generate_vector(text)\n vector = []\n for i in range(len(tf)):\n vector.append(tf[i] * _idfs[i])\n return vector", "def process_text(self):\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))", "def compute_label_feature(text, token_to_idx):\n tokens = list(text.strip().lower())\n feats = [token_to_idx[token] for token in tokens]\n return feats", "def text_to_wordlist(text, remove_html_related=True, remove_non_letter=True,\n to_lowercase=True, remove_stopwords=False, use_lem=False):\n if remove_html_related:\n text = url_removal(text)\n # Remove HTML using BeautifulSoup\n text = BeautifulSoup(text, 'lxml').get_text()\n\n # Remove non-letters using regex\n if remove_non_letter:\n text = non_letter_removal(text)\n # Convert words to lower case and split them\n if to_lowercase:\n text = text.lower()\n\n words = text.split()\n # get tagged before possible stopword removal\n tagged_words = pos_tag(words)\n\n # Optionally remove stop words (false by default)\n if remove_stopwords:\n tagged_words = stopword_removal_from_taggedwords(tagged_words)\n\n # Optionally get part of speech tag of words then lemmatize them\n if use_lem:\n words = lemmatize_tagged_words(tagged_words)\n # Return a list of words and tagged words\n return(words, tagged_words)", "def create_vectorizer(ds):\n vectorize_layer = TextVectorization(\n standardize=clean_text,\n split=\"whitespace\",\n max_tokens=MAX_WORDS - 1,\n output_mode=\"int\",\n output_sequence_length=MAX_LEN,\n )\n vectorize_layer.adapt(ds.map(lambda text, label: text))\n return vectorize_layer", "def preproc_user_input(txt, model):\r\n txt = pre_process(txt)\r\n txt_tokenized = [word for word in txt.split(\" \") if 
word in model.wv.vocab]\r\n return \" \".join(txt_tokenized)", "def text_to_w2v_input(text, tokenizer=None, remove_stopwords=False):\n\n # NOTE: Punkt is a sentence tokenizer\n if not tokenizer:\n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\n # Split text into sentences\n raw_sentences = tokenizer.tokenize(text.decode('utf8').strip())\n\n tokenized_sentences = []\n for raw_sentence in raw_sentences:\n if raw_sentence:\n tokenized_sentences.append(\n text_to_wordlist(raw_sentence, remove_stopwords))\n\n return tokenized_sentences", "def vectorize_text(df: pd.DataFrame):\n # Creating a stop_words list set that are common to many questions.\n common_phrases = [\n 'read the sentence from the passage',\n 'which of the following best describes',\n 'which is the best one sentence * for the section',\n 'which sentence from the passage provides the most evidence'\n 'select the sentence that does not support the central idea of the article',\n 'supports the main idea',\n 'select the paragraph from the section that explains how that shows the ',\n 'that is most relevant to be included in the summary of the article',\n 'according to the article',\n 'which of these is not one',\n ]\n stop_words = stopwords.words('english')\n [stop_words.extend(x.split()) for x in common_phrases]\n\n ct_vectorizer = CountVectorizer(token_pattern='\\\\w{3,}',\n max_df=.3,\n min_df=.001,\n stop_words=list(set(stop_words)),\n strip_accents='ascii', # Faster than unicode.\n ngram_range=(1, 3), # Enable uni, bi, trigrams.\n lowercase=True,\n dtype='uint8')\n\n tfidf_vectorizer = TfidfVectorizer(token_pattern='\\\\w{3,}',\n max_df=.3,\n min_df=.001,\n stop_words=list(set(stop_words)),\n strip_accents='ascii', # Faster than unicode.\n ngram_range=(1, 3), # Enable uni, bi, trigrams.\n lowercase=True,\n sublinear_tf=True, # Replace tf with 1 + log(tf).\n smooth_idf=True, # Default 1 doc for each term.\n dtype=np.float32)\n\n # Count & tf-idf vectorization learns vocab and transforms data into matrices.\n ct_vec = ct_vectorizer.fit_transform(np.array(df.text))\n tfidf = tfidf_vectorizer.fit_transform(np.array(df.text))\n # print(\"Shape of ct_vec:\", ct_vec.shape)\n # print('Size of ct_vec:', sys.getsizeof(ct_vec))\n # print(\"Shape of tfidf:\", tfidf.shape)\n # print('Size of tfidf:', sys.getsizeof(tfidf), '\\n')\n\n ct_names = ct_vectorizer.get_feature_names()\n tf_names = tfidf_vectorizer.get_feature_names()\n\n df_cv = pd.concat(\n [df, pd.DataFrame(ct_vec.toarray(), columns=ct_names)],\n axis=1)\n df_tfidf = pd.concat(\n [df, pd.DataFrame(tfidf.toarray(), columns=tf_names)],\n axis=1)\n\n return (\n df_cv,\n ct_vec,\n ct_names,\n df_tfidf,\n tfidf,\n tf_names\n )", "def get_sentences(text, nlp):\n\n # get sentences from text\n sentences = [sentence for sentence in\n text.replace('!', '.').replace('?', '.').split('.')]\n\n processed_sentences = [convert_to_string(remove_junk(tokenize_text(sentence, nlp))) for sentence in\n text.replace('!', '.').replace('?', '.').split('.')]\n\n # convert the sentences into a list of document vectors\n sentence_vector_list = [nlp(sentence).vector for sentence in processed_sentences]\n\n return sentences, sentence_vector_list", "def extract_features_temporal(self, text, expDateStr = None, onsetDateStr = None, refExpDateStr = None, textType='vaers'):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n# id = 0\n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence 
to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.initialization_text_data(text, sentences, taggedSentences, textType)\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n docFeature = self.extract_temporal_info(featObjList, expDateStr, onsetDateStr, refExpDateStr)\n \n return docFeature", "def getVector(text):\n url = cfg.use_vectoriser\n res = requests.post(url, json={'text': text, 'access_key': cfg.vectoriser_access_key})\n res_dictionary = res.json()\n return res_dictionary['vectors']", "def tokens_from_string(self, text):\n\n if self.level == \"character\":\n return list(text)\n elif self.level == \"word\":\n return nltk.word_tokenize(text)\n else:\n print(\"error: invalid level\")", "def parse_input(input_data, dictionary, model):\n vec_text = TextBlob(input_data).words.lower().lemmatize()\n vec_bow = dictionary.doc2bow(vec_text)\n return model[vec_bow]", "def clean_stopwords_lemmatize(text):\n tokens = clean_stopwords(text)\n tokens = lemmatize_tokens(tokens)\n # count = Counter(tokens)\n # c = count.most_common(15)\n # b = [str(i[0]) for i in c]\n # keywords = [t for t in tokens if t in b]\n news = ['ESPN', 'espn', 'foxsports', 'fox', 'cnn', 'yahoo', '•', '-', '●']\n keywords = [k for k in tokens if not k in news]\n return keywords", "def extract(self, document):\n f_num = len(self.feature_list)\n feature_vector = np.zeros((f_num,))\n words = document.split()\n for i in xrange(len(words)):\n for n in self.ns:\n ngram = self.try_get_ngram(words, n, i)\n if ngram and ngram in self.ngrams:\n self.add_ngram(feature_vector, ngram)\n return feature_vector" ]
[ "0.70021975", "0.6557391", "0.65419817", "0.65404963", "0.6515521", "0.64831823", "0.6474055", "0.64408255", "0.64127994", "0.6378021", "0.63599265", "0.63383466", "0.6331677", "0.6309186", "0.63056415", "0.6245646", "0.6237467", "0.62303", "0.62136185", "0.61973566", "0.6189876", "0.61872435", "0.61831295", "0.61788756", "0.61539006", "0.61421084", "0.61379063", "0.6108796", "0.60876167", "0.60579544", "0.6038214", "0.6024395", "0.6023434", "0.60128266", "0.60105896", "0.60007256", "0.599773", "0.598994", "0.5988629", "0.5986467", "0.5986299", "0.59802616", "0.59756", "0.5974153", "0.597276", "0.5970416", "0.5963332", "0.5960208", "0.5959896", "0.59595436", "0.5958243", "0.5947317", "0.59362084", "0.5923561", "0.5916421", "0.59072936", "0.590715", "0.5906617", "0.5878011", "0.5868159", "0.58483505", "0.58423924", "0.5839385", "0.5838956", "0.5838956", "0.5837959", "0.5831707", "0.5830739", "0.5827296", "0.58239746", "0.58209914", "0.5819262", "0.5817126", "0.5808531", "0.58082616", "0.5798102", "0.5796893", "0.5795046", "0.57903016", "0.57753676", "0.577383", "0.57622755", "0.57556015", "0.57342964", "0.57260036", "0.5725989", "0.5725327", "0.57205135", "0.5716604", "0.57118464", "0.57111543", "0.5709845", "0.57051045", "0.57020336", "0.5684961", "0.5678745", "0.56745934", "0.5669707", "0.5636148", "0.5628829", "0.56273043" ]
0.0
-1
Creates a Tensor for use as an Embedding initialization from the source vocabulary and predefined word embeddings.
def get_pretrained_embeddings(source_vocab,embed_df): num_tokens = len(source_vocab) embedding_dim = embed_df.shape[1] weights = np.zeros((num_tokens,embedding_dim),dtype=np.float32) for idx in range(num_tokens): token = source_vocab.lookup_index(idx) if token in embed_df.index: weights[idx,:] = embed_df.loc[token] else: weights[idx,:] = np.random.randn(1,embedding_dim) embed_tensor = torch.FloatTensor(weights) return embed_tensor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_embedding_layer(inputs_, vocab_size, embed_size):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_size), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)\n \n return embed", "def init_word_embed(config):\n embedding_mat_val = np.load(config.wordembed_params)\n with tf.variable_scope('vc'):\n with tf.variable_scope('lstm', reuse=True):\n embedding_mat = tf.get_variable(\"embedding_mat\", [config.num_vocab, config.embed_dim])\n init_we = tf.assign(embedding_mat, embedding_mat_val)\n return [init_we]", "def build_word_embeddings(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n elif self.mode == \"test\":\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n else:\n word_emb = tf.get_variable(\n name=\"word_embedding\",\n shape=[self.config.vocab_size, self.config.word_embedding_dim],\n initializer=self.uniform_initializer)\n\n encode_emb1 = tf.nn.embedding_lookup(word_emb, self.encode_ids1)\n encode_emb2 = tf.nn.embedding_lookup(word_emb, self.encode_ids2)\n\n\n self.encode_emb1 = encode_emb1\n self.encode_emb2 = encode_emb2", "def _build(self, ids):\n # Construct embeddings.\n if self._existing_vocab is None:\n if self.EMBEDDINGS not in self._initializers:\n self._initializers[self.EMBEDDINGS] = basic.create_linear_initializer(\n self._vocab_size)\n self._embeddings = tf.get_variable(\n \"embeddings\",\n shape=[self._vocab_size, self._embed_dim],\n dtype=tf.float32,\n initializer=self._initializers[self.EMBEDDINGS],\n partitioner=self._partitioners.get(self.EMBEDDINGS, None),\n regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n trainable=self._trainable)\n else:\n self._embeddings = tf.get_variable(\n \"embeddings\",\n dtype=tf.float32,\n initializer=self._existing_vocab,\n regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n trainable=self._trainable)\n\n # Lookup embeddings\n return tf.nn.embedding_lookup(\n self._embeddings, ids, name=\"embedding_lookup\")", "def embedding_layer(self):\n with tf.name_scope(\"Embedding_Layer\"):\n V_size = len(self.vocab)\n embed_dim = len(self.embed[0]) \n W_embed_ = tf.get_variable(\"W_embed\",shape=[V_size, embed_dim],trainable=False).assign(np.asarray(self.embed))\n W_analogy_embed_ = tf.get_variable(\"W_analogy_embed\",shape=[V_size, embed_dim],trainable=True,initializer=tf.random_uniform_initializer(minval=-1,maxval=1))\n return W_embed_, W_analogy_embed_", "def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.pretrained_word_mat = tf.get_variable(\"word_emb_mat\",\n [self.vocab.word_size() - 2, self.vocab.word_embed_dim],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n 
self.vocab.word_embeddings[2:],\n dtype=tf.float32),\n trainable=False)\n self.word_pad_unk_mat = tf.get_variable(\"word_unk_pad\",\n [2, self.pretrained_word_mat.get_shape()[1]],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[:2],\n dtype=tf.float32),\n trainable=True)\n\n self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)\n self.p_emb = tf.nn.embedding_lookup(self.word_mat, self.p)\n self.q_emb = tf.nn.embedding_lookup(self.word_mat, self.q)", "def __init__(self,\n vocab_size=None,\n embed_dim=None,\n existing_vocab=None,\n initializers=None,\n partitioners=None,\n regularizers=None,\n trainable=True,\n custom_getter=None,\n name=\"embed\"):\n if vocab_size is None and existing_vocab is None:\n raise ValueError(\"Must provide on of vocab_size or existing_vocab.\")\n\n if existing_vocab is not None and not all(\n x is None for x in [vocab_size, embed_dim, initializers, partitioners]):\n raise ValueError(\"If existing_vocab is provided, none of vocab_size, \"\n \"embedding_dim, initializers, or partitioners is \"\n \"needed.\")\n\n super(Embed, self).__init__(custom_getter=custom_getter, name=name)\n self._existing_vocab = None\n if existing_vocab is None:\n self._vocab_size = vocab_size\n self._embed_dim = embed_dim or _embedding_dim(self._vocab_size)\n else:\n self._existing_vocab = tf.convert_to_tensor(\n existing_vocab, dtype=tf.float32)\n existing_vocab_shape = self._existing_vocab.get_shape().with_rank(2)\n existing_vocab_shape.assert_is_fully_defined()\n self._vocab_size, self._embed_dim = existing_vocab_shape.as_list()\n\n self._initializers = util.check_initializers(\n initializers, self.POSSIBLE_INITIALIZER_KEYS)\n self._partitioners = util.check_partitioners(\n partitioners, self.POSSIBLE_INITIALIZER_KEYS)\n self._regularizers = util.check_regularizers(\n regularizers, self.POSSIBLE_INITIALIZER_KEYS)\n self._trainable = trainable", "def _initialize_embeddings(self):\n with tf.variable_scope(self.scope):\n init_temporal_s = np.sqrt(\n 6. 
/ (self._config.nact_dict[\"num_s\"] + self._config.ndim_emb + 1))\n\n self.w_dt = tf.get_variable(\n name=\"w_dt\",\n shape=[1, self._config.ndim_emb],\n initializer=tf.initializers.random_uniform(\n -init_temporal_s, init_temporal_s))\n\n if self._config.embedding_type not in self._embedding_classes:\n raise ValueError(\n f\"Unknown embedding type: {self._config.embedding_type}.\")\n self.embedding = self._embedding_classes[self._config.embedding_type](\n self._config, self._embed_dim_dict)", "def add_embedding(self, prefix=''):\n with tf.variable_scope(prefix + 'embed'):\n if self.cfg.fix_emb:\n assert (hasattr(self.cfg, 'W_emb'))\n W_emb = pkl.load(open(self.cfg.W_emb_path, 'rb'))\n W = tf.get_variable('W', initializer= W_emb, trainable=True)\n print(\"iniitalize word embedding finished\")\n else:\n weightInit = tf.random_uniform_initializer(-0.001, 0.001)\n vocab = pkl.load(open(self.cfg.vocab_path, 'rb'))\n W = tf.get_variable('W', [len(vocab), self.cfg.emb_size], initializer=weightInit)\n if hasattr(self.cfg, 'relu_w') and self.cfg.relu_w:\n W = tf.nn.relu(W)\n return W", "def tf_word2vec(sentences, vocab, epochs, learning_rate, num_sampled,\n window_size, batch_size, embed_size, tensorboard):\n vocab_size = len(vocab)\n\n # Clears the default graph stack and resets the global default graph;\n # this line is crucial if we want to re-run the class in interactive\n # environment such as jupyter notebook\n tf.reset_default_graph()\n\n # when building out tensorflow's computation graph, it's a good practice to\n # group nodes/operations that have similar purposes together using name_scope;\n # this additional step will give us nicer graph representation in Tensorboard,\n # which is tool that gives us nice graphical representation of the computation\n # graph we have defined\n with tf.name_scope('data'):\n # for target_words:\n # we will use it with tensorflow's loss later, and the function requires rank 2\n # input, that's why there's an extra dimension in the shape\n center_words = tf.placeholder(tf.int32, shape = [batch_size], name = 'center_words')\n target_words = tf.placeholder(tf.int32, shape = [batch_size, 1], name = 'target_words')\n\n with tf.name_scope('embedding_matrix'):\n # the actual word vectors\n embed_matrix = tf.Variable(\n tf.random_uniform([vocab_size, embed_size], -1.0, 1.0), name = 'embed_matrix')\n\n with tf.name_scope('loss'):\n # input -> hidden layer\n embed = tf.nn.embedding_lookup(embed_matrix, center_words, name = 'embed')\n\n # hidden layer -> output layer's weights\n stddev = 1.0 / embed_size ** 0.5\n output_weight = tf.Variable(\n tf.truncated_normal([vocab_size, embed_size], stddev = stddev), name = 'output_weight')\n\n output_bias = tf.Variable(tf.zeros([vocab_size]), name = 'output_bias')\n\n # hidden layer -> output layer + sampled softmax loss\n total_loss = tf.reduce_mean(tf.nn.sampled_softmax_loss( # tf.nn.nce_loss(\n weights = output_weight, biases = output_bias,\n labels = target_words, inputs = embed,\n num_sampled = num_sampled, num_classes = vocab_size), name = 'loss')\n\n # create a summary scalar that reports the loss\n tf.summary.scalar('total_loss', total_loss)\n summary_op = tf.summary.merge_all()\n\n optimizer = tf.train.AdagradOptimizer(learning_rate)\n train_step = optimizer.minimize(total_loss)\n init = tf.global_variables_initializer()\n\n # batch_iters = len(data) // batch_size\n with tf.Session() as sess:\n sess.run(init)\n\n # record the average loss in the last skip_step steps\n history = []\n writer = 
tf.summary.FileWriter(tensorboard, sess.graph)\n for epoch in trange(epochs):\n iterator = generate_sample(sentences, vocab, window = window_size)\n batch_gen = get_batch(iterator, batch_size)\n\n # for _ in range(batch_iters):\n # try:\n centers, targets = next(batch_gen)\n feed_dict = {center_words: centers, target_words: targets}\n _, loss, summary = sess.run([train_step, total_loss, summary_op], feed_dict)\n\n writer.add_summary(summary, epoch)\n history.append(loss)\n\n writer.close()\n word_vectors = sess.run(embed_matrix)\n\n return word_vectors, history", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.word_embeddings = tf.get_variable(\n 'word_embeddings',\n shape=(self.term_vocab.size(), self.term_vocab.embed_dim),\n initializer=tf.constant_initializer(self.term_vocab.embeddings),\n trainable=True\n )\n self.p_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)\n self.q_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)\n\n with tf.variable_scope('char_embedding'):\n self.char_embeddings = tf.get_variable(\n 'char_embeddings',\n shape=(self.char_vocab.size(), self.char_vocab.embed_dim),\n initializer=tf.constant_initializer(self.char_vocab.embeddings),\n trainable=True\n )\n self.p_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.p_char) # [batch, seqlen, max_char_num, embedding_size]\n self.q_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.q_char)\n\n self.p_char_emb = self.cnn_emb(self.p_char_emb, \"p_emb\")\n self.q_char_emb = self.cnn_emb(self.q_char_emb, \"q_emb\")\n '''\n self.p_char_emb = tf.reshape(self.p_char_emb, [-1, self.max_char_num, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [-1, self.max_char_num, self.emb_size])\n\n self.p_char_emb = cnn_layer.conv(self.p_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=None)\n self.q_char_emb = cnn_layer.conv(self.q_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=True)\n\n self.p_char_emb = tf.reduce_max(self.p_char_emb, axis=1) # [batch*seqlen, 1, emb_size]\n self.q_char_emb = tf.reduce_max(self.q_char_emb, axis=1)\n\n batch_size = tf.shape(self.p_word_emb)[0]\n self.p_char_emb = tf.reshape(self.p_char_emb, [batch_size, -1, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [batch_size, -1, self.emb_size])\n\n self.p_char_emb = tf.nn.dropout(self.p_char_emb, 0.95)\n self.q_char_emb = tf.nn.dropout(self.q_char_emb, 0.95)\n '''\n self.p_emb = tf.concat([self.p_word_emb, self.p_char_emb], -1)\n self.q_emb = tf.concat([self.q_word_emb, self.q_char_emb], -1)", "def instantiate_weights(self):\n with tf.variable_scope(\"embedding_projection\"), tf.device('/cpu:0'): # embedding matrix\n self.Embedding = tf.get_variable(\"Embedding\", shape=[self.vocab_size, self.embed_size],\n initializer=self.initializer)\n # self.Embedding_label = tf.get_variable(\"Embedding_label\", shape=[self.num_classes, self.embed_size],\n # dtype=tf.float32) # ,initializer=self.initializer\n # self.W_projection = tf.get_variable(\"W_projection\", shape=[self.sequence_length * self.d_model, self.num_classes],\n # initializer=self.initializer) # [embed_size,label_size]\n # self.b_projection = tf.get_variable(\"b_projection\", shape=[self.num_classes])", "def create_embedding(num_symbol, embedding_size, embedding_name):\n return tf.Variable(tf.random_uniform([num_symbol, embedding_size], -0.1, 0.1, tf.float32), name=embedding_name, trainable=True)", "def 
__init__(self,\n vocab_size,\n embed_dim,\n dropout,\n pretrained,\n embedding=None,\n num_gpus=1,\n default_gpu_id=0,\n regularizer=None,\n random_seed=0,\n trainable=True,\n scope=\"word_feat\"):\n self.vocab_size = vocab_size\n self.embed_dim = embed_dim\n self.dropout = dropout\n self.pretrained = pretrained\n self.embedding = embedding\n self.num_gpus = num_gpus\n self.default_gpu_id = default_gpu_id\n self.regularizer = regularizer\n self.random_seed = random_seed\n self.trainable = trainable\n self.scope = scope\n \n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n self.embedding_layer = create_embedding_layer(self.vocab_size, self.embed_dim, self.pretrained,\n self.embedding, self.num_gpus, self.default_gpu_id, None, self.random_seed, self.trainable)\n \n self.dropout_layer = create_dropout_layer(self.dropout, self.num_gpus, self.default_gpu_id, self.random_seed)", "def get_embed(input_data, vocab_size, embed_dim):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n\n return embed", "def __init__(self, lstm_step=80, input_d=300, vocab_size=2196018, embedding=None):\n self.raw_premise = tf.placeholder(shape=[None, lstm_step], dtype=tf.int32, name='premise')\n self.premise_length = tf.placeholder(shape=[None], dtype=tf.int32, name='premise_length')\n\n self.raw_hypothesis = tf.placeholder(shape=[None, lstm_step], dtype=tf.int32, name='hypothesis')\n self.hypothesis_length = tf.placeholder(shape=[None], dtype=tf.int32, name='hypothesis_length')\n\n self.label = tf.placeholder(shape=[None], dtype=tf.int32)\n # Those operations take too many memory\n # Use cpu for those operations (deprecated when using truncate embedding)\n if embedding is not None:\n self.input_embedding = tf.placeholder(dtype=tf.float32, shape=embedding.shape, name='word_embedding')\n self.embedding = tf.Variable(tf.zeros(embedding.shape, dtype=tf.float32))\n else:\n \"\"\"\n If embedding is not provided, then use random number as embedding\n \"\"\"\n self.embedding = tf.Variable(tf.random_uniform([vocab_size, input_d], minval=-0.05, maxval=0.05))\n \"\"\"\n This is the embedding operation. 
It will be invoked by loading embedding function in the actual model\n \"\"\"\n self.load_embedding_op = self.embedding.assign(self.input_embedding)\n\n self.premise = tf.nn.embedding_lookup(self.embedding, self.raw_premise)\n self.hypothesis = tf.nn.embedding_lookup(self.embedding, self.raw_hypothesis)", "def add_word_embeddings_op(self):\n with tf.variable_scope(\"words\"):\n if self.config.embeddings is None:\n self.logger.info(\"WARNING: randomly initializing word vectors\")\n _word_embeddings = tf.get_variable(\n name=\"_word_embeddings\",\n dtype=tf.float32,\n shape=[self.config.nwords, self.config.dim_word])\n else:\n _word_embeddings = tf.Variable(\n self.config.embeddings,\n name=\"_word_embeddings\",\n dtype=tf.float32,\n trainable=self.config.train_embeddings)\n\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids, name=\"word_embeddings\")\n\n with tf.variable_scope(\"chars\"):\n if self.config.use_chars:\n # get char embeddings matrix\n _char_embeddings = tf.get_variable(\n name=\"_char_embeddings\",\n dtype=tf.float32,\n shape=[self.config.nchars, self.config.dim_char])\n char_embeddings = tf.nn.embedding_lookup(_char_embeddings,\n self.char_ids, name=\"char_embeddings\")\n\n # put the time dimension on axis=1\n s = tf.shape(char_embeddings)\n char_embeddings = tf.reshape(char_embeddings,\n shape=[s[0]*s[1], s[-2], self.config.dim_char])\n word_lengths = tf.reshape(self.word_lengths, shape=[s[0]*s[1]])\n\n # bi lstm on chars\n cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,\n state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,\n state_is_tuple=True)\n _output = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, char_embeddings,\n sequence_length=word_lengths, dtype=tf.float32)\n\n # read and concat output\n _, ((_, output_fw), (_, output_bw)) = _output\n output = tf.concat([output_fw, output_bw], axis=-1)\n\n # shape = (batch size, max sentence length, char hidden size)\n output = tf.reshape(output,\n shape=[s[0], s[1], 2*self.config.hidden_size_char])\n word_embeddings = tf.concat([word_embeddings, output], axis=-1)\n\n self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)", "def _get_embedding_layer(self, input_data, doc_input_data):\n opts = self._options\n word_embedding = tf.Variable(tf.random_uniform((self.vocab_size, opts.embed_dim), -1.0, 1.0))\n embed = []\n\n temp = tf.zeros([opts.batch_size, opts.embed_dim])\n embed_d = []\n for n in range(opts.sentence_sample):\n temp = tf.add(temp, tf.nn.embedding_lookup(word_embedding, doc_input_data[:, n]))\n embed_d.append(temp)\n\n if opts.concat == 'True':\n combined_embed_vector_length = opts.embed_dim * opts.window_size + opts.embed_dim\n for j in range(opts.window_size):\n embed_w = tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed.append(embed_w)\n embed.append(embed_d)\n else:\n combined_embed_vector_length = opts.embed_dim\n embed_w = tf.zeros([opts.batch_size, opts.embed_dim])\n for j in range(opts.window_size):\n embed_w += tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed_w += embed_d\n embed.append(embed_w)\n\n return tf.concat(embed, 1), word_embedding, combined_embed_vector_length", "def embedding_setup(self, embedding, emb_trainable):\n if emb_trainable == True:\n emb_variable = tf.get_variable(\n name=\"embedding_matrix\", shape=embedding.shape,\n initializer = tf.constant_initializer(embedding))\n return emb_variable\n else:\n return embedding", "def add_embedding(self):\n #with 
tf.variable_scope(\"RNN\", reuse = tf.AUTO_REUSE):\n embeddings = tf.get_variable(\"embeddings\", initializer = self.pretrained_embeddings,trainable=True)\n inputs = self.input_placeholder\n inputs = tf.reshape(inputs, [self.config.batch_size, -1 , self.config.n_features])\n embeddings = tf.nn.embedding_lookup(embeddings, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [self.config.batch_size, -1, self.config.n_features* self.config.embed_size])\n embeddings = tf.cast(embeddings, tf.float32)\n return embeddings", "def embedding(inputs,\n vocab_dim,\n embedding_dim,\n reuse,\n validate_indices=False,\n w_init=tf.random_uniform_initializer(-1., 1.),\n trainable=True,\n normalize=False,\n vocab_freqs=None,\n name=\"Embedding\"):\n\n input_shape = util.get_input_shape(inputs)\n assert len(input_shape) == 2, \"Input Tensor shape must be 2-D\"\n\n with tf.variable_scope(name, reuse=reuse):\n with tf.device('/cpu:0'):\n W = tf.get_variable(\n \"W\", shape=[vocab_dim, embedding_dim], initializer=w_init, trainable=trainable)\n if normalize:\n assert vocab_freqs is not None\n vocab_freqs = tf.constant(vocab_freqs, dtype=tf.float32, shape=(vocab_dim, 1))\n W = _normalize(W, vocab_freqs)\n\n output = tf.cast(inputs, tf.int32)\n output = tf.nn.embedding_lookup(W, output, validate_indices=validate_indices)\n\n shape = [-1] + output.get_shape().as_list()[1:3] + [1]\n # seq_length = util.retrieve_seq_length(tf.reshape(inputs, shape))\n\n return output", "def add_word_embedding_op(self):\n if self.pos:\n print(\"adding pos embeddings\")\n with tf.variable_scope(\"pos\"):\n _pos_embeddings = tf.Variable(self.pos_embeddings,\n name=\"la_pos_embeddings\",\n dtype=tf.float32, trainable=False)\n pos_embeddings = tf.nn.embedding_lookup(_pos_embeddings, self.pos_ids,\n name=\"pos_embeddings\")\n self.pos_vecs = pos_embeddings\n print(\"adding word_embeddings\")\n with tf.variable_scope(\"words\"):\n _word_embeddings = tf.Variable(self.embeddings, name=\"_word_embeddings\",\n dtype=tf.float32, trainable=False)\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids,\n name=\"word_embeddings\")\n if self.use_window:\n print(\"Concatenating word vectors of context words\")\n word_embeddings_sl = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids_sl,\n name=\"word_embeddings_sl\")\n word_embeddings_sr = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids_sr,\n name=\"word_embeddings_sr\")\n word_embeddings = tf.concat([word_embeddings_sr, word_embeddings,\n word_embeddings_sl], axis=-1)\n if self.use_char_embeddings:\n print(\"adding CNN for char embeddings\")\n with tf.variable_scope(\"chars\"):\n _char_embeddings = tf.get_variable(name=\"_char_embeddings\",\n dtype=tf.float32,\n shape=[self.char_count, \n self.c_dim_input])\n char_embeddings = tf.nn.embedding_lookup(_char_embeddings, \n self.char_ids, \n name=\"char_embeddings\")\n s = char_embeddings.shape\n # the shape of our char_embeddings is now (batch_size, max number of words\n # in each sentence, max number of chars in each word, self.c_dim )\n char_filter = tf.get_variable(\"char_filter\", dtype=tf.float32,\n shape=[self.c_filter_width, \n self.c_filter_height,\n self.c_dim_input,\n self.c_dim_output])\n print(\"adding 2d convolution layer\")\n char_conv_layer = tf.nn.conv2d(char_embeddings, char_filter, \n strides=[1, 1, 1, 1], \n padding=\"SAME\")\n char_conv_layer = tf.nn.tanh(char_conv_layer)\n print(\"adding 2d pooling layer\")\n char_conv_layer = tf.layers.max_pooling2d(char_conv_layer, \n 1, \n 
strides=1)\n char_output = tf.reshape(char_conv_layer, shape=[-1, self.max_len, \n self.max_word_length*\n self.c_dim_output])\n word_embeddings = tf.concat([word_embeddings, char_output], axis=-1)\n if self.pos and self.concat_pos:\n print(\"concatenating pos with word_embeddings\")\n word_embeddings = tf.concat([word_embeddings, pos_embeddings], axis=-1)\n self.word_embeddings = word_embeddings\n if self.use_additional and self.hybrid:\n print(\"using additional embeddings\")\n _word_embeddings_2 = tf.Variable(self.additional_embeddings,\n name=\"two_word_embeddings\",\n dtype=tf.float32, trainable=False)\n word_embeddings_2 = tf.nn.embedding_lookup(_word_embeddings_2,\n self.word_ids,\n name=\"two_word_embeddings\")\n self.word_embeddings_2 = word_embeddings_2", "def add_embeddings(self):\n\n with tf.device('/cpu:0'):\n with tf.variable_scope('Embedding_Layer'):\n embeddings = tf.Variable(self.initial_embeddings,name = 'Embeddings')\n self.input_embeddings = tf.nn.embedding_lookup(embeddings, self.inputs_placeholder) #(N,S,D)\n self.question_embeddings = tf.nn.embedding_lookup(embeddings, self.questions_placeholder) #(N,S,D)", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n \n vocab_size = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n any_word = list(word_to_vec_map.keys())[0]\n emb_dim = word_to_vec_map[any_word].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n \n ### START CODE HERE ###\n # Step 1\n # Initialize the embedding matrix as a numpy array of zeros.\n # See instructions above to choose the correct shape.\n emb_matrix = np.zeros((vocab_size, emb_dim))\n \n # Step 2\n # Set each row \"idx\" of the embedding matrix to be \n # the word vector representation of the idx'th word of the vocabulary\n for word, idx in word_to_index.items():\n emb_matrix[idx, :] = word_to_vec_map[word]\n\n # Step 3\n # Define Keras embedding layer with the correct input and output sizes\n # Make it non-trainable.\n embedding_layer = tensorflow.keras.layers.Embedding(input_dim = vocab_size, output_dim = emb_dim, trainable = False)\n ### END CODE HERE ###\n\n # Step 4 (already done for you; please do not modify)\n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n \n # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer", "def build_input_embed(self, n_input, t_input):\n n_embed_matrix = tf.Variable(tf.random_uniform(\n [self.num_ntoken, self.n_embed_dim], minval=-0.05, maxval=0.05), name='n_embed_matrix')\n t_embed_matrix = tf.Variable(tf.random_uniform(\n [self.num_ttoken, self.t_embed_dim], minval=-0.05, maxval=0.05), name='t_embed_matrix')\n n_input_embedding = tf.nn.embedding_lookup(n_embed_matrix, n_input)\n t_input_embedding = tf.nn.embedding_lookup(t_embed_matrix, t_input)\n return n_input_embedding, t_input_embedding", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def build(self, input_shapes):\n (word_embeddings_shape, _) = input_shapes\n width = word_embeddings_shape.as_list()[-1]\n self.type_embeddings = None\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"type_embeddings\",\n shape=[self.token_type_vocab_size, width],\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, width],\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=\"layer_norm\", axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_prob, dtype=tf.float32)\n super(EmbeddingPostprocessor, self).build(input_shapes)", "def build(self,unused):\n # (word_embeddings_shape, _) = input_shapes\n # width = word_embeddings_shape.as_list()[-1]\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"token_type_embeddings\",\n shape=[self.token_type_vocab_size, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=LAYER_NORM_NAME, axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_prob, dtype=tf.float32)\n super().build(unused)", "def source_embedding_fairseq(self):\r\n return tf.get_variable(\r\n name=\"W\",\r\n shape=[self.params[\"feature.dim\"], self.params[\"embedding.dim\"]],\r\n initializer=tf.random_normal_initializer(\r\n mean=0.0,\r\n stddev=0.1))", "def get_embed(input_data, vocab_size, embed_dim):\n embedding = tf.Variable(tf.random_uniform((vocab_size,embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n #print (\"embed_dim: \",embed_dim) # 向量表达维度为 256\n #print (\"input_data.shape: \",input_data.shape) # (50, 5)\n #print (\"embed.shap: \", embed.shape) # word 的向量表达 ==特征 (50, 5, 256) ==(batch_size, num_step, embed_dim)\n return embed # 
返回input的向量表达", "def __init__(self, vocab_size):\n super(Model, self).__init__()\n\n # TODO: initialize vocab_size, embedding_size\n self.vocab_size = vocab_size\n self.embedding_size = 256\n self.batch_size = 1000\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.0005)\n\n # TODO: initialize embeddings and forward pass weights (weights, biases)\n self.E = tf.Variable(tf.random.truncated_normal(shape=[self.vocab_size, self.embedding_size], mean=0, stddev=0.1))\n self.W = tf.Variable(tf.random.truncated_normal(shape=[self.embedding_size * 2, self.vocab_size], mean=0, stddev=0.1))\n self.b = tf.Variable(tf.random.truncated_normal(shape=[1, self.vocab_size], mean=0, stddev=0.1))", "def setup_embeddings(self):\n with vs.variable_scope(\"embeddings\"):\n vec_embeddings = tf.get_variable(\"embeddings\", initializer=self.pretrained_embeddings, trainable=False)\n context_batch_embeddings = tf.nn.embedding_lookup(vec_embeddings, self.context_placeholder)\n question_batch_embeddings = tf.nn.embedding_lookup(vec_embeddings, self.question_placeholder)\n context_embeddings = tf.reshape(context_batch_embeddings,\n (-1, self.max_context_len, self.vocab_dim))\n question_embeddings = tf.reshape(question_batch_embeddings,\n (-1, self.max_question_len, self.vocab_dim))\n return context_embeddings, question_embeddings", "def load_embedding_tf(word_to_index, tf_embeddings_file_path, nb_dims):\n # 1. Define the variable that will hold the embedding:\n tf_embedding = tf.Variable(\n tf.constant(0.0, shape=[len(word_to_index)-1, nb_dims]),\n trainable=False,\n name=\"Embedding\"\n )\n\n # 2. Restore the embedding from disks to TensorFlow, GPU (or CPU if GPU unavailable):\n variables_to_restore = [tf_embedding]\n embedding_saver = tf.compat.v1.train.Saver(variables_to_restore)\n embedding_saver.restore(sess, save_path=tf_embeddings_file_path)\n print(\"TF embeddings restored from '{}'.\".format(tf_embeddings_file_path))\n \n return tf_embedding", "def init_word_embeddings(session, model, embeddings_file):\n # Create word embedding array from word2vec file\n vocab_size = FLAGS.vocab_size\n embeddings = []\n with tf.gfile.Open(embeddings_file) as f:\n i = 0\n while i < vocab_size:\n numbers = f.readline().split()\n if len(numbers) > 0:\n embeddings.append([float(n) for n in numbers])\n i += 1\n else:\n break # Last line of embeddings file is empty\n\n # Eliminate the random word embeddings and introduce word2vec to the realm of variable scopes.\n # The victims will be:\n # \"embedding_attention_seq2seq/RNN/EmbeddingWrapper/embedding\"\n # \"embedding_attention_seq2seq/embedding_attention_decoder/embedding\"\n np_embeddings = np.array(embeddings)\n feed_dict = {model.word2vec_placeholder: np_embeddings}\n session.run(model.word2vec_assign_encoder_op, feed_dict=feed_dict)\n session.run(model.word2vec_assign_decoder_op, feed_dict=feed_dict)", "def embedded(self, word_ids, embedding_tensor, scope=\"embedding\"):\n with tf.variable_scope(scope):\n with tf.device(\"/cpu:0\"):\n inputs = tf.nn.embedding_lookup(embedding_tensor, word_ids)\n return inputs", "def make_embedding(src_emb_hparams, src_token_to_id_map,\n tgt_emb_hparams=None, tgt_token_to_id_map=None,\n emb_init_share=False):\n src_embedding = MonoTextData.make_embedding(src_emb_hparams,\n src_token_to_id_map)\n\n if emb_init_share:\n tgt_embedding = src_embedding\n else:\n tgt_emb_file = tgt_emb_hparams[\"file\"]\n tgt_embedding = None\n if tgt_emb_file is not None and tgt_emb_file != \"\":\n tgt_embedding = Embedding(tgt_token_to_id_map, 
tgt_emb_hparams)\n\n return src_embedding, tgt_embedding", "def __init__(self,\n vocab_size,\n embed_dim,\n unit_dim,\n window_size,\n hidden_activation,\n pooling_type,\n dropout,\n num_gpus=1,\n default_gpu_id=0,\n regularizer=None,\n random_seed=0,\n trainable=True,\n scope=\"subword_feat\"):\n self.vocab_size = vocab_size\n self.embed_dim = embed_dim\n self.unit_dim = unit_dim\n self.window_size = window_size\n self.hidden_activation = hidden_activation\n self.pooling_type = pooling_type\n self.dropout = dropout\n self.num_gpus = num_gpus\n self.default_gpu_id = default_gpu_id\n self.regularizer = regularizer\n self.random_seed = random_seed\n self.trainable = trainable\n self.scope = scope\n \n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n self.embedding_layer = create_embedding_layer(self.vocab_size, self.embed_dim, False,\n None, self.num_gpus, self.default_gpu_id, None, self.random_seed, self.trainable)\n \n self.dropout_layer = create_dropout_layer(self.dropout, self.num_gpus, self.default_gpu_id, self.random_seed)\n \n self.conv_layer = create_convolution_layer(\"multi_1d\", 1, self.embed_dim,\n self.unit_dim, 1, self.window_size, 1, \"SAME\", self.hidden_activation, [0.0], None,\n False, False, True, self.num_gpus, self.default_gpu_id, self.regularizer, self.random_seed, self.trainable)\n \n self.pooling_layer = create_pooling_layer(self.pooling_type, self.num_gpus, self.default_gpu_id)", "def embedding_table(inputs, vocab_size, embed_size, zero_pad=False,\n trainable=True, scope=\"embedding\", reuse=None):\n with tf.variable_scope(scope, reuse=reuse):\n embed_table = tf.get_variable('embedding_table',\n shape=[vocab_size, embed_size],\n initializer=_init,\n trainable=trainable,\n dtype=tf.float32)\n if zero_pad:\n embed_table = tf.concat((tf.zeros(shape=[1, embed_size]), embed_table[1:, :]),\n axis=0)\n\n return tf.nn.embedding_lookup(embed_table, inputs)", "def get_embed(input_data, vocab_size, embed_dim):\n # todo 需要编程:\n # 1、构建嵌入矩阵的查找表\n lookup_w = tf.Variable(\n initial_value=tf.random_uniform([vocab_size, embed_dim], -1.0, 1.0)\n )\n # 2、获得嵌入输出\n embed = tf.nn.embedding_lookup(params=lookup_w, ids=input_data)\n # [N, n_steps, embed_size]\n return embed", "def __init__(self, vocab_size: int, embedding_dim: int, hidden_size: int, dropout: float = 0.2,\n read_context: bool = False, pad_idx: int = Vocabulary.pad_idx):\n super(FullVocabularyModel, self).__init__()\n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)\n self.embed_dropout = nn.Dropout(dropout)\n self.rnn = nn.LSTM(embedding_dim, hidden_size)\n self.linear = nn.Linear(hidden_size, vocab_size)\n self.loss_fn = nn.CrossEntropyLoss(ignore_index=pad_idx)\n\n self.vocab_size = vocab_size\n self.read_context = read_context\n self.pad_idx = pad_idx\n\n initrange = 0.5 / embedding_dim\n self.embedding.weight.data.uniform_(-initrange, initrange)\n self.embedding.weight.data[pad_idx].zero_()", "def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0):\n with tf.variable_scope(\n name, default_name=\"embedding\", values=[x], reuse=reuse):\n embedding_var = tf.get_variable(\"kernel\", [vocab_size, dense_size])\n emb_x = tf.gather(embedding_var, x)\n if multiplier != 1.0:\n emb_x *= multiplier\n return emb_x", "def __init__(self, layer_id,\n shape, X):\n prefix = 'Embedding' + layer_id\n self.n_words, self.in_size = shape\n\n # weights for embedding, the only parameters\n self.W = init_weights(shape=(self.n_words, self.in_size),\n name=prefix + '#W')\n\n 
self.params = [self.W]\n\n # Compute the embedded samples\n self.n_timesteps = X.shape[0]\n self.n_samples = X.shape[1]\n\n self.activation = self.W[X.flatten()].reshape([self.n_timesteps,\n self.n_samples,\n self.in_size])", "def createTheModel(vocabulary, window=configuration['mlp']['posWindow']):\n inputLayers, interLayers = [], []\n inputToken = Input((3 + configuration['embedding']['useB1'] + configuration['embedding']['useB-1'],))\n inputLayers.append(inputToken)\n tokenEmb = Embedding(len(vocabulary.tokenIndices), configuration['mlp']['tokenEmb'],\n trainable=configuration['mlp']['trainable'])(inputToken)\n tokenFlatten = Flatten()(tokenEmb)\n interLayers.append(tokenFlatten)\n posNum = (2 * window + 1) * (3 + configuration['embedding']['useB1'] + configuration['embedding']['useB-1'])\n inputPos = Input((posNum,))\n inputLayers.append(inputPos)\n posEmb = Embedding(len(vocabulary.posIndices), configuration['mlp']['posEmb'],\n trainable=configuration['mlp']['trainable'])(inputPos)\n posFlatten = Flatten()(posEmb)\n interLayers.append(posFlatten)\n\n interLayers = keras.layers.concatenate(interLayers)\n lastLayer = Dense(configuration['mlp']['dense1UnitNumber'],\n activation=configuration['nn']['dense1Activation'])(interLayers)\n # dropout=configuration['mlp']['dense1Dropout'])(interLayers)\n lastLayer = Dropout(configuration['mlp']['dense1Dropout'])(lastLayer)\n softmaxLayer = Dense(8 if enableCategorization else 4, activation='softmax')(lastLayer)\n return inputLayers, softmaxLayer", "def embedding(org_input):\n # Create the embedding list\n for f in range(Config.num_feature):\n num_cat_value = Config.schema[f]\n\n if num_cat_value == 1:\n pass\n elif num_cat_value > 1:\n embed_dict[f] = tf.get_variable(\n name=\"embed_\" + str(f),\n shape=[num_cat_value, Config.embed_size[f]],\n trainable=True)\n else:\n raise ValueError(\"Schema values should be positive integers!\")\n\n # Create embedded inputs\n f_size = np.sum(Config.embed_size)\n embedded_input = embed_events(org_input, f_size)\n\n return embedded_input", "def _add_seq2seq(self):\n hps = self._hps\n vsize = self._vocab.size() # size of the vocabulary\n \n with tf.variable_scope('seq2seq'):\n # Some initializers\n self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)\n self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)\n\n\n with tf.variable_scope('embedding'):\n if hps.pretrained_embeddings:\n word2vec = load_embeddings(hps.embeddings_path, self._vocab.word2id, hps.rand_unif_init_mag)\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=tf.constant_initializer(word2vec))\n # self.assign_embedding = tf.assign(self.embedding, word2vec)\n else:\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n if hps.mode==\"train\": self._add_emb_vis(self.embedding) # add to tensorboard\n\n # tensor with shape (batch_size, max_enc_steps, emb_size)\n emb_enc_inputs = tf.nn.embedding_lookup(self.embedding, self._enc_batch)\n if self._hps.hier:\n enc_batch_sections = tf.unstack(self._enc_batch_sections, axis=1)\n sec_emb_enc_inputs = [tf.nn.embedding_lookup(self.embedding, section)\n for section in enc_batch_sections]\n # list length max_dec_steps containing shape (batch_size, emb_size)\n emb_dec_inputs = [tf.nn.embedding_lookup(self.embedding, x)\n for x in tf.unstack(self._dec_batch, axis=1)]\n\n\n # Hierarchical attention model\n 
if self._hps.hier:\n with tf.variable_scope('encoder'), tf.device(self._next_device()):\n sec_enc_outs = []\n states_fw = []\n states_bw = []\n states = []\n\n # level 1, encode words to sections\n with tf.variable_scope(\"word_level_encoder\", reuse=tf.AUTO_REUSE) as scope:\n encoder_outputs_words = []\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n fw_st, bw_st = None, None\n if self._hps.use_do: # DropOut\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n for i in range(self._hps.num_sections):\n encoder_tmp_output, (fw_st, bw_st) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, inputs=sec_emb_enc_inputs[i], dtype=tf.float32,\n sequence_length=self._batch_sections_len[:,i], swap_memory=True, initial_state_bw=bw_st, initial_state_fw=fw_st)\n # concatenate the forwards and backwards states\n encoder_tmp_output = tf.concat(axis=2, values=encoder_tmp_output) #shape=[batch x seq_len x hidden_size]\n \n encoder_outputs_words.append(encoder_tmp_output)\n # instead of concating the fw and bw states, we use a ff network\n combined_state = self._reduce_states(fw_st, bw_st)\n states.append(combined_state)\n scope.reuse_variables()\n \n # level 2, encode sections to doc\n encoder_outputs_words = tf.stack(encoder_outputs_words, axis=1) # shape [batch x num_sections x seq_len x hidden_size]\n shapes = encoder_outputs_words.shape\n encoder_outputs_words = tf.reshape(encoder_outputs_words, (shapes[0].value, -1, shapes[-1].value)) #shape=[batch x (seq_len * num_sections) x hidden_size]\n\n doc_sections_h = tf.stack([s.h for s in states], axis=1) # [batch x num_sections x hidden_size]\n doc_sections_c = tf.stack([s.c for s in states], axis=1) # [batch x num_sections x hidden_size]\n\n with tf.variable_scope(\"section_level_encoder\"):\n if FLAGS.section_level_encoder == 'RNN':\n cell_fw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do:\n cell_fw_1 = tf.contrib.rnn.DropoutWrapper(cell_fw_1, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw_1 = tf.contrib.rnn.DropoutWrapper(cell_bw_1, output_keep_prob=1.0 - self._hps.do_prob)\n encoder_output_sections, (fw_st_2, bw_st_2) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw_1, cell_bw_1, inputs=doc_sections_h, sequence_length=self._doc_sec_lens, dtype=tf.float32, swap_memory=True)\n encoder_output_sections = tf.concat(axis=2, values=encoder_output_sections)\n doc_sections_state = self._reduce_states(fw_st_2, bw_st_2)\n else:\n if FLAGS.section_level_encoder == 'AVG': # average section cells\n doc_sections_state_h = tf.reduce_mean(doc_sections_h, axis=1)\n doc_sections_state_c = tf.reduce_mean(doc_sections_c, axis=1)\n elif FLAGS.section_level_encoder == 'FF': # use a feedforward network to combine section cells\n doc_sections_state_h = tf.reshape([doc_sections_h.shape[0].eval(), -1])\n doc_sections_state_h = tf.layers.dense(\n inputs=doc_sections_state_h,\n units=self._hps.hidden,\n activation=tf.nn.relu) \n doc_sections_state_c = tf.reshape([doc_sections_c.shape[0].eval(), -1])\n doc_sections_state_c = tf.layers.dense(\n inputs=doc_sections_state_c,\n 
units=self._hps.hidden,\n activation=tf.nn.relu)\n else:\n raise AttributeError('FLAGS.section_level_encoder={} is not a valid option'.format(FLAGS.section_level_encoder))\n doc_sections_state = tf.contrib.rnn.LSTMStateTuple(doc_sections_state_c, doc_sections_state_h)\n encoder_output_sections = doc_sections_h \n \n elif not self._hps.multi_layer_encoder:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n (encoder_outputs, (fw_st, bw_st)) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n # concatenate the forwards and backwards states\n encoder_outputs = tf.concat(axis=2, values=encoder_outputs)\n \n # stack n layers of lstms for encoder\n elif self._hps.multi_layer_encoder:\n # TODO: check\n for layer_i in xrange(self._hps.enc_layers):\n with tf.variable_scope('encoder%d'%layer_i), tf.device(\n self._next_device()):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do: # add dropout\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n emb_enc_inputs, (fw_st, bw_st) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n emb_enc_inputs = tf.concat(axis=2, values=emb_enc_inputs)\n encoder_outputs = emb_enc_inputs\n \n if self._hps.hier:\n self._enc_sec_states = encoder_output_sections\n self._enc_states = encoder_outputs_words \n else:\n self._enc_states = encoder_outputs\n self._enc_sec_states = None\n \n # convert the encoder bidirectional hidden state to the decoder state\n # (unidirectional) by an MLP\n if self._hps.hier:\n self._dec_in_state = doc_sections_state\n else:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n self._dec_in_state = self._reduce_states(fw_st, bw_st) \n \n # Add the decoder\n\n with tf.variable_scope('decoder'), tf.device(self._next_device()):\n cell = tf.contrib.rnn.LSTMCell(\n self._hps.hidden_dim,\n state_is_tuple=True,\n initializer=self.rand_unif_init)\n \n # We need to pass in the previous step's coverage vector each time\n prev_coverage = self.prev_coverage\\\n if hps.mode==\"decode\" and self._hps.coverage \\\n else None \n \n \n if self._hps.hier:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, self.attn_dists_sec =\\\n self.attn_decoder(emb_dec_inputs,\n self._dec_in_state,\n self._enc_states,\n cell,\n self._enc_sec_states,\n num_words_section=self._batch_sections_len,\n enc_padding_mask=self._enc_padding_mask,\n enc_section_padding_mask=self._enc_section_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n temperature=self._hps.temperature\n )\n \n else:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, _ =\\\n self.attn_decoder(emb_dec_inputs,\n 
self._dec_in_state,\n self._enc_states,\n cell,\n encoder_section_states=None,\n num_words_section=None,\n enc_padding_mask=self._enc_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n ) \n \n\n # Project decoder output to vocabulary\n with tf.variable_scope('output_projection'), tf.device(self._next_device()):\n if self._hps.output_weight_sharing:\n # share weights of embedding layer with projection\n # self.embedding is in shape [vsize, hps.emb_dim]\n w_proj = tf.get_variable('w_proj', [self._hps.emb_dim, self._hps.hidden_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n w = tf.tanh(tf.transpose(tf.matmul(self.embedding, w_proj))) # shape = [vsize, hps.hidden_dim]\n \n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n else: \n w = tf.get_variable('w', [self._hps.hidden_dim, vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n # vocabulary score at each decoder step\n vocab_scores = []\n for i,output in enumerate(decoder_outputs):\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n vocab_scores.append(tf.nn.xw_plus_b(output, w, b)) # apply the linear layer\n\n # the final vocab distribution for each decoder time step\n # shape of each element is [batch_size, vsize]\n vocab_dists = [tf.nn.softmax(s) for s in vocab_scores] \n\n \n # pointing / generating\n if FLAGS.pointer_gen:\n final_dists = self._calc_final_dist(vocab_dists, self.attn_dists)\n# log_dists = [tf.log(dist) for dist in final_dists]\n else:\n# log_dists = [tf.log(dist) for dist in vocab_dists]\n final_dists = vocab_dists\n \n\n # Calculate Losses:\n \n if self._hps.mode in ['train', 'eval']:\n # Calculate the loss\n with tf.variable_scope('loss'), tf.device(self._next_device()):\n if FLAGS.pointer_gen:\n # Calculate the loss per step\n # This is fiddly; we use tf.gather_nd to pick out the gold target words\n # will be list length max_dec_steps containing shape (batch_size)\n loss_per_step = [] \n batch_nums = tf.range(0, limit=hps.batch_size) # shape (batch_size)\n for dec_step, dist in enumerate(final_dists):\n # The indices of the target words. shape (batch_size)\n targets = self._target_batch[:,dec_step] \n indices = tf.stack( (batch_nums, targets), axis=1) # shape (batch_size, 2)\n # shape (batch_size). 
loss on this step for each batch\n gold_probs = tf.gather_nd(dist, indices)\n losses = -tf.log(gold_probs)\n loss_per_step.append(losses)\n\n # Apply dec_padding_mask mask and get loss\n self._loss = _mask_and_avg(loss_per_step, self._dec_padding_mask)\n \n\n else: # baseline model\n # this applies softmax internally\n self._loss = tf.contrib.seq2seq.sequence_loss(\n tf.stack(vocab_scores, axis=1), self._target_batch, self._dec_padding_mask) # this applies softmax internally\n\n tf.summary.scalar('loss', self._loss)\n\n # Calculate coverage loss from the attention distributions\n if self._hps.coverage:\n with tf.variable_scope('coverage_loss'):\n self._coverage_loss = _coverage_loss(self.attn_dists, self._dec_padding_mask)\n tf.summary.scalar('coverage_loss', self._coverage_loss)\n self._total_loss = self._loss + self._hps.cov_loss_wt * self._coverage_loss\n tf.summary.scalar('total_loss', self._total_loss)\n \n # ---------------------------/\n\n\n if self._hps.mode == \"decode\":\n assert len(final_dists) == 1 # final_dists is a singleton list containing shape (batch_size, extended_vsize)\n final_dists = final_dists[0]\n topk_probs, self._topk_ids = tf.nn.top_k(final_dists, hps.batch_size*2) # take the k largest probs. note batch_size=beam_size in decode mode\n self._topk_log_probs = tf.log(topk_probs)", "def build_seq_embeddings(self):\n with tf.variable_scope(\"seq_embedding\"), tf.device(\"/cpu:0\"):\n embedding_map = tf.get_variable(\n name=\"map\",\n shape=[self.config.vocab_size, self.config.word_embedding_size],\n initializer=self.initializer)\n \n # We need to store the normalized lookup table for efficient mapping of embedding vectors to closest words\n self.normed_embedding_map = tf.nn.l2_normalize(embedding_map, dim=1)\n \n seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs) \n # seq_embeddings has the shape (batch_size, sequence_length, sentence_length, embedding_size)\n # meaning, for each index in input_seqs (with shape (batch_size, sequence_length, sentence_length)) it stores an embedding vector\n\n #print('Shape seq_embeddings: ' + str(seq_embeddings.get_shape()))\n\n self.seq_embeddings = seq_embeddings", "def init_embedding(input_embedding):\n bias = np.sqrt(3.0 / input_embedding.size(1))\n nn.init.uniform_(input_embedding, -bias, bias)", "def build(self, unused_input_shapes):\n if self.embedding_lookup is None:\n self.embedding_lookup = layers.OnDeviceEmbedding(\n vocab_size=self.config.vocab_size,\n embedding_width=self.config.hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.config.initializer_range),\n name=\"target_embeddings\")\n self.embedding_postprocessor = EmbeddingPostprocessor(\n use_type_embeddings=False,\n use_position_embeddings=True,\n max_position_embeddings=self.config.max_position_embeddings,\n dropout_prob=self.config.hidden_dropout_prob,\n initializer=tf.keras.initializers.VarianceScaling(\n scale=self.config.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"uniform\"),\n name=\"embedding_postprocessor\")\n # Decoder can use a different intermediate size.\n self.multi_channel_cross_attention = self.config.get(\n \"multi_channel_cross_attention\", False)\n self.decoder = TransformerDecoder(\n num_hidden_layers=self.config.num_decoder_layers,\n hidden_size=self.config.hidden_size,\n num_attention_heads=self.config.num_decoder_attn_heads,\n intermediate_size=self.config.decoder_intermediate_size,\n intermediate_activation=self.config.hidden_act,\n 
hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n multi_channel_cross_attention=self.multi_channel_cross_attention,\n name=\"decoder\")\n super(Decoder, self).build(unused_input_shapes)", "def generate_embedding(inputs, input_shape: list, embedding_args: dict, embedding_rank: int = 1, **kwargs):\n if len(kwargs) > 0:\n print(\"WARNING:kgcnn: Unknown embedding kwargs {0}. Will be reserved for future versions.\".format(kwargs))\n\n if len(input_shape) == embedding_rank:\n n = ks.layers.Embedding(**embedding_args)(inputs)\n else:\n n = inputs\n return n", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n emb_matrix = np.zeros((vocab_len, emb_dim)) # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\n for word, index in word_to_index.items(): # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\n emb_matrix[index, :] = word_to_vec_map[word]\n embedding_layer = Embedding(vocab_len, emb_dim, trainable = False) # Define Keras embedding layer with the correct output/input sizes, make it trainable. Use Embedding(...). Make sure to set trainable=False. \n embedding_layer.build((None,)) # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\n embedding_layer.set_weights([emb_matrix]) # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n return embedding_layer", "def _create_example(self):\n source = np.random.randn(self.batch_size, self.max_decode_length,\n self.input_depth)\n source_len = np.random.randint(0, self.max_decode_length, [self.batch_size])\n target_len = np.random.randint(0, self.max_decode_length * 2,\n [self.batch_size])\n target = np.random.randn(self.batch_size,\n np.max(target_len), self.input_depth)\n labels = np.random.randint(0, self.vocab_size,\n [self.batch_size, np.max(target_len) - 1])\n\n example_ = namedtuple(\n \"Example\", [\"source\", \"source_len\", \"target\", \"target_len\", \"labels\"])\n return example_(source, source_len, target, target_len, labels)", "def load_embedding(src: str, embedding_type, layers) -> TransformerEmbedding:\n emb = TransformerEmbedding(src, embedding_type=embedding_type, layers=layers)\n return emb", "def __init__(self, input, input_size, embedding_size):\n\n self.input = input\n self.output = layers.EmbeddingLayer(self.input, input_size, embedding_size, W=initialize_parameters()[0])", "def build_vocabulary(self, tokens=None, embeddings=None):\n if tokens is not None and embeddings is not None:\n raise ValueError(\"Only accepts either `tokens` or `embeddings`.\")\n\n if tokens is not None:\n # Build from tokenized tokens\n # for sentence in tqdm(tokens):\n # for word in tokens:\n # print(type(word))\n # exit()\n self.vocab.extend(\n list(set([\n word\n for sentence in tqdm(tokens)\n for word in sentence\n ]))\n )\n elif embeddings is not None:\n # Build from pretrained embeddings\n for word in tqdm(embeddings):\n word = word.strip(\"\\n\")\n word = word.split(\" \")\n\n self.vocab.append(word[0])\n vector = word[1:]\n self.vectors.append(vector)", "def init_embedding(size=50):\n vector = np.random.normal(0.0, 0.01, size)\n return vector", "def init_embedding(self):\n self.embedding.weight.data.uniform_(-1./self.num_embeddings, 1./self.num_embeddings)", "def __init__(self, num_words, embedding_size, use_cuda):\n super(StandardEmbedding, self).__init__()\n self.embedding_size = embedding_size\n self.num_hash_functions = 0\n self.embeddings = nn.Embedding(num_words, embedding_size)\n self.embeddings = self.embeddings.cuda() if use_cuda else self.embeddings", "def create_emb_for_encoder_and_decoder(vocab_size,\n embed_size,\n dtype=tf.float32,\n num_partitions=0,\n scope=None):\n\n if num_partitions <= 1:\n partitioner = None\n else:\n # Note: num_partitions > 1 is required for distributed training due to\n # embedding_lookup tries to colocate single partition-ed embedding variable\n # with lookup ops. This may cause embedding variables being placed on worker\n # jobs.\n partitioner = tf.fixed_size_partitioner(num_partitions)\n\n with tf.variable_scope(\n scope or \"embeddings\", dtype=dtype, partitioner=partitioner) as scope:\n # Share embedding\n embedding_encoder = tf.get_variable(\"shared_embedding\",\n [vocab_size, embed_size], dtype)\n embedding_decoder = embedding_encoder\n\n return embedding_encoder, embedding_decoder", "def TransformerTokenEmbedding(\n num_embeddings, embedding_dim, padding_idx, freeze_embed=False\n):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n if freeze_embed:\n m.weight.requires_grad = False\n return m", "def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. 
load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer", "def call(self, x, *args, **kwargs):\n with tf.name_scope(\"embedding\"):\n # fills out of bound values with padding symbol\n out_bound_mask = tf.cast(x > (self.vocab_size - 1), dtype=tf.int32)\n x *= 1 - out_bound_mask\n x += out_bound_mask * tf.cast(self.pad_sym, dtype=tf.int32)\n\n embeddings = tf.gather(self.shared_weights, x)\n if self.embed_scale:\n # Scale embedding by the sqrt of the hidden size\n embeddings *= self.hidden_size ** 0.5\n\n if self.mask_paddings:\n # Create binary array of size [batch_size, length]\n # where 1 = padding, 0 = not padding\n padding = get_padding(x, padding_value=self.pad_sym)\n\n # Set all padding embedding values to 0\n # embeddings *= tf.expand_dims(1 - padding, -1)\n embeddings *= tf.cast(tf.expand_dims(1.0 - padding, -1), dtype=embeddings.dtype)\n return embeddings", "def build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n model=tf.keras.Sequential([\n \n tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]),\n rnn(rnn_units, return_sequences=True, recurrent_initializer='glorot_uniform', stateful=True),\n tf.keras.layers.Dense(vocab_size)\n \n ])\n \n return model", "def embedding_layer(n_categories, embedding_dim, name=None):\n\n input_tensor = Input(shape=(1,))\n x = Embedding(n_categories, embedding_dim, name=name)(input_tensor)\n x = Reshape(target_shape=(embedding_dim,))(x)\n\n return input_tensor, x", "def make_vocab(src_hparams, tgt_hparams):\n src_vocab = MonoTextData.make_vocab(src_hparams)\n\n if tgt_hparams[\"processing_share\"]:\n tgt_bos_token = src_hparams[\"bos_token\"]\n tgt_eos_token = src_hparams[\"eos_token\"]\n else:\n tgt_bos_token = tgt_hparams[\"bos_token\"]\n tgt_eos_token = tgt_hparams[\"eos_token\"]\n tgt_bos_token = utils.default_str(tgt_bos_token,\n SpecialTokens.BOS)\n tgt_eos_token = utils.default_str(tgt_eos_token,\n SpecialTokens.EOS)\n if tgt_hparams[\"vocab_share\"]:\n if tgt_bos_token == src_vocab.bos_token and \\\n tgt_eos_token == src_vocab.eos_token:\n tgt_vocab = src_vocab\n else:\n tgt_vocab = Vocab(src_hparams[\"vocab_file\"],\n bos_token=tgt_bos_token,\n eos_token=tgt_eos_token)\n else:\n tgt_vocab = Vocab(tgt_hparams[\"vocab_file\"],\n bos_token=tgt_bos_token,\n eos_token=tgt_eos_token)\n\n return src_vocab, tgt_vocab", "def build_embeddings(opt, word_dict, for_encoder='src'):\n if for_encoder=='src':\n embedding_dim = opt.src_word_vec_size #512\n elif for_encoder=='tgt':\n embedding_dim = opt.tgt_word_vec_size\n elif for_encoder=='structure':\n embedding_dim = 64\n\n word_padding_idx = word_dict.stoi[Constants.PAD_WORD]\n num_word_embeddings = len(word_dict)\n \n if for_encoder=='src' or for_encoder=='tgt':\n\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=opt.position_encoding,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n word_vocab_size=num_word_embeddings,\n sparse=opt.optim == \"sparseadam\")\n elif for_encoder=='structure':\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=False,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n 
word_vocab_size=num_word_embeddings,\n sparse=opt.optim == \"sparseadam\")", "def build(self, hp, inputs=None):\n input_node = inputs\n embedding_dim = self.embedding_dim or hp.Choice('embedding_dim', [8, 16], default=8)\n output_node = tf.stack(\n [\n tf.tensordot(input_node[0][:, col_id], tf.keras.layers.Embedding(1, embedding_dim)(0), axes=0)\n for col_id in range(self.num_of_fields)\n ],\n axis=1\n )\n return output_node", "def build(self, unused_input_shapes):\n self.embedding_lookup = EmbeddingLookup(\n vocab_size=self.config.vocab_size,\n embedding_size=self.config.hidden_size,\n initializer_range=self.config.initializer_range,\n dtype=tf.float32,\n name=\"word_embeddings\")\n self.embedding_postprocessor = CustomEmbeddingPostprocessor(\n word_embedding_width=self.config.hidden_size,\n use_type_embeddings=True,\n token_type_vocab_size=self.config.type_vocab_size,\n use_position_embeddings=True,\n max_position_embeddings=self.config.max_position_embeddings,\n dropout_prob=self.config.hidden_dropout_prob,\n initializer_range=self.config.initializer_range,\n dtype=tf.float32,\n name=\"embeddings\")\n self.encoder = CustomTransformer(\n num_hidden_layers=self.config.num_hidden_layers,\n hidden_size=self.config.hidden_size,\n num_attention_heads=self.config.num_attention_heads,\n intermediate_size=self.config.intermediate_size,\n intermediate_activation=self.config.hidden_act,\n hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n backward_compatible=self.config.backward_compatible,\n float_type=self.float_type,\n name=\"encoder\")\n self.pooler_dense = tf.keras.layers.Dense(\n units=self.config.hidden_size,\n activation=\"tanh\",\n kernel_initializer=get_initializer(self.config.initializer_range))\n super(BertLayer, self).build(unused_input_shapes)", "def _get_embedding(self, data):\n # Tensor(n, c)\n cat = data['cat']\n return self.one_hot_embed(cat)", "def randomly_init_embeddings(self, embed_dim):\n self.embed_dim = embed_dim\n self.embeddings = np.random.rand(self.size(), embed_dim)\n for term in [self.pad_term, self.unk_term, self.eos_term]:\n self.embeddings[self.get_id(term)] = np.zeros([self.embed_dim])", "def _get_word_tensor(embedding_op: tf.Operation) -> tf.Tensor:\n assert embedding_op.type == 'AddV2'\n add = embedding_op.inputs[0].op\n assert add.type == 'AddV2'\n identity = add.inputs[0].op\n assert identity.type == 'Identity'\n gather = identity.inputs[0].op\n assert gather.type == 'ResourceGather'\n\n return gather.outputs[0]", "def create_embedding(skills):\n corpus = list(skills[\"description\"].values)\n embedder = SentenceTransformer(config[\"sentence_transformer\"][\"model\"])\n embedding = embedder.encode(corpus, show_progress_bar=True)\n return embedding", "def target_embedding_fairseq(self):\r\n if self.params[\"embedding.share\"]:\r\n return self.source_embedding_fairseq()\r\n return tf.get_variable(\r\n name=\"W\",\r\n shape=[self.target_vocab_info.total_size, self.params[\"embedding.dim\"]],\r\n initializer=tf.random_normal_initializer(\r\n mean=0.0,\r\n stddev=0.1))", "def __init__(self, config, mode=\"train\", input_reader=None):\n if mode not in [\"train\", \"eval\", \"encode\", \"test\"]:\n raise ValueError(\"Unrecognized mode: %s\" % mode)\n\n self.config = config\n self.mode = mode\n self.reader = input_reader if input_reader else tf.TFRecordReader()\n\n # Initializer used for non-recurrent weights.\n self.uniform_initializer = 
tf.random_uniform_initializer(\n minval=-self.config.uniform_init_scale,\n maxval=self.config.uniform_init_scale)\n\n\n # Each is an int64 Tensor with shape [batch_size, padded_length].\n self.encode_ids1 = None\n self.encode_ids2 = None\n\n # Boolean masks distinguishing real words (1) from padded words (0).\n # Each is an int32 Tensor with shape [batch_size, padded_length].\n self.encode_mask1 = None\n self.encode_mask2 = None\n\n # Input sentences represented as sequences of word embeddings.\n # Each is a float32 Tensor with shape [batch_size, padded_length, emb_dim].\n self.encode_emb1 = None\n self.encode_emb2 = None\n\n # The output from the sentence encoder.\n # A float32 Tensor with shape [batch_size, num_gru_units].\n self.thought_vectors1 = None\n self.thought_vectors2 = None\n\n self.label = None\n self.feature = None\n # The cross entropy losses and corresponding weights of the decoders. Used\n # for evaluation.\n self.target_cross_entropy_losses = []\n self.accuracy = []\n self.logits = []\n\n # The total loss to optimize.\n self.total_loss = None", "def _create_tf_embed_fnn(\n self,\n x_in: \"tf.Tensor\",\n layer_sizes: List[int],\n fnn_name: Text,\n embed_name: Text,\n ) -> \"tf.Tensor\":\n\n x = train_utils.create_tf_fnn(\n x_in,\n layer_sizes,\n self.droprate,\n self.C2,\n self._is_training,\n layer_name_suffix=fnn_name,\n )\n return train_utils.create_tf_embed(\n x,\n self.embed_dim,\n self.C2,\n self.similarity_type,\n layer_name_suffix=embed_name,\n )", "def _create_input(inputs: List[Tensor], initial: bool = False) \\\n -> Dict[str, Tensor]:\n word_embed = torch.stack(inputs, dim=0)\n seq_len, batch_size, embed_dim = word_embed.size()\n if not initial:\n # Add a dummy token at the end that stands for the token\n # to predict.\n word_embed = torch.cat([\n word_embed,\n word_embed.new_zeros(1, batch_size, embed_dim)\n ], dim=0)\n seq_len += 1\n segment_ids = word_embed.new_zeros(\n seq_len, batch_size, dtype=torch.long)\n return_dict = {\n \"word_embed\": word_embed,\n \"segment_ids\": segment_ids,\n }\n\n if not initial:\n # Only the dummy token is considered target.\n target_mapping = torch.cat([\n torch.zeros(1, seq_len - 1, batch_size),\n torch.ones(1, 1, batch_size)\n ], dim=1).to(device=word_embed.device)\n # Dummy token attends to nothing; actual tokens attend to all.\n permute_mask = torch.cat([\n torch.zeros(seq_len, seq_len - 1, batch_size),\n torch.ones(seq_len, 1, batch_size),\n ], dim=1).to(device=word_embed.device)\n return_dict.update({\n \"target_mapping\": target_mapping,\n \"permute_mask\": permute_mask,\n })\n\n return return_dict", "def __init__(self, voc_size=8000, embed_size=100, hid_size=100, trunc=4,\n model=None):\n\n self.log = logging.getLogger(\"TEST.Embed\")\n self.log.setLevel(logging.INFO)\n\n self.unknown_token = \"UNKNOWN_TOKEN\"\n self.sentence_start_token = \"SENTENCE_START\"\n self.sentence_end_token = \"SENTENCE_END\"\n\n if model is None:\n self.log.info(\"Initializing RNN parameters and functions...\")\n\n self.vocabulary_size = voc_size\n self.embed_size = embed_size\n self.hidden_size = hid_size\n self.bptt_truncate = trunc\n\n # Instantiate the network weights\n # I feel like the first and third are switched for some reason...\n # but it's pretty consistent in the example code. 
Perhaps it's\n # backwards for a purpose\n # The weights going from the input layer to the word embedding\n # layer (E, in tutorial)\n weights_ie = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (embed_size, voc_size))\n\n # The weights going from input layer to hidden layer\n # (U, in tutorial)\n weights_eh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, embed_size))\n\n # The weights going from hidden layer to hidden layer\n # (W, in tutorial)\n weights_hh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, hid_size))\n\n # The weights going from hidden layer to output layer\n # (V, in tutorial)\n weights_ho = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (voc_size, hid_size))\n\n # The bias for the hidden units (no bias applied to embedding layer)\n bias = np.zeros((3, hid_size))\n\n # The bias for the output units\n out_bias = np.zeros(voc_size)\n\n self.weights_ie = theano.shared(\n name='weights_ie',\n value=weights_ie.astype(theano.config.floatX))\n\n self.weights_eh = theano.shared(\n name='weights_eh',\n value=weights_eh.astype(theano.config.floatX))\n\n self.weights_hh = theano.shared(\n name='weights_hh',\n value=weights_hh.astype(theano.config.floatX))\n\n self.weights_ho = theano.shared(\n name='weights_ho',\n value=weights_ho.astype(theano.config.floatX))\n\n self.bias = theano.shared(\n name='bias',\n value=bias.astype(theano.config.floatX))\n\n self.out_bias = theano.shared(\n name='out_bias',\n value=out_bias.astype(theano.config.floatX))\n\n self.cache_ie = theano.shared(\n name='cache_ie',\n value=np.zeros(weights_ie.shape).astype(theano.config.floatX))\n\n self.cache_eh = theano.shared(\n name='cache_eh',\n value=np.zeros(weights_eh.shape).astype(theano.config.floatX))\n\n self.cache_hh = theano.shared(\n name='cache_hh',\n value=np.zeros(weights_hh.shape).astype(theano.config.floatX))\n\n self.cache_ho = theano.shared(\n name='cache_ho',\n value=np.zeros(weights_ho.shape).astype(theano.config.floatX))\n\n self.cache_bias = theano.shared(\n name='cache_bias',\n value=np.zeros(bias.shape).astype(theano.config.floatX))\n\n self.cache_out_bias = theano.shared(\n name='cache_out_bias',\n value=np.zeros(out_bias.shape).astype(theano.config.floatX))\n\n self.vocabulary = []\n self.word_to_index = {}\n self.index_to_word = []\n else:\n self.log.info(\"Loading model parameters from saved model...\")\n\n with open(model, \"rb\") as modelFile:\n params = cPickle.load(modelFile)\n\n self.vocabulary_size = params[0]\n self.embed_size = params[1]\n self.hidden_size = params[2]\n self.bptt_truncate = params[3]\n\n self.weights_ie = params[4]\n self.weights_eh = params[5]\n self.weights_hh = params[6]\n self.weights_ho = params[7]\n\n self.vocabulary = params[8]\n if not self.vocabulary[-1] == self.unknown_token:\n self.log.info(\"Appending unknown token\")\n self.vocabulary[-1] = self.unknown_token\n self.index_to_word = params[9]\n self.word_to_index = params[10]\n\n self.bias = params[11]\n self.out_bias = params[12]\n\n self.cache_ie = params[13]\n self.cache_eh = params[14]\n self.cache_hh = params[15]\n self.cache_ho = params[16]\n self.cache_bias = params[17]\n self.cache_out_bias = params[18]\n # End of if statement\n\n # Symbolic representation of one input sentence\n input = T.ivector('sentence')\n\n # Symbolic representation of the one output sentence\n output = T.ivector('sentence')\n\n # Symbolic representation of the cache decay for RMSprop\n decay = 
T.scalar('decay')\n\n # Stochastic Gradient Descent step\n learning_rate = T.scalar('learning_rate')\n\n def forward_propagate(word, previous_state):\n \"\"\"\n Vertically propagates one of the words.\n\n :type word: int\n :param word: the index of the current input word\n\n :type previous_state: T.dvector()\n :param word: the output of the hidden layer from the previous\n horizontal layer\n \"\"\"\n # Embedding layer\n word_vector = self.weights_ie[:, word]\n\n # GRU layer\n update_gate = T.nnet.hard_sigmoid(\n self.weights_eh[0].dot(word_vector) +\n self.weights_hh[0].dot(previous_state) +\n self.bias[0]\n )\n\n reset_gate = T.nnet.hard_sigmoid(\n self.weights_eh[1].dot(word_vector) +\n self.weights_hh[1].dot(previous_state) +\n self.bias[1]\n )\n\n hypothesis = T.tanh(\n self.weights_eh[2].dot(word_vector) +\n self.weights_hh[2].dot(previous_state * reset_gate) +\n self.bias[2]\n )\n\n current_state = (T.ones_like(update_gate) - update_gate) * hypothesis + update_gate * previous_state\n\n # Output layer\n current_output = T.nnet.softmax(\n self.weights_ho.dot(current_state) + self.out_bias\n )[0]\n\n # Not sure why current_output[0] and not just current_output...\n return [current_output, current_state]\n\n #######################################################################\n # Symbolically represents going through each input sentence word and\n # then calculating the state of the hidden layer and output word for\n # each word. The forward_propagate function is the one used to\n # generate the output word and hidden layer state.\n #######################################################################\n self.theano = {}\n\n [out, state], updates = theano.scan(\n forward_propagate,\n sequences=input,\n truncate_gradient=self.bptt_truncate,\n outputs_info=[None, dict(initial=T.zeros(self.hidden_size))],\n name=\"forward_propagate\"\n )\n\n # Predicts the output words for each word in the sentence\n prediction = T.argmax(out, axis=1)\n\n # Calculates the output error between the predicted output and the\n # actual output\n out_error = T.sum(T.nnet.categorical_crossentropy(out, output))\n\n # Symbolically represents gradient calculations for gradient descent\n d_weights_ie = T.grad(out_error, self.weights_ie)\n d_weights_eh = T.grad(out_error, self.weights_eh)\n d_weights_hh = T.grad(out_error, self.weights_hh)\n d_weights_ho = T.grad(out_error, self.weights_ho)\n d_bias = T.grad(out_error, self.bias)\n d_out_bias = T.grad(out_error, self.out_bias)\n\n # Symbolic theano functions\n self.forward_propagate = theano.function([input], out,\n name=\"forward_propagate\")\n self.predict = theano.function([input], prediction, name=\"predict\")\n self.calculate_error = theano.function([input, output], out_error,\n name=\"calculate_error\")\n self.bptt = theano.function([input, output],\n [d_weights_ie, d_weights_eh, d_weights_hh, d_weights_ho, d_bias,\n d_out_bias],\n name=\"bptt\")\n\n # RMSprop parameters\n cache_ie = (decay * self.cache_ie) + ((1 - decay) * d_weights_ie ** 2)\n cache_eh = (decay * self.cache_eh) + ((1 - decay) * d_weights_eh ** 2)\n cache_hh = (decay * self.cache_hh) + ((1 - decay) * d_weights_hh ** 2)\n cache_ho = (decay * self.cache_ho) + ((1 - decay) * d_weights_ho ** 2)\n cache_bias = (decay * self.cache_bias) + ((1 - decay) * d_bias ** 2)\n cache_out_bias = (decay * self.cache_out_bias) + ((1 - decay) * d_out_bias ** 2)\n eps = 1e-6 # Prevents division by 0\n\n self.sgd_step = theano.function(\n [input, output, learning_rate, theano.In(decay, value=0.9)],\n [],\n 
updates=[\n (self.weights_ie, self.weights_ie - learning_rate *\n d_weights_ie / (T.sqrt(self.cache_ie + eps))),\n (self.weights_eh, self.weights_eh - learning_rate *\n d_weights_eh / (T.sqrt(self.cache_eh + eps))),\n (self.weights_hh, self.weights_hh - learning_rate *\n d_weights_hh / (T.sqrt(self.cache_hh + eps))),\n (self.weights_ho, self.weights_ho - learning_rate *\n d_weights_ho / (T.sqrt(self.cache_ho + eps))),\n (self.bias, self.bias - learning_rate * d_bias /\n (T.sqrt(self.cache_bias + eps))),\n (self.out_bias, self.out_bias - learning_rate *\n d_out_bias / (T.sqrt(self.cache_out_bias + eps))),\n (self.cache_ie, cache_ie),\n (self.cache_eh, cache_eh),\n (self.cache_hh, cache_hh),\n (self.cache_ho, cache_ho),\n (self.cache_bias, cache_bias),\n (self.cache_out_bias, cache_out_bias)]\n )\n\n self.x_train = None\n self.y_train = None", "def _configure_embeddings(self):\r\n # TODO(omalleyt): Add integration tests.\r\n from tensorflow.python.keras.layers import embeddings\r\n try:\r\n from tensorboard.plugins import projector\r\n except ImportError:\r\n raise ImportError('Failed to import TensorBoard. Please make sure that '\r\n 'TensorBoard integration is complete.\"')\r\n config = projector.ProjectorConfig()\r\n for layer in self.model.layers:\r\n if isinstance(layer, embeddings.Embedding):\r\n embedding = config.embeddings.add()\r\n embedding.tensor_name = layer.embeddings.name\r\n\r\n if self.embeddings_metadata is not None:\r\n if isinstance(self.embeddings_metadata, str):\r\n embedding.metadata_path = self.embeddings_metadata\r\n else:\r\n if layer.name in embedding.metadata_path:\r\n embedding.metadata_path = self.embeddings_metadata.pop(layer.name)\r\n\r\n if self.embeddings_metadata:\r\n raise ValueError('Unrecognized `Embedding` layer names passed to '\r\n '`keras.callbacks.TensorBoard` `embeddings_metadata` '\r\n 'argument: ' + str(self.embeddings_metadata.keys()))\r\n\r\n class DummyWriter(object):\r\n \"\"\"Dummy writer to conform to `Projector` API.\"\"\"\r\n\r\n def __init__(self, logdir):\r\n self.logdir = logdir\r\n\r\n def get_logdir(self):\r\n return self.logdir\r\n\r\n writer = DummyWriter(self.log_dir)\r\n projector.visualize_embeddings(writer, config)", "def init_embedding(embeddings):\n bias = np.sqrt(3.0 / embeddings.size(1))\n torch.nn.init.uniform_(embeddings, -bias, bias)", "def build_image_embeddings(self):\n inception_output = image_embedding.inception_v3(\n self.images,\n trainable=self.train_inception,\n is_training=self.is_training())\n\n # Map inception output onto embedding space.\n with tf.variable_scope(\"image_embedding\") as scope:\n image_embeddings = tf.contrib.layers.fully_connected(\n inputs=inception_output,\n num_outputs=self.config.sentence_embedding_size,\n activation_fn=None,\n weights_initializer=self.initializer,\n biases_initializer=None,\n scope=scope)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n image_embeddings = tf.nn.dropout(image_embeddings, self.config.dropout_keep_prob_encoder)\n\n # Save the embedding size in the graph.\n tf.constant(self.config.sentence_embedding_size, name=\"image_embedding_size\")\n\n self.image_embeddings = image_embeddings", "def get_model_bicond_sepembed(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n inputs_cond = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n 
cont_train = True\n if pretrain == \"pre\": # continue training embeddings or not. Currently works better to continue training them.\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1), # input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n embedding_matrix_cond = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1),\n name=\"embedding_matrix\", trainable=cont_train)\n\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n embedded_inputs_cond = tf.nn.embedding_lookup(embedding_matrix_cond, inputs_cond)\n\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs)]\n inputs_cond_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs_cond)]\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n start_state = tf.zeros([batch_size, lstm_encoder.state_size])\n\n ### FORWARD\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n fw_outputs, fw_states = lstm_encoder(inputs_list, start_state, \"LSTM\")\n\n # running a second LSTM conditioned on the last state of the first\n fw_outputs_cond, fw_states_cond = lstm_encoder(inputs_cond_list, fw_states[-1],\n \"LSTMcond\")\n\n fw_outputs_fin = fw_outputs_cond[-1]\n\n ### BACKWARD\n bw_outputs, bw_states = lstm_encoder(inputs_list[::-1], start_state, \"LSTM_bw\")\n bw_outputs_cond, bw_states_cond = lstm_encoder(inputs_cond_list[::-1], bw_states[-1],\n \"LSTMcond_bw\")\n bw_outputs_fin = bw_outputs_cond[-1]\n\n outputs_fin = tf.concat(1, [fw_outputs_fin, bw_outputs_fin])\n\n\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh, bias=True)(outputs_fin) # tf.nn.softmax\n else:\n model = Projector(target_size, non_linearity=tf.nn.softmax, bias=True)(outputs_fin) # tf.nn.softmax\n\n return model, [inputs, inputs_cond]", "def embeddings_layers_init(self):\n\n user_embeddings = tf.keras.layers.Embedding(\n self.n_users, self.user_dim, input_length=1)\n\n item_embeddings = tf.keras.layers.Embedding(\n self.n_items, self.item_dim, input_length=1)\n\n return user_embeddings, item_embeddings", "def embed_text(tensors, embeddings):\n wids = tensors[\"wids\"]\n cids = tensors[\"cids\"]\n\n embedding_weights = embeddings.get_initialized_params(trainable=False)\n word_vecs = tf.nn.embedding_lookup(embedding_weights, wids)\n char_emb = common_layers.character_cnn(cids)\n return tf.concat([word_vecs, char_emb], -1)", "def build_encoder(tparams, options):\n\t# word embedding (source)\n\tembedding = tensor.tensor3('embedding', dtype='float32')\n\tx_mask = tensor.matrix('x_mask', dtype='float32')\n\n\t# encoder\n\tproj = get_layer(options['encoder'])[1](tparams, embedding, options,\n\t\t\t\t\t\t\t\t\t\t\tprefix='encoder',\n\t\t\t\t\t\t\t\t\t\t\tmask=x_mask)\n\tctx = proj[0][-1]\n\n\treturn embedding, x_mask, ctx", "def build_bilstm(self, verbose=True):\r\n word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')\r\n inputs = [word_ids]\r\n\r\n if self._params.use_pretrain_embedding:\r\n if verbose: logging.info(\"initial word embedding with pretrained embeddings\")\r\n if self._params.word_embedding_dim == 100:\r\n glove_file = 
self._params.data_dir + '/glove.6B.100d.txt'\r\n elif self._params.word_embedding_dim == 300:\r\n glove_file = self._params.data_dir + '/glove.42B.300d.txt'\r\n else:\r\n logging.error(\"we only support glove embedding with dimension 100 or 300\")\r\n raise ValueError(\"unmatch word dimension, we only support glove embedding with dimension 100 or 300\")\r\n glove_embedding_index = load_glove(glove_file, self._params.word_embedding_dim)\r\n word_vocab = self.input_processor.word_vocab.vocab\r\n glove_embeddings_matrix = np.zeros([len(word_vocab), self._params.word_embedding_dim])\r\n for word, i in word_vocab.items():\r\n vector = glove_embedding_index.get(word)\r\n if vector is not None:\r\n glove_embeddings_matrix[i] = vector\r\n \r\n word_embeddings = Embedding(input_dim=glove_embeddings_matrix.shape[0],\r\n output_dim=glove_embeddings_matrix.shape[1],\r\n trainable=False,\r\n mask_zero=True,\r\n weights=[glove_embeddings_matrix],\r\n name='word_embedding')(word_ids)\r\n else:\r\n word_embeddings = Embedding(input_dim=self._params.word_vocab_size,\r\n output_dim=self._params.word_embedding_dim,\r\n mask_zero=True,\r\n name='word_embedding')(word_ids)\r\n\r\n input_embeddings = [word_embeddings]\r\n if self._params.use_char:\r\n char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')\r\n inputs.append(char_ids)\r\n if self._params.char_feature == \"lstm\":\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n mask_zero=True,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level lstm features\")\r\n char_feas = TimeDistributed(Bidirectional(LSTM(self._params.char_lstm_size)), name=\"char_lstm\")(char_embeddings)\r\n elif self._params.char_feature == \"cnn\":\r\n # cnn do not support mask\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level cnn features\")\r\n char_feas = char_cnn_encode(char_embeddings, self._params.n_gram_filter_sizes, self._params.n_gram_filter_nums)\r\n else:\r\n raise ValueError('char feature must be lstm or cnn')\r\n\r\n input_embeddings.append(char_feas)\r\n\r\n if self._params.use_pos:\r\n if verbose: logging.info(\"use pos tag features\")\r\n pos_ids = Input(batch_shape=(None, None), dtype='int32', name='pos_input')\r\n inputs.append(pos_ids)\r\n\r\n\r\n pos_embeddings = Embedding(input_dim=self._params.pos_vocab_size,\r\n output_dim=self._params.pos_embedding_dim,\r\n mask_zero=True,\r\n name='pos_embedding')(pos_ids)\r\n input_embeddings.append(pos_embeddings)\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, None), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n input_embeddings.append(dict_embeddings)\r\n\r\n input_embedding = Concatenate(name=\"input_embedding\")(input_embeddings) if len(input_embeddings)>1 else input_embeddings[0]\r\n input_embedding_ln = LayerNormalization(name='input_layer_normalization')(input_embedding)\r\n #input_embedding_bn = BatchNormalization()(input_embedding_ln)\r\n input_embedding_drop = Dropout(self._params.dropout, 
name=\"input_embedding_dropout\")(input_embedding_ln)\r\n\r\n z = Bidirectional(LSTM(units=self._params.main_lstm_size, return_sequences=True, dropout=0.2, recurrent_dropout=0.2),\r\n name=\"main_bilstm\")(input_embedding_drop)\r\n z = Dense(self._params.fc_dim, activation='tanh', name=\"fc_dense\")(z)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n model.compile(loss=loss, optimizer=self._params.optimizer)\r\n\r\n self.model = model", "def call(self, inputs):\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n word_embeddings = unpacked_inputs[0]\n token_type_ids = unpacked_inputs[1]\n input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = word_embeddings\n if self.use_type_embeddings:\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n token_type_embeddings = tf.gather(self.type_embeddings,\n flat_token_type_ids)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if self.use_position_embeddings:\n position_embeddings = tf.expand_dims(\n tf.slice(self.position_embeddings, [0, 0], [seq_length, width]),\n axis=0)\n\n output += position_embeddings\n\n output = self.output_layer_norm(output)\n output = self.output_dropout(output)\n\n return output", "def call(self, inputs):\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n word_embeddings = unpacked_inputs[0]\n token_type_ids = unpacked_inputs[1]\n input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = word_embeddings\n if self.use_type_embeddings:\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n token_type_embeddings = tf.gather(self.type_embeddings,\n flat_token_type_ids)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if self.use_position_embeddings:\n position_embeddings = tf.expand_dims(\n tf.slice(self.position_embeddings, [0, 0], [seq_length, width]),\n axis=0)\n\n output += position_embeddings\n\n output = self.output_layer_norm(output)\n output = self.output_dropout(output)\n\n return output", "def testEmbeddings(self):\n input_data = {\n \"x\":\n constant_op.constant(\n np.array(np.random.random_sample((20)), dtype=np.int32))\n }\n\n class EmbeddingModel(keras.Model):\n\n def __init__(self):\n super(EmbeddingModel, self).__init__()\n self.shared_weights = self.add_weight(\n \"weights\",\n shape=(2000, 300),\n dtype=dtypes.float32,\n initializer=init_ops.random_normal_initializer(\n mean=0.0, stddev=300**(-0.5)))\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=(20), dtype=dtypes.int32)\n ])\n def func(self, x):\n return array_ops.gather(self.shared_weights, x)\n\n model = EmbeddingModel()\n root, output_func = self._freezeModel(model.func)\n self._testConvertedFunction(root, root.f, output_func, input_data)", "def instantiate_weights(self):\n 
self.product_embeddings = tf.get_variable(\n name='product_embeddings',\n shape=[50000, 300],\n dtype=tf.float32\n )\n self.aisle_embeddings = tf.get_variable(\n name='aisle_embeddings',\n shape=[250, 50],\n dtype=tf.float32\n )\n self.department_embeddings = tf.get_variable(\n name='department_embeddings',\n shape=[50, 10],\n dtype=tf.float32\n )\n self.W_relu = tf.get_variable(\"W_relu\",shape=[670, 30]) #这个参数后续需要自适应\n self.b_relu = tf.get_variable(\"bias_relu\",shape=[30]) \n self.W_projection = tf.get_variable(\"W_projection\",shape=[30, 1]) \n self.b_projection = tf.get_variable(\"bias_projection\",shape=[1])", "def embedding_model(\n n_factors: int = 50,\n window: int = 5,\n min_count: int = 1,\n learning_rate: float = 0.05,\n negative_samples: int = 10,\n negative_exponent: float = 0.75,\n workers: int = 4,\n n_iterations: int = 10,\n batch_size: int = 10000,\n skip_gram: int = 0,\n) -> Word2Vec:\n logger.info(\"Defining Embedding Neural Network model.\")\n model = Word2Vec(\n vector_size=n_factors,\n window=window,\n min_count=min_count,\n alpha=learning_rate,\n negative=negative_samples,\n ns_exponent=negative_exponent,\n workers=workers,\n epochs=n_iterations,\n batch_words=batch_size,\n sg=skip_gram,\n compute_loss=True,\n )\n return model", "def __init__(self, input_dim, output_dim, name='embedding_layer'):\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.name = name\n\n # Randomly generate weights\n self.embeddings = shared((input_dim, output_dim),\n self.name + '__embeddings')\n\n # Define parameters\n self.params = [self.embeddings]", "def _embed(self):\n batch_size = tf.shape(self.p)[0]\n with tf.variable_scope(\"emb\"):\n with tf.variable_scope(\"char\"):\n pc_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_embed, self.pc), \n [batch_size * self.max_p_len, self.max_w_len, self.vocab.char_embed_dim])\n qc_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_embed, self.qc), \n [batch_size * self.max_q_len, self.max_w_len, self.vocab.char_embed_dim])\n cell_fw = tf.contrib.rnn.GRUCell(self.char_hidden_size)\n cell_bw = tf.contrib.rnn.GRUCell(self.char_hidden_size)\n _, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, pc_emb, self.pc_length, dtype=tf.float32)\n pc_emb = tf.concat([state_fw, state_bw], axis=1)\n _, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, qc_emb, self.qc_length, dtype=tf.float32)\n qc_emb = tf.concat([state_fw, state_bw], axis=1)\n pc_emb = tf.reshape(pc_emb, [batch_size, self.max_p_len, 2 * self.char_hidden_size])\n qc_emb = tf.reshape(qc_emb, [batch_size, self.max_q_len, 2 * self.char_hidden_size])\n\n with tf.name_scope(\"word\"):\n p_emb = tf.nn.embedding_lookup(self.word_embed, self.p)\n q_emb = tf.nn.embedding_lookup(self.word_embed, self.q)\n\n with tf.name_scope(\"pos\"):\n p_pos_emb = tf.nn.embedding_lookup(self.pos_embed, self.p_pos)\n q_pos_emb = tf.nn.embedding_lookup(self.pos_embed, self.q_pos)\n \n with tf.name_scope(\"em\"):\n sh = tf.shape(self.p_em)\n resh = [sh[0], sh[1], 1]\n p_em_feat = tf.reshape(tf.cast(self.p_em, dtype=tf.float32), shape=resh)\n\n self.p_emb = tf.concat([p_emb, pc_emb, p_pos_emb, p_em_feat], axis=2)\n self.q_emb = tf.concat([q_emb, qc_emb, q_pos_emb], axis=2)", "def cbow_model(vocabulary_size, embedding_size, context_length, batch_size,\n num_sampled, valid_examples, learning_rate):\n input_batch_size = context_length * batch_size\n\n graph = tf.Graph()\n with graph.as_default():\n # Input data.\n tf_train_dataset = 
tf.placeholder(tf.int32, shape=[input_batch_size])\n tf_train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n\n # This sums the input embeddings in a batch of size input_batch_size,\n # by group of context_length. This results in an input vector with\n # batch_size rows.\n word_mean_op = tf.constant((1.0 / context_length) *\n np.kron(np.eye(batch_size), np.ones([1, context_length])), dtype=tf.float32)\n\n # Variables.\n embeddings = tf.Variable(tf.random_uniform(\n [vocabulary_size, embedding_size], -1.0, 1.0))\n softmax_weights = tf.Variable(tf.truncated_normal(\n [vocabulary_size, embedding_size], stddev=1.0 / np.sqrt(embedding_size)))\n softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))\n\n # Model.\n # Look up embeddings for inputs.\n embed = tf.nn.embedding_lookup(embeddings, tf_train_dataset)\n word_means = tf.matmul(word_mean_op, embed)\n # Compute the softmax loss, using a sample of the negative labels each time.\n loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(\n softmax_weights, softmax_biases, word_means, tf_train_labels, num_sampled, vocabulary_size))\n\n # Optimizer.\n optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(loss)\n\n # Compute the similarity between minibatch examples and all embeddings.\n # We use the cosine distance:\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n\n similarity = None\n if valid_examples is not None:\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)\n similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))\n\n tf_graph = {\n 'graph': graph,\n 'data_ph': tf_train_dataset,\n 'labels_ph': tf_train_labels }\n\n return tf_graph, optimizer, loss, normalized_embeddings, similarity", "def __init__(self, \n k=DEFAULT_EMBEDDING_SIZE, \n eta=DEFAULT_ETA, \n epochs=DEFAULT_EPOCH, \n batches_count=DEFAULT_BATCH_COUNT, \n seed=DEFAULT_SEED,\n embedding_model_params={},\n optimizer=DEFAULT_OPTIM, \n optimizer_params={'lr':DEFAULT_LR},\n loss=DEFAULT_LOSS, \n loss_params={},\n regularizer=DEFAULT_REGULARIZER, \n regularizer_params={},\n verbose=DEFAULT_VERBOSE):\n # Store for restoring later.\n self.all_params = \\\n {\n 'k': k,\n 'eta': eta,\n 'epochs': epochs,\n 'batches_count': batches_count,\n 'seed': seed,\n 'embedding_model_params': embedding_model_params,\n 'optimizer': optimizer,\n 'optimizer_params': optimizer_params,\n 'loss': loss,\n 'loss_params': loss_params,\n 'regularizer': regularizer,\n 'regularizer_params': regularizer_params,\n 'verbose': verbose\n\n }\n tf.reset_default_graph()\n\n self.is_filtered = False\n self.loss_params = loss_params\n\n self.embedding_model_params = embedding_model_params\n\n self.k = k\n self.seed = seed\n self.epochs = epochs\n self.eta = eta\n self.regularizer_params = regularizer_params\n self.batches_count = batches_count\n if batches_count == 1:\n logger.warn(\n 'batches_count=1. All triples will be processed in the same batch. This may introduce memory issues.')\n print('WARN: when batches_count=1 all triples will be processed in the same batch. 
'\n 'This may introduce memory issues.')\n\n try:\n self.loss = LOSS_REGISTRY[loss](self.eta, self.loss_params, verbose=verbose)\n except KeyError:\n msg = 'Unsupported loss function: {}'.format(loss)\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n if regularizer is not None:\n self.regularizer = REGULARIZER_REGISTRY[regularizer](self.regularizer_params, verbose=verbose)\n else:\n self.regularizer = regularizer\n except KeyError:\n msg = 'Unsupported regularizer: {}'.format(regularizer)\n logger.error(msg)\n raise ValueError(msg)\n\n self.optimizer_params = optimizer_params\n if optimizer == \"adagrad\":\n self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.optimizer_params.get('lr', DEFAULT_LR))\n elif optimizer == \"adam\":\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.optimizer_params.get('lr', DEFAULT_LR))\n elif optimizer == \"sgd\":\n self.optimizer = tf.train.GradientDescentOptimizer(\n learning_rate=self.optimizer_params.get('lr', DEFAULT_LR))\n elif optimizer == \"momentum\":\n self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.optimizer_params.get('lr', DEFAULT_LR),\n momentum=self.optimizer_params.get('momentum',\n DEFAULT_MOMENTUM))\n else:\n msg = 'Unsupported optimizer: {}'.format(optimizer)\n logger.error(msg)\n raise ValueError(msg)\n\n self.verbose = verbose\n\n self.rnd = check_random_state(self.seed)\n\n self.initializer = tf.contrib.layers.xavier_initializer(uniform=False, seed=self.seed)\n self.tf_config = tf.ConfigProto(allow_soft_placement=True)\n self.tf_config.gpu_options.allow_growth = True\n self.sess_train = None\n self.sess_predict = None\n self.trained_model_params = []\n self.is_fitted = False\n self.eval_config = {}", "def get_model_tweetonly(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n cont_train = True\n if pretrain == \"pre\":\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1), # input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs)]\n\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size)\n start_state = tf.zeros([batch_size, lstm_encoder.state_size])\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n outputs, states = lstm_encoder(inputs_list, start_state, \"LSTM\")\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n outputs_fin = outputs[-1]\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh)(outputs_fin) #tf.nn.softmax\n else:\n model = Projector(target_size, non_linearity=tf.nn.softmax)(outputs_fin) # tf.nn.softmax\n\n\n return model, [inputs]", "def __init__(self, vocab, embed_size=512, dropout_rate=0.1, max_len=200):\n super(DecoderEmbeddings, self).__init__()\n pad_token_idx = 0 #vocab.tokenizer.ids_to_tokens[0]\n assert vocab.tokenizer.ids_to_tokens[0] == '[PAD]'\n self.embeddings = 
nn.Embedding(len(vocab.tokenizer.ids_to_tokens), embed_size, padding_idx=pad_token_idx)\n self.positional_encoding = PositionalEncoding(d_model=embed_size, dropout=dropout_rate, max_len=max_len)", "def __init__(self,\n vocab_size,\n embed_dim,\n unit_dim,\n window_size,\n hidden_activation,\n pooling_type,\n dropout,\n num_gpus=1,\n default_gpu_id=0,\n regularizer=None,\n random_seed=0,\n trainable=True,\n scope=\"char_feat\"):\n self.vocab_size = vocab_size\n self.embed_dim = embed_dim\n self.unit_dim = unit_dim\n self.window_size = window_size\n self.hidden_activation = hidden_activation\n self.pooling_type = pooling_type\n self.dropout = dropout\n self.num_gpus = num_gpus\n self.default_gpu_id = default_gpu_id\n self.regularizer = regularizer\n self.random_seed = random_seed\n self.trainable = trainable\n self.scope = scope\n \n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n self.embedding_layer = create_embedding_layer(self.vocab_size, self.embed_dim, False,\n None, self.num_gpus, self.default_gpu_id, None, self.random_seed, self.trainable)\n \n self.dropout_layer = create_dropout_layer(self.dropout, self.num_gpus, self.default_gpu_id, self.random_seed)\n \n self.conv_layer = create_convolution_layer(\"multi_1d\", 1, self.embed_dim,\n self.unit_dim, 1, self.window_size, 1, \"SAME\", self.hidden_activation, [0.0], None,\n False, False, True, self.num_gpus, self.default_gpu_id, self.regularizer, self.random_seed, self.trainable)\n \n self.pooling_layer = create_pooling_layer(self.pooling_type, self.num_gpus, self.default_gpu_id)", "def __init__(self,\n config,\n is_training,\n inputs):\n self._input = inputs\n vocab_size = config.vocab_size # num of possible words\n self._gpu_devices = [i for i in range(len(get_gpu_devices(FLAGS.gpu_devices)))]\n self._gpu_num = len(self._gpu_devices)\n self._cpu_device = FLAGS.cpu_device\n\n with tf.name_scope(\"model_variables\"):\n with tf.name_scope(\"global_step\"):\n self._global_step = tf.Variable(0, name='global_step', trainable=False)\n\n with tf.name_scope(\"epoch_counter\"):\n self._epoch_count = tf.Variable(0, name='epoch', trainable=False)\n self._epoch_inc = tf.assign(self._epoch_count, tf.add(self._epoch_count, tf.constant(1)))\n self._epoch_reset = tf.assign(self._epoch_count, tf.constant(0))\n\n # ptrs to the lstm cell object, ltsm initial state op and final state\n self._cell = []\n self._initial_state = []\n self._final_state = []\n\n # construct the embedding layer on cpu device\n with tf.variable_scope(\"embedding\"), tf.device(self._cpu_device):\n # the embedding matrix is allocated in the cpu to save valuable gpu memory for the model.\n embedding_map = tf.get_variable(\n name=\"embedding\", shape=[vocab_size, config.embedding_size], dtype=tf.float32)\n b_embed_in = tf.get_variable(name=\"b_embed_in\", shape=[config.embedding_size], dtype=tf.float32)\n embedding = tf.nn.embedding_lookup(embedding_map, self._input.input_data) + b_embed_in\n\n # non variational wrapper for the embedding\n if is_training and config.keep_prob_embed < 1:\n embedding_out = tf.nn.dropout(embedding,\n config.keep_prob_embed) # / config.keep_prob_embed\n else:\n embedding_out = embedding\n\n # split input to devices if needed\n with tf.name_scope(\"split_inputs\"):\n if self._gpu_num > 1:\n embedding_out = tf.split(embedding_out, self._gpu_num)\n targets = tf.split(inputs.targets, self._gpu_num)\n else:\n embedding_out = [embedding_out]\n targets = [inputs.targets]\n\n # construct the rest of the model on every gpu\n all_loss = [] # 2D array of 
scalar loss; [i,j] element stands for the loss of the j-th layer of the i-th gpu\n all_grads = [] # 2D array of grads; [i,j] element stands for the grad of the j-th layer of the i-th gpu\n\n with tf.variable_scope(\"gpus\"):\n for i in range(self._gpu_num):\n with tf.device(\"/gpu:%d\" % self._gpu_devices[i]), tf.name_scope(\"gpu-%d\" % i):\n loss, grads, cell, initial_state, final_state, cache_data = self.complete_model(embedding_out[i],\n embedding_map,\n config,\n is_training,\n inputs,\n targets[i])\n\n self._cache_data = cache_data\n self._cell.append(cell)\n self._initial_state.append(initial_state)\n self._final_state.append(final_state)\n all_loss.append(loss)\n all_grads.append(grads)\n\n # reuse variables for the next gpu\n tf.get_variable_scope().reuse_variables()\n\n # reduce per-gpu-loss to total loss\n with tf.name_scope(\"reduce_loss\"):\n self._loss = self.reduce_loss(all_loss)\n\n if config.dynamic_eval is not None:\n # average grads ; sync point\n with tf.name_scope(\"average_grads\"):\n averaged_grads = self.average_grads(all_grads)\n\n # get trainable vars\n tvars = tf.trainable_variables()\n\n self._dynamic_eval = DynamicEval(config, tvars, averaged_grads)\n\n self._train_op = self._dynamic_eval.update_op()", "def _create_embedding_variable(self, name, initial_value):\n if name not in self._tls._embed_variables:\n embed_var = tf.Variable(\n initial_value,\n name=name + str(threading.get_ident()),\n shape=(None, None),\n dtype=tf.float32,\n trainable=False,\n )\n self._tls._embed_variables[name] = embed_var\n else:\n embed_var = self._tls._embed_variables[name]\n embed_var.assign(initial_value)\n return embed_var", "def instantiate_weights(self):\n with tf.name_scope(\"decoder_init_state\"):\n self.W_initial_state = tf.get_variable(\"W_initial_state\", shape=[self.hidden_size, self.hidden_size*2], initializer=self.initializer)\n self.b_initial_state = tf.get_variable(\"b_initial_state\", shape=[self.hidden_size*2])\n with tf.name_scope(\"embedding_projection\"): # embedding matrix\n self.Embedding = tf.get_variable(\"Embedding\", shape=[self.vocab_size, self.embed_size],initializer=self.initializer) # [vocab_size,embed_size] tf.random_uniform([self.vocab_size, self.embed_size],-1.0,1.0)\n self.Embedding_label = tf.get_variable(\"Embedding_label\", shape=[self.num_classes, self.embed_size*2],dtype=tf.float32) #,initializer=self.initializer\n self.W_projection = tf.get_variable(\"W_projection\", shape=[self.hidden_size*2, self.num_classes],\n initializer=self.initializer) # [embed_size,label_size]\n self.b_projection = tf.get_variable(\"b_projection\", shape=[self.num_classes])\n\n # GRU parameters:update gate related\n with tf.name_scope(\"gru_weights_encoder\"):\n self.W_z = tf.get_variable(\"W_z\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_z = tf.get_variable(\"U_z\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.b_z = tf.get_variable(\"b_z\", shape=[self.hidden_size])\n # GRU parameters:reset gate related\n self.W_r = tf.get_variable(\"W_r\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_r = tf.get_variable(\"U_r\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.b_r = tf.get_variable(\"b_r\", shape=[self.hidden_size])\n\n self.W_h = tf.get_variable(\"W_h\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_h = tf.get_variable(\"U_h\", shape=[self.embed_size, self.hidden_size], 
initializer=self.initializer)\n self.b_h = tf.get_variable(\"b_h\", shape=[self.hidden_size])\n\n with tf.name_scope(\"gru_weights_decoder\"):\n self.W_z_decoder = tf.get_variable(\"W_z_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_z_decoder = tf.get_variable(\"U_z_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.C_z_decoder = tf.get_variable(\"C_z_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer) #TODO\n self.b_z_decoder = tf.get_variable(\"b_z_decoder\", shape=[self.hidden_size*2])\n # GRU parameters:reset gate related\n self.W_r_decoder = tf.get_variable(\"W_r_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_r_decoder = tf.get_variable(\"U_r_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.C_r_decoder = tf.get_variable(\"C_r_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer) #TODO\n self.b_r_decoder = tf.get_variable(\"b_r_decoder\", shape=[self.hidden_size*2])\n\n self.W_h_decoder = tf.get_variable(\"W_h_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_h_decoder = tf.get_variable(\"U_h_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer) #TODO\n self.C_h_decoder = tf.get_variable(\"C_h_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer)\n self.b_h_decoder = tf.get_variable(\"b_h_decoder\", shape=[self.hidden_size*2])\n\n with tf.name_scope(\"full_connected\"):\n self.W_fc=tf.get_variable(\"W_fc\",shape=[self.hidden_size*2,self.hidden_size])\n self.a_fc=tf.get_variable(\"a_fc\",shape=[self.hidden_size])" ]
[ "0.74340177", "0.7306449", "0.72952855", "0.70558393", "0.7044115", "0.68895066", "0.67812735", "0.6750932", "0.67492104", "0.67476356", "0.67427427", "0.6717155", "0.6711093", "0.6680487", "0.6676144", "0.6672202", "0.6647878", "0.6621589", "0.66202843", "0.65980107", "0.658886", "0.6586531", "0.6531334", "0.65286624", "0.65250295", "0.6519084", "0.65062785", "0.64712554", "0.6382853", "0.6364604", "0.63344926", "0.63317245", "0.6310544", "0.6304809", "0.6304148", "0.62498164", "0.6203061", "0.61877984", "0.61850864", "0.61698747", "0.61607033", "0.61514133", "0.61407125", "0.61136097", "0.6103015", "0.6082179", "0.6070234", "0.6060752", "0.6043806", "0.603122", "0.60290176", "0.6023309", "0.59869546", "0.59678787", "0.59638184", "0.5957849", "0.5954293", "0.5939391", "0.5937538", "0.5935299", "0.5929666", "0.5926617", "0.59260964", "0.5903822", "0.5903713", "0.59028614", "0.59006244", "0.5893193", "0.5883604", "0.5877171", "0.58761704", "0.58755654", "0.58658624", "0.584614", "0.584295", "0.5840003", "0.5838431", "0.5829407", "0.58258873", "0.58249336", "0.58105195", "0.5804602", "0.58002186", "0.57970023", "0.57943845", "0.5788356", "0.5788356", "0.57842904", "0.5780896", "0.57787", "0.5772783", "0.5768228", "0.5762246", "0.57596517", "0.575167", "0.5741646", "0.57399917", "0.5739313", "0.5724532", "0.57237846" ]
0.66311824
17
Retrieve the dict-form of all of the transactions in a given bar or for the whole simulation.
def transactions(self, dt=None): if dt is None: # flatten the by-day transactions return [ txn for by_day in itervalues(self._processed_transactions) for txn in by_day ] return self._processed_transactions.get(dt, [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transaction_data(self):\n return list(map(lambda transaction:transaction.to_json(), self.transaction_map.values()))", "def transaction_base() -> Dict[str, Any]:\n return {\n \"first_name\": \"Donald\",\n \"last_name\": \"Duck\",\n \"company\": \"Duck Co\",\n \"email\": \"[email protected]\",\n \"telephone\": \"991234567\",\n \"mobile\": \"+358991234567\",\n \"street\": \"1313 Webfoot Walk\",\n \"postal_code\": \"00000\",\n \"city\": \"Duckburg\",\n \"country\": \"US\",\n \"information\": \"Quack, damn you!\",\n \"items\": [],\n }", "def get_transactions_as_json(self):\n transactions = self.transaction_map.values() \n transactions_json = list(map( lambda t: t.to_json(), transactions )) \n return transactions_json", "def transactions(self, transactions: list):\n num_txs = len(transactions)\n transactions_size = num_txs * self._message_size['tx']\n return {\n 'id': 'transactions',\n 'transactions': transactions,\n 'size': kB_to_MB(transactions_size)\n }", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.budget.id}/accounts/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def get_transaction_data():\n data = parse_json()\n income_instances = create_transactions(data['incomes'])\n expense_instances = create_transactions(data['expenses'])\n for expense in expense_instances:\n expense.amount = -(expense.amount)\n transactions = income_instances + expense_instances\n return transactions", "def test_wallets_get_transaction_list(self):\n pass", "def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list", "def unbalanced(self):\n # TODO: Find a way to make a sql query to return all unbalanced transactions\n return []", "def _buildTotalsDict(self, fromdt, todt):\r\n pairs = [(t, t.effectForPeriod(fromdt, todt)) for t in self.transactions]\r\n return dict(pairs)", "def transactions(self):\r\n return tx.AccountTransactions(self)", "def load_data():\n dataFile = open('transactions.json', 'r')\n data = json.load(dataFile)\n transactions = data['transactions']#TID\n items = data['items']#item sets\n return items, transactions", "def report(self):\n result = {}\n result_buy = 0\n result_sell = 0\n result_outcome = 0\n for pair in self.pairs:\n orders = self.get_orders_for(pair)\n buy, 
sell = self.get_buy_and_sell_costs(orders)\n outcome = sell - buy\n result_pair = {\"buy\": buy, \"sell\": sell, \"outcome\": outcome}\n result.update({pair: result_pair})\n result_buy += buy\n result_sell += sell\n result_outcome += outcome\n\n # Add global results\n all = {\"buy\": result_buy, \"sell\": result_sell, \"outcome\": result_outcome}\n result.update({\"all\": all})\n return result", "def transactions(self):\r\n return tx.Transactions(self)", "def transactions(self):\n return copy.deepcopy(self._transactions)", "def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def new_get_buys_transaction_history(self, cb_account):\n date: datetime = now()\n if cb_account == \"wallet_id_btc\":\n return MockAPIObject(\n data=[{\n \"created_at\": str(date + timedelta(days=-1)),\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 10,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 10,\n \"currency\": \"BTC\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 1,\n \"currency\": \"EUR\"\n }\n }]\n }, {\n \"created_at\": str(date + timedelta(days=1)),\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 5,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 5,\n \"currency\": \"BTC\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 0.5,\n \"currency\": \"EUR\"\n }\n }]\n }])\n else:\n return MockAPIObject()", "def dict(self):\n return {\"data\": self.data.dict(), \"inventory\": self.inventory.dict()}", "def get_pending_transactions():\n\n return History.get_pending().get()", "def _amount_all(self):\n res = {}\n ut_obj = self.env['l10n.ut']\n for iwdl_brw in self.browse(self.ids):\n # Using a clousure to make this call shorter\n f_xc = ut_obj.sxc(\n iwdl_brw.invoice_id.company_id.currency_id.id,\n iwdl_brw.invoice_id.currency_id.id,\n iwdl_brw.islr_wh_doc_id.date_uid)\n\n res[iwdl_brw.id] = {\n 'amount': (iwdl_brw.base_amount * (iwdl_brw.retencion_islr / 100.0)) or 0.0,\n 'currency_amount': 0.0,\n 'currency_base_amount': 0.0,\n }\n for xml_brw in iwdl_brw.xml_ids:\n res[iwdl_brw.id]['amount'] = xml_brw.wh\n res[iwdl_brw.id]['currency_amount'] = f_xc(\n res[iwdl_brw.id]['amount'])\n res[iwdl_brw.id]['currency_base_amount'] = f_xc(\n iwdl_brw.base_amount)", "def buildMarketOrdersData(self):\n d = {}\n for orderID, myMarketOrderDict in self.frame.mode.game.marketOrders.iteritems():\n if myMarketOrderDict['system'] == self.mySystemDict['id']:\n amount = myMarketOrderDict['amount']\n min = myMarketOrderDict['min']\n max = myMarketOrderDict['max']\n value = myMarketOrderDict['value']\n if myMarketOrderDict['type'] == 'sell':\n s = 'SELL (%d %s) for MIN=%d' % (amount, value, min)\n elif myMarketOrderDict['type'] == 'buy-all':\n s = 'BUY ALL (%d %s) for MAX=%d' % (amount, value, max)\n elif myMarketOrderDict['type'] == 'buy-any':\n s = 'BUY ANY (%d %s) for MAX=%d' % (amount, value, max)\n d[orderID] = s\n return d", "def decoderawtransaction(self, hexstring):\n return dict(self.proxy.decoderawtransaction(hexstring))", "def __preprocess_transactions(self):\n\n p_bar = tqdm(range(14), desc=\"Preprocessing transactions\", leave=False)\n\n try:\n # 0. 
If optional fields not in the transactions add missing\n optional_fields = [\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n \"Fees\",\n \"Premium\",\n \"ISIN\",\n ]\n if not set(optional_fields).issubset(set(self.__transactions.columns)):\n for field in optional_fields:\n if field not in self.__transactions.columns:\n self.__transactions[field] = np.nan\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 1. Convert Date to datetime\n self.__transactions[\"Date\"] = pd.to_datetime(self.__transactions[\"Date\"])\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 2. Sort transactions by date\n self.__transactions = self.__transactions.sort_values(by=\"Date\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 3. Capitalize Ticker and Type [of instrument...]\n self.__transactions[\"Ticker\"] = self.__transactions[\"Ticker\"].map(\n lambda x: x.upper() if isinstance(x, str) else x\n )\n self.__transactions[\"Type\"] = self.__transactions[\"Type\"].map(\n lambda x: x.upper() if isinstance(x, str) else x\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 4. Translate side: [\"deposit\", \"buy\"] -> 1 and [\"withdrawal\", \"sell\"] -> -1\n self.__transactions[\"Signal\"] = self.__transactions[\"Side\"].map(\n lambda x: 1\n if x.lower() in [\"deposit\", \"buy\"]\n else (-1 if x.lower() in [\"withdrawal\", \"sell\"] else 0)\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 5. Convert quantity to signed integer\n self.__transactions[\"Quantity\"] = (\n abs(self.__transactions[\"Quantity\"]) * self.__transactions[\"Signal\"]\n )\n\n # Adjust quantity and price for splits\n for ticker in self.__transactions[\"Ticker\"].unique():\n try:\n splits_df = get_splits(ticker)\n if not splits_df.empty:\n splits_df = splits_df.tz_localize(tz=None)\n for split_date in splits_df.index:\n self.__transactions[\"Quantity\"] = np.where(\n (self.__transactions[\"Ticker\"] == ticker)\n & (self.__transactions[\"Date\"] < split_date),\n self.__transactions[\"Quantity\"]\n * splits_df.loc[split_date].values,\n self.__transactions[\"Quantity\"],\n )\n self.__transactions[\"Price\"] = np.where(\n (self.__transactions[\"Ticker\"] == ticker)\n & (self.__transactions[\"Date\"] < split_date),\n self.__transactions[\"Price\"]\n / splits_df.loc[split_date].values,\n self.__transactions[\"Price\"],\n )\n\n except Exception:\n console.print(\"\\nCould not get splits adjusted\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 6. Determining the investment/divestment value\n self.__transactions[\"Investment\"] = (\n self.__transactions[\"Quantity\"] * self.__transactions[\"Price\"]\n + self.__transactions[\"Fees\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD)\n crypto_trades = self.__transactions[self.__transactions.Type == \"CRYPTO\"]\n self.__transactions.loc[\n (self.__transactions.Type == \"CRYPTO\"), \"Ticker\"\n ] = [\n f\"{crypto}-{currency}\"\n for crypto, currency in zip(\n crypto_trades.Ticker, crypto_trades.Currency\n )\n ]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 8. 
Reformat STOCK/ETF tickers to yfinance format if ISIN provided.\n\n # If isin not valid ticker is empty\n self.__transactions[\"yf_Ticker\"] = self.__transactions[\"ISIN\"].apply(\n lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan\n )\n\n empty_tickers = list(\n self.__transactions[\n (self.__transactions[\"yf_Ticker\"] == \"\")\n | (self.__transactions[\"yf_Ticker\"].isna())\n ][\"Ticker\"].unique()\n )\n\n # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported\n removed_tickers = []\n for item in empty_tickers:\n with suppress_stdout():\n # Suppress yfinance failed download message if occurs\n valid_ticker = not (\n yf.download(\n item,\n start=datetime.datetime.now() + datetime.timedelta(days=-5),\n progress=False,\n ).empty\n )\n if valid_ticker:\n # Invalid ISIN but valid ticker\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = np.nan\n else:\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = \"\"\n removed_tickers.append(item)\n\n # Merge reformatted tickers into Ticker\n self.__transactions[\"Ticker\"] = self.__transactions[\"yf_Ticker\"].fillna(\n self.__transactions[\"Ticker\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 9. Remove unsupported ISINs that came out empty\n self.__transactions.drop(\n self.__transactions[self.__transactions[\"Ticker\"] == \"\"].index,\n inplace=True,\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 10. Create tickers dictionary with structure {'Type': [Ticker]}\n unsupported_type = self.__transactions[\n (~self.__transactions[\"Type\"].isin([\"STOCK\", \"ETF\", \"CRYPTO\"]))\n ].index\n if unsupported_type.any():\n self.__transactions.drop(unsupported_type, inplace=True)\n console.print(\n \"[red]Unsupported transaction type detected and removed. Supported types: stock, etf or crypto.[/red]\"\n )\n\n for ticker_type in set(self.__transactions[\"Type\"]):\n self.tickers[ticker_type] = list(\n set(\n self.__transactions[\n self.__transactions[\"Type\"].isin([ticker_type])\n ][\"Ticker\"]\n )\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 11. Create list with tickers except cash\n self.tickers_list = list(set(self.__transactions[\"Ticker\"]))\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 12. Save transactions inception date\n self.inception_date = self.__transactions[\"Date\"].iloc[0]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 13. Populate fields Sector, Industry and Country\n if (\n self.__transactions.loc[\n self.__transactions[\"Type\"] == \"STOCK\",\n optional_fields,\n ]\n .isnull()\n .values.any()\n ):\n # If any fields is empty for stocks (overwrites any info there)\n self.__load_company_data()\n\n p_bar.n += 1\n p_bar.refresh()\n\n # Warn user of removed ISINs\n if removed_tickers:\n p_bar.disable = True\n console.print(\n f\"\\n[red]The following tickers are not supported and were removed: {removed_tickers}.\"\n f\"\\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN.\"\n f\"\\nSuffix info on 'Yahoo Finance market coverage':\"\n \" https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html\"\n f\"\\nE.g. 
IWDA -> IWDA.AS[/red]\\n\"\n )\n except Exception:\n console.print(\"\\nCould not preprocess transactions.\")\n raise", "def get_aggregated_values(self):\n if not self._initialized:\n raise Exception(\"To readout you must first initialize, then\"\n \"process batches!\")\n else:\n ret_vals = [q.readout() for q in self.quantities]\n return dict(zip(self.quantity_names, ret_vals))", "def tx_prices(self) -> Dict[str, List[float]]:\n agent_pbk_to_name = self.game.configuration.agent_pbk_to_name\n results = {\n agent_name: [] for agent_name in agent_pbk_to_name.values()\n } # type: Dict[str, List[float]]\n\n temp_game = Game(self.game.configuration, self.game.initialization)\n\n # compute the partial scores for every agent after every transaction\n # (remember that indexes of the transaction start from one, because index 0 is reserved for the initial scores)\n for idx, tx in enumerate(self.game.transactions):\n temp_game.settle_transaction(tx)\n results[agent_pbk_to_name[tx.seller_pbk]].append(tx.amount)\n\n return results", "def wem_market_value_all():\n __query = \"\"\"\n select\n date_trunc('month', wfs.trading_interval) AS trading_day,\n sum(wfs.eoi_quantity * wbs.price) as energy_interval,\n wf.fueltech_id\n from wem_facility_scada wfs\n left join wem_facility wf on wfs.facility_id = wf.code\n join wem_balancing_summary wbs on wfs.trading_interval = wbs.trading_interval\n where\n wf.fueltech_id is not null\n group by 1, wf.fueltech_id\n order by 1 desc, 2 asc\n \"\"\"\n\n query = __query.format()\n\n json_envelope = {}\n\n with engine.connect() as c:\n rows = c.execute(query)\n\n current_tech = None\n\n for row in rows:\n\n current_tech = row[2]\n\n if current_tech not in json_envelope.keys():\n json_envelope[current_tech] = {\n \"id\": f\"wem.fuel_tech.{current_tech}.market_value\",\n \"fuel_tech\": current_tech,\n \"region\": \"wa\",\n \"type\": \"market_value\",\n \"units\": \"AUD\",\n \"history\": {\n \"interval\": \"1M\",\n \"start\": None,\n \"last\": None,\n \"data\": [],\n },\n }\n\n if (\n json_envelope[current_tech][\"history\"][\"start\"] == None\n or row[0] < json_envelope[current_tech][\"history\"][\"start\"]\n ):\n json_envelope[current_tech][\"history\"][\"start\"] = row[0]\n\n if (\n json_envelope[current_tech][\"history\"][\"last\"] == None\n or row[0] > json_envelope[current_tech][\"history\"][\"last\"]\n ):\n json_envelope[current_tech][\"history\"][\"last\"] = row[0]\n\n json_envelope[current_tech][\"history\"][\"data\"].append(row[1])\n\n return [json_envelope[i] for i in json_envelope.keys()]", "def get_goal(self):\n request_name = \"list_inventory_orders\"\n\n orders = self.make_request(request_name)\n order_dict = dict()\n for order in orders:\n order_name = order[\"@id\"].encode('utf-8')\n item_dict = dict()\n for item in order[\"items\"]:\n item_id = item[\"inventory-item-id\"].encode('utf-8')\n item_quantity = item[\"quantity\"]\n item_dict[item_id] = item_quantity\n order_dict[order_name] = item_dict\n return order_dict", "def _get_dicts(self, unit_set):\n name_dict = {}\n unit_dict = {}\n for unit in unit_set:\n name_dict[unit.name] = unit.coef\n unit_dict[unit.unit] = unit\n\n return name_dict, unit_dict", "def get_transactions(filters, as_dict=1):\n\tfilter_by_voucher = 'AND gl.voucher_type = %(voucher_type)s' if filters.get('voucher_type') else ''\n\tgl_entries = frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\t/* either debit or credit amount; always positive */\n\t\t\tcase gl.debit when 0 then gl.credit else gl.debit end as 'Umsatz (ohne 
Soll/Haben-Kz)',\n\n\t\t\t/* 'H' when credit, 'S' when debit */\n\t\t\tcase gl.debit when 0 then 'H' else 'S' end as 'Soll/Haben-Kennzeichen',\n\n\t\t\t/* account number or, if empty, party account number */\n\t\t\tacc.account_number as 'Konto',\n\n\t\t\t/* against number or, if empty, party against number */\n\t\t\t%(temporary_against_account_number)s as 'Gegenkonto (ohne BU-Schlüssel)',\n\n\t\t\tgl.posting_date as 'Belegdatum',\n\t\t\tgl.voucher_no as 'Belegfeld 1',\n\t\t\tLEFT(gl.remarks, 60) as 'Buchungstext',\n\t\t\tgl.voucher_type as 'Beleginfo - Art 1',\n\t\t\tgl.voucher_no as 'Beleginfo - Inhalt 1',\n\t\t\tgl.against_voucher_type as 'Beleginfo - Art 2',\n\t\t\tgl.against_voucher as 'Beleginfo - Inhalt 2',\n\t\t\tgl.party_type as 'Beleginfo - Art 3',\n\t\t\tgl.party as 'Beleginfo - Inhalt 3',\n\t\t\tcase gl.party_type when 'Customer' then 'Debitorennummer' when 'Supplier' then 'Kreditorennummer' else NULL end as 'Beleginfo - Art 4',\n\t\t\tpar.debtor_creditor_number as 'Beleginfo - Inhalt 4'\n\n\t\tFROM `tabGL Entry` gl\n\n\t\t\t/* Kontonummer */\n\t\t\tleft join `tabAccount` acc \n\t\t\ton gl.account = acc.name\n\n\t\t\tleft join `tabCustomer` cus\n\t\t\ton gl.party_type = 'Customer'\n\t\t\tand gl.party = cus.name\n\n\t\t\tleft join `tabSupplier` sup\n\t\t\ton gl.party_type = 'Supplier'\n\t\t\tand gl.party = sup.name\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = gl.party\n\t\t\tand par.parenttype = gl.party_type\n\t\t\tand par.company = %(company)s\n\n\t\tWHERE gl.company = %(company)s \n\t\tAND DATE(gl.posting_date) >= %(from_date)s\n\t\tAND DATE(gl.posting_date) <= %(to_date)s\n\t\t{}\n\t\tORDER BY 'Belegdatum', gl.voucher_no\"\"\".format(filter_by_voucher), filters, as_dict=as_dict)\n\n\treturn gl_entries", "def transactions(self):\n return self._call_account_method(\n 'transactions'\n )", "def tx_counts(self) -> Dict[str, Dict[str, int]]:\n agent_pbk_to_name = self.game.configuration.agent_pbk_to_name\n result = {agent_name: 0 for agent_name in agent_pbk_to_name.values()}\n results = {\"seller\": result.copy(), \"buyer\": result.copy()}\n\n temp_game = Game(self.game.configuration, self.game.initialization)\n\n # compute the partial scores for every agent after every transaction\n # (remember that indexes of the transaction start from one, because index 0 is reserved for the initial scores)\n for idx, tx in enumerate(self.game.transactions):\n temp_game.settle_transaction(tx)\n results[\"seller\"][agent_pbk_to_name[tx.seller_pbk]] += 1\n results[\"buyer\"][agent_pbk_to_name[tx.buyer_pbk]] += 1\n\n return results", "def query_symbol_bars(self, symbol: str):\n return self._call_txtrader_api('query_symbol_bars', {'symbol': symbol})", "def compute_helper_mempool_dictionaries():\n txn_density_dict = {}\n txn_parents_dict = {}\n txn_size_dict = {}\n mempool_data = parse_mempool_csv()\n for elem in mempool_data:\n size = elem.weight/MAXIMUM_BLOCK_WEIGHT # weight mapped to (0,1)\n txn_size_dict[elem.txid] = size \n txn_density_dict[elem.txid] = elem.fee/size\n if elem.parents != '':\n txn_parents_dict[elem.txid] = elem.parents.strip().split(';')\n return txn_density_dict,txn_parents_dict,txn_size_dict", "def get(self, transaction_ids):\n try:\n transaction_ids = list(set(list(transaction_ids)))\n request = {\"transaction_ids\": transaction_ids}\n response = {}\n # Validate User Input\n validations_result = validate_transaction_ids(transaction_ids)\n if validations_result is not None and len(validations_result) > 0:\n response = {\"ResponseCode\": 
ResponseCodes.InvalidRequestParameter.value,\n \"ResponseDesc\": ResponseCodes.InvalidRequestParameter.name,\n \"ValidationErrors\": validations_result}\n else:\n transaction_outputs_dict = {}\n for transaction_id in sorted(transaction_ids):\n transaction_outputs = db_session.query(TransactionOutput).filter(\n TransactionOutput.transaction_id == transaction_id).order_by(\n TransactionOutput.id.asc()).all()\n\n trans_output_as_list = []\n total_num_of_transaction_outputs = 0\n for transaction_output in transaction_outputs:\n output_address_response = json.loads(\n requests.get('http://localhost:5000/bitcoin/transactions/outputs/addresses',\n {'transaction_id': transaction_id,\n 'transaction_output_id': transaction_output.id}).text)\n if output_address_response[\"ResponseCode\"] == ResponseCodes.Success.value:\n trans_output_as_list.append(serialize_transaction_output(transaction_output,\n output_address_response[\n \"NumberOfOutputAddresses\"],\n output_address_response[\n \"OutputAddresses\"]))\n total_num_of_transaction_outputs = total_num_of_transaction_outputs + 1\n else:\n response = {\"ResponseCode\": output_address_response[\"ResponseCode\"],\n \"ResponseDesc\": output_address_response[\"ResponseDesc\"],\n \"ErrorMessage\": \"Internal Error in Transaction Output Address Service : \"\n + output_address_response[\"ErrorMessage\"]\n }\n break\n transaction_outputs_dict[transaction_id] = {\"NumberOfOutputs\": total_num_of_transaction_outputs,\n \"TransactionOutputs\": trans_output_as_list}\n\n if total_num_of_transaction_outputs > 0:\n response = {\"ResponseCode\": ResponseCodes.Success.value,\n \"ResponseDesc\": ResponseCodes.Success.name,\n \"TransactionOutputData\": transaction_outputs_dict\n }\n else:\n response = {\"ResponseCode\": ResponseCodes.NoDataFound.value,\n \"ResponseDesc\": ResponseCodes.NoDataFound.name,\n \"ErrorMessage\": ResponseDescriptions.NoDataFound.value}\n except Exception as ex:\n response = {\"ResponseCode\": ResponseCodes.InternalError.value,\n \"ResponseDesc\": ResponseCodes.InternalError.name,\n \"ErrorMessage\": str(ex)}\n finally:\n return response", "def produce(self, request, meta, raven_variables, dispatch, t, level=None):\n #balance = defaultdict(float)\n interaction = self.get_interaction()\n balance, meta = interaction.produce(request, meta, raven_variables, dispatch, t, level)\n #for resource, quantity in int_balance.items():\n # balance[resource] += quantity\n return balance, meta", "def raw_get_transaction(cls, txid):\n r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)\n r.raise_for_status() # pragma: no cover\n return r.json()", "def _balances(self) -> Dict[str, int]:\n\n return self.client.get(self._resources(\"balance\"))", "def tx_transaction_mirs(self, txs_hash: str, pandas: bool=False) -> dict:\n \n tx_transaction_mirs = self.network + bf_tx_url + txs_hash + bf_tx_transaction_mirs_url\n\n response = query_blockfrost(tx_transaction_mirs, self.api_key, self.proxies)\n \n return pd.DataFrame.from_dict(response) if pandas else response", "def get_transactions(self):\n\n df = self.__transactions[\n [\n \"Date\",\n \"Type\",\n \"Ticker\",\n \"Side\",\n \"Price\",\n \"Quantity\",\n \"Fees\",\n \"Investment\",\n \"Currency\",\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n ]\n ]\n df = df.replace(np.nan, \"-\")\n df[\"Date\"] = df[\"Date\"].dt.strftime(\"%Y-%m-%d\")\n df.sort_values(by=\"Date\", ascending=False, inplace=True)\n return df", "def jsonrpc_gettx(self, txid):\n txnw = 
self.node.storage.db.get(hex2b(txid))\n if txnw is None:\n return {}\n txnw = TxnWrapper.unserialize(SerializationBuffer(txnw))\n info = hex_bytes_in_dict(\n txnw.txn.to_dict())\n\n # Add blockheight\n info['blockheight'] = txnw.timestamp / TIME_MULTIPLIER\n return info", "def stats(self):\n return {attr: getattr(self, attr) for attr in ['cash', 'rawg_quantity', 'rawg_demand', 'rawg_price', 'rig_quantity', 'rig_supply', 'rig_price']}", "def get_df_transactions():\n\n _, res = DBX.files_download(c.io.FILE_TRANSACTIONS)\n return pd.read_excel(io.BytesIO(res.content), index_col=0)", "def __init__(self, miser, fromdt, todt, numBars = 100):\r\n def keysToString(indict):\r\n \"\"\"Return a new dict that has converted `indict`'s keys from\r\n Transaction to string.\"\"\"\r\n newD = {}\r\n for k, v in indict.iteritems():\r\n newD[k.name] = v\r\n return newD\r\n\r\n self.income = dictToSortedList(keysToString(miser.income(fromdt, todt)))\r\n self.expenses = dictToSortedList(keysToString(miser.expenses(fromdt, todt)))\r\n self.numBars = numBars\r\n \r\n sumStr = \"\\nProfile of expenses:\"\r\n sumStr += self.expensesBar\r\n\r\n print sumStr", "def get_balance(self):\n\n return {\n 'saturacion': (self.config['saturacion'] + 100) * 100.0 / 200.0,\n 'contraste': (self.config['contraste'] + 100) * 100.0 / 200.0,\n 'brillo': (self.config['brillo'] + 100) * 100.0 / 200.0,\n 'hue': (self.config['hue'] + 100) * 100.0 / 200.0,\n 'gamma': (self.config['gamma'] + 100) * 100.0 / 200.0\n }", "def _amount_all(self, cr, uid, ids,field_name, arg, context={}):\n res={}\n for record in self.browse(cr, uid, ids, context=context):\n res[record.id] = { 'amount_untaxed': 0.0, 'amount_tax': 0.0, 'amount_total': 0.0}\n amount_untaxed = 0.0\n amount_tax = 0.0\n amount_total = 0.0\n\t if not record.allowances_lines_after and record.allowances_lines_before:\n \tfor line in record.allowances_lines_before:\n \tamount_untaxed += line.amount_untaxed\n \tamount_tax += line.amount_tax\n \tamount_total += line.amount_total\n \tres[record.id]['amount_untaxed'] = amount_untaxed \n \tres[record.id]['amount_tax'] = amount_tax \n \tres[record.id]['amount_total'] = amount_total \n\n\t elif record.allowances_lines_after and record.allowances_lines_before :\n \tfor line in record.allowances_lines_after:\n \tamount_untaxed += line.amount_untaxed\n \tamount_tax += line.amount_tax\n \tamount_total += line.amount_total\n \tres[record.id]['amount_untaxed'] = amount_untaxed \n \tres[record.id]['amount_tax'] = amount_tax \n \tres[record.id]['amount_total'] = amount_total \n return res", "def get_biases(self):\n return list(self.b.values())", "def get_biases(self):\n return list(self.b.values())", "def get_biases(self):\n return list(self.b.values())", "def get_biases(self):\n return list(self.b.values())", "def determineUnitHistory():\n\tunitTracker = Unitiser()\n\t\n\timport transactions\n\ttrades = transactions.allTransactions()\n\t\n\thistory = dict()\n\t\n\tfor date in timeline():\n\t\t#print(\"\\ntimelime:\", date.strftime('%Y-%m-%d %H:%M:%S'))\n\t\timport valuator\n\t\tvalue = valuator.getPortfolioValueAt(date)\n\t\tif date in trades:\n\t\t\tprior = getPortfolioBefore(date)\n\t\t\tprior_value = valuator.getPortfolioValueAt(date, portfolio = prior)\n\n\t\t\tinvested = Decimal('0.0')\n\t\t\tfor equity in trades[date]:\n\t\t\t\ttrade = trades[date][equity]\n\t\t\t\t#print(equity, trade)\n\t\t\t\tif trade['action'] == 'buy':\n\t\t\t\t\tinvested = invested + Decimal(trade['value'])\n\t\t\t\telif trade['action'] == 'sell':\n\t\t\t\t\tinvested = 
invested - Decimal(trade['value'])\n\n\t\t\tsince = getPortfolioAt(date)\n\t\t\tsince_value = valuator.getPortfolioValueAt(date, portfolio = since)\n\n\t\t\t#print(\"change amount is\", invested)\n\t\t\tif invested > 0:\n\t\t\t\tunitTracker.invest(invested, prior_value)\n\t\t\telif invested < 0:\n\t\t\t\tunitTracker.divest(abs(invested), prior_value)\n\n\t\thistory[date] = {\n\t\t\t 'date' : date,\n\t\t\t 'value' : value.quantize(TWOPLACES),\n\t\t\t 'units' : unitTracker.numberOfUnits().quantize(TWOPLACES),\n\t\t\t 'price' : unitTracker.pricePerUnit(value).quantize(TWOPLACES),\n\t\t\t 'invested' : unitTracker.invested\n\t\t\t }\n\t\n\treturn history", "def get_all_stat(self):\n all_stat=dict()\n for stat_type in self.log_book.keys():\n stat = self.get_stat(stat_type)\n all_stat[stat_type] = stat\n return all_stat", "def T1s(self) -> Dict[int, Optional[float]]:\n return {qs.id: qs.T1 for qs in self.qubits_specs}", "def get_string_of_transactions(self):\n s = \"\"\n for transaction in self.transactions:\n s += transaction.to_string()\n return s", "def get_values(self, currency):\n curr_dict = {\n \"brazilian_real\": None,\n \"american_dollar\": None,\n \"european_euro\": None,\n \"british_pound\": None,\n \"japanese_yen\": None,\n \"swiss_frank\": None,\n \"canadian_dollar\": None,\n \"australian_dollar\": None\n }\n index = 0\n for key in curr_dict:\n if key != currency:\n # list comprehension to get values from data\n curr_dict[key] = [\n element for record in select_records(currency, 1) for element in record\n if element == record[index] and isinstance(element, float)\n ]\n index += 1\n else:\n continue\n return curr_dict", "def submit(\n self,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> List[Dict]:\n\n search_type = tracker.get_slot(\"search_type\")\n transaction_history = tracker.get_slot(\"transaction_history\")\n transactions_subset = transaction_history.get(search_type, {})\n vendor = tracker.get_slot(\"vendor_name\")\n if vendor:\n transactions = transactions_subset.get(vendor.lower())\n vendor = f\" with {vendor}\"\n else:\n transactions = [\n v for k in list(transactions_subset.values()) for v in k\n ]\n vendor = \"\"\n\n start_time = parser.isoparse(tracker.get_slot(\"start_time\"))\n end_time = parser.isoparse(tracker.get_slot(\"end_time\"))\n\n for i in range(len(transactions) - 1, -1, -1):\n transaction = transactions[i]\n transaction_date = parser.isoparse(transaction.get(\"date\"))\n\n if transaction_date < start_time or transaction_date > end_time:\n transactions.pop(i)\n\n numtransacts = len(transactions)\n total = sum([t.get(\"amount\") for t in transactions])\n slotvars = {\n \"total\": f\"{total:.2f}\",\n \"numtransacts\": numtransacts,\n \"start_time_formatted\": tracker.get_slot(\"start_time_formatted\"),\n \"end_time_formatted\": tracker.get_slot(\"end_time_formatted\"),\n \"vendor_name\": vendor,\n }\n\n dispatcher.utter_message(\n template=f\"utter_searching_{search_type}_transactions\", **slotvars\n )\n dispatcher.utter_message(\n template=f\"utter_found_{search_type}_transactions\", **slotvars\n )\n\n return [\n SlotSet(\"time\", None),\n SlotSet(\"time_formatted\", None),\n SlotSet(\"start_time\", None),\n SlotSet(\"end_time\", None),\n SlotSet(\"start_time_formatted\", None),\n SlotSet(\"end_time_formatted\", None),\n SlotSet(\"grain\", None),\n SlotSet(\"search_type\", None),\n SlotSet(\"vendor_name\", None),\n ]", "def get_dict(self):\n return", "def _table_tree(self, real_account):\n return [{\n 'account': 
ra.account,\n 'balances_children':\n serialize_inventory(realization.compute_balance(ra),\n at_cost=True),\n 'balances': serialize_inventory(ra.balance, at_cost=True),\n 'is_leaf': len(ra) == 0 or bool(ra.txn_postings),\n 'postings_count': len(ra.txn_postings)\n } for ra in realization.iter_children(real_account)]", "def histogramintegrals(self):\n return {}", "def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)", "def get_all_orders():", "def summary(self,attr='raw'):\n g = {}\n g['gid'] = map(lambda x : x.gid, self.taxonomies)\n g['sp'] = map(lambda x : x.presences.species , self.taxonomies)\n \n g['gns'] = map(lambda x : x.presences.genera , self.taxonomies) \n g['fam'] = map(lambda x : x.presences.families , self.taxonomies)\n g['ord'] = map(lambda x : x.presences.orders , self.taxonomies)\n g['cls'] = map(lambda x : x.presences.classes , self.taxonomies)\n g['phy'] = map(lambda x : x.presences.phyla , self.taxonomies)\n g['kng'] = map(lambda x : x.presences.kingdoms , self.taxonomies)\n #g['all'] = map(lambda x : (x.gid,int(x.presences.species),int(x.genera),int(x.families),int(x.orders),int(x.classes),int(x.phyla),int(x.kingdoms)),self.taxonomies)\n keys = settings.TAXONOMIC_TREE_KEYS\n if attr == 'int':\n for key in keys:\n g[key] = map(lambda p : int(p) ,g[key])\n elif attr == 'str':\n for key in keys:\n g[key] = map(lambda p : str(p) ,g[key]) \n elif attr == 'list':\n for key in keys:\n g[key] = map(lambda p : p.list ,g[key]) \n elif attr == 'mapping':\n for key in keys:\n g[key] = map(lambda p : p.map ,g[key]) \n elif attr == 'raw':\n return g\n else:\n logger.error(\"Wrong attribute selection\")\n return None\n \n return g", "def test_get_transaction_types(self):\n pass", "def dump(self):\n result = super(BattleshipTransaction, self).dump()\n\n result['Name'] = self._name\n result['Action'] = self._action\n result['Ships'] = self._ships\n if self._action == 'JOIN':\n result['Board'] = self._board\n if self._action == 'FIRE':\n result['Row'] = self._row\n result['Column'] = self._column\n if self._reveal_space is not None:\n result['RevealSpace'] = self._reveal_space\n if self._reveal_nonce is not None:\n result['RevealNonce'] = self._reveal_nonce\n\n return result", "def _get_amount_total_base(self):\n res = {}\n for txt in self:\n res[txt.id] = 0.0\n for txt_line in txt.txt_ids:\n if txt_line.invoice_id.type in ['out_refund', 'in_refund']:\n res[txt.id] -= txt_line.untaxed\n else:\n res[txt.id] += txt_line.untaxed\n return res", "async def b_chain() -> dict:\n authority_chain = await chain.consensus()\n return {\"chain\": authority_chain[\"chain\"]}", "def chart_of_accounts(qbo_session, attrs = \"strict\"):\n\n #query all the accounts\n accounts = qbo_session.get_objects(\"Account\")\n\n #by strict, I mean the order the docs say to use when udpating:\n #https://developer.intuit.com/docs/0025_quickbooksapi/\n #0050_data_services/030_entity_services_reference/account\n\n if attrs == \"strict\":\n attrs = [\n \"Id\", \"SyncToken\", \"MetaData\", \"Name\", \"SubAccount\",\n \"ParentRef\", \"Description\", \"FullyQualifiedName\", \"Active\",\n \"Classification\", 
\"AccountType\", \"AccountSubType\", \"AcctNum\",\n \"OpeningBalance\", \"OpeningBalanceDate\", \"CurrentBalance\",\n \"CurentBalanceWithSubAccounts\", \"CurrencyRef\"\n ]\n\n else:\n #TODO: validate the attrs against the 'strict' list above\n pass\n\n #As a first cut, we'll sort them by AccountType in trial balance order\n\n tb_type_order = [\n \"Bank\", \"Accounts Receivable\", \"Other Current Asset\",\n \"Fixed Asset\", \"Other Asset\",\n \"Accounts Payable\", \"Credit Card\",\n \"Other Current Liability\", \"Other Liability\",\n \"Equity\",\n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n ]\n\n accounts_by_type = {} #{Accounts_Payable:[row_list]\n\n for a_id in accounts:\n a = accounts[a_id]\n at = a[\"AccountType\"]\n if at not in tb_type_order:\n raise Exception(\"Unexpected AccountType: %s\" % at)\n\n if at not in accounts_by_type:\n accounts_by_type[at]=[]\n\n this_row = []\n for field in attrs:\n if field not in a:\n this_row.append(\"\")\n else:\n value = a[field]\n if isinstance(value,(list,tuple,dict)):\n this_row.append(\"<complex>\")\n else:\n this_row.append(a[field])\n\n accounts_by_type[at].append(this_row)\n\n rows = [attrs] #headers are the first row\n for at in tb_type_order:\n if at in accounts_by_type:\n for row in accounts_by_type[at]:\n rows.append(row)\n\n return rows", "def get_tarefa_mais_barata(tarefas):\n\tdict_custo_total = {}\n\ttarefa_barata = {}\n\tfor tarefa in tarefas:\n \t\ttarefa_id = tarefa['identificador']\n\t\tif not (dict_custo_total.has_key(tarefa_id)):\n\t\t\tdict_custo_total[tarefa_id] = {'tarefa': tarefa, 'custo': 0}\n\t\t\tfor outra_tarefa in tarefas:\n\t\t\t\tif (outra_tarefa['identificador'] != tarefa_id):\n\t\t\t\t\tdict_custo_total[tarefa_id]['custo'] += calcula_custo(tarefa['tempo_de_execucao'], outra_tarefa['custo_por_hora'])\n\n\t\tif (tarefa_barata == {} or (tarefa_barata['custo'] > dict_custo_total[tarefa_id]['custo'])):\n\t\t\ttarefa_barata = dict_custo_total[tarefa_id]\n\n\treturn tarefa_barata['tarefa']", "def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions", "async def test_all_transactions(self):\n response = await self.collect(get_request_json_return_value=self.GATLING_JSON)\n self.assert_measurement(response, value=\"2\", entities=self.expected_entities)", "def to_json(self) -> Dict[str, Any]:\n\n return {\n **self.index.to_json(),\n \"timelock\": self.timelock,\n \"amount\": self.amount,\n \"spend_key\": self.spend_key.hex(),\n \"state\": self.state.value,\n }", "def gettransaction(self, txid):\n return TransactionInfo(**self.proxy.gettransaction(txid))", "def get_transaction_prices(self):\n cleaned_data = self.cleaned_data()\n supplier_cleaned_data = cleaned_data.get('cleaned_supplier_data')\n transaction_cleaned_data = cleaned_data.get('cleaned_transaction_data')\n merged_data = self.merge_supplier_transaction(supplier_cleaned_data, transaction_cleaned_data)\n calculated_data = self.calculate_prices(merged_data)\n self.export_calculated_prices(calculated_data)\n return calculated_data", "def get_tx_info(tx):\n\n input_addresses = []\n output_addresses = []\n payments = []\n\n try:\n response = json.loads(make_request('http://tbtc.blockr.io/api/v1/tx/info/' + tx))\n except Exception as e:\n status = json.loads(e.message).get('status')\n if status in ['error', 'fail']:\n return {'from': None, 'to': None, 'amount': None, 'confirmations': 0}\n\n vins = response.get('data').get('vins')\n vouts = 
response.get('data').get('vouts')\n confirmations = response.get('data').get('confirmations')\n\n for i in range(len(vins)):\n if vins[i].get('address') not in input_addresses:\n input_addresses.append(vins[i].get('address'))\n for i in range(len(vouts)):\n output_addresses.append(vouts[i].get('address'))\n payments.append(vouts[i].get('amount'))\n\n return {'from': input_addresses, 'to': output_addresses, 'amount': payments, 'confirmations': confirmations}", "def test_execute_dump_all_transaction(self):\n\n instruction = Instruction(\"dump()\")\n\n with std_out() as (out, err):\n self.transaction_manager.execute(instruction)\n\n output = out.getvalue().strip()\n\n self.assertEqual(output, \"{1: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 2: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x11': { x11: 110 }, 'x12': { x12: 120 }, 'x1': { x1: 10 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 3: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 4: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x3': { x3: 30 }, 'x12': { x12: 120 }, 'x13': { x13: 130 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 5: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 6: {'x14': { x14: 140 }, 'x20': { x20: 200 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x15': { x15: 150 }, 'x4': { x4: 40 }, 'x5': { x5: 50 }}, 7: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 8: {'x14': { x14: 140 }, 'x20': { x20: 200 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x7': { x7: 70 }, 'x4': { x4: 40 }, 'x17': { x17: 170 }}, 9: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 10: {'x19': { x19: 190 }, 'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x9': { x9: 90 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}}\")", "def get_dict(self):\n return {\n \"type\": self.item_type,\n \"size\": self.size,\n \"toppings\": self.toppings,\n \"price\": self.get_price()\n }", "def to_dict(self):\n d = {'Name': self._name,\n 'Account Type': self.account_type}\n if self._assets:\n d['Assets'] = [to_dict(asset) for asset in self._assets.values()]\n if self._cash != 0:\n d['Available Cash'] = self._cash\n return d", "def _get_new_bar(self, symbol):\n for b in self.symbol_data[symbol]:\n yield b", "def _construct_all_holdings(self):\n d = dict((s, 0.0) for s in self.symbol_list)\n d['datetime'] = self.backtest_date\n d['cash'] = 
self.initial_capital\n d['commission'] = 0.0\n d['total'] = self.initial_capital\n d['buy_times'] = 0\n d['sell_times'] = 0\n d['total_times'] = 0\n d['hold'] = 0\n return [d]", "async def get_transactions(self, guild_id, user):\n doc = await self.db[str(guild_id)].find_one({'id': user.id})\n if doc is None or len(doc['transactions']) == 0:\n return -1\n else:\n return doc['transactions']", "def getData(self):\n return dict(self._dump_data)", "def getDataDict(self):\n #code begins here \n return self.__dflst,self.__dfwells", "def _retrieve_transaction_table_input(self, execution_arn: str) -> Dict:\n response = self.client.get_execution_history(executionArn=execution_arn,maxResults=1000)\n events = response[\"events\"]\n record_purchase_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertPurchase\"\n ]\n\n record_refund_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertRefund\"\n ]\n\n record_error_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertError\"\n ]\n \n self.assertTrue(\n record_purchase_entered_events,\n \"Cannot find InsertPurchase TaskStateEntered event\",\n )\n self.assertTrue(\n record_refund_entered_events,\n \"Cannot find InsertPurchase TaskStateEntered event\",\n )\n self.assertTrue(\n record_error_entered_events,\n \"Cannot find InsertPurchase TaskStateEntered event\",\n )\n purchase_table_input=[] #PurchaseTable inputs\n refund_table_input=[] # RefundTable inputs\n error_table_input=[] # ErrorTable inputs\n for transaction in record_purchase_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n purchase_table_input.append(transaction_input)\n self.inserted_purchase_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up PurchaseTable\n\n for transaction in record_refund_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n refund_table_input.append(transaction_input)\n self.inserted_refund_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up RefundTable\n\n for transaction in record_error_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n error_table_input.append(transaction_input)\n self.inserted_error_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up ErrorTable\n\n return purchase_table_input, refund_table_input, error_table_input", "def get_transaction_totals(self, params=None):\n return self.get(f\"{self.gateway_path}/totals\", params)", "def transactions_df():\n return pd.DataFrame(\n {\n \"user_id\": [1, 1, 1, 2, 2, 2, 3, 3, 3],\n \"item_id\": [11, 22, 22, 11, 22, 33, 33, 33, 44],\n \"amount\": [10, 20, 30, 40, 50, 60, 70, 80, 90],\n }\n )", "def amenity_all():\n state_holder = []\n for state in models.storage.all(\"Amenity\").values():\n state_holder.append(state.to_dict())\n return_holder = jsonify(state_holder)\n return return_holder", "def dictOfBetas(self, free=True, fixed=False):\n s = {}\n for e in self.children:\n d = e.dictOfBetas(free, fixed)\n s = dict(s, **d)\n return s", "def _buildTransDict(self, fromdt, todt, ttype):\r\n totalsDict = self._buildTotalsDict(fromdt, todt)\r\n 
sortedTotsList = dictToSortedList(totalsDict)\r\n\r\n return dict([(k,v) for k,v in totalsDict.iteritems() if type(k) == ttype])", "def return_as_dictionary(self):\n output_dict = Inventory.return_as_dictionary(self)\n output_dict['material'] = self.material\n output_dict['size'] = self.size\n\n return output_dict", "def all_transactions(self, request):\n user_id = request.data[\"user\"]\n user = User.objects.get(id=user_id)\n user_transactions = user.transactions.all()\n serializer = TransactionSerializer(user_transactions, many=True)\n\n return Response(serializer.data)", "def get_all_books() -> List[Dict]:\n pass", "def dict_values(self):\n return self.__dict__", "def getCurrentSystemFunds(self):\n e = self.myParent.myEmpireDict\n d = self.myParent.mySystemDict\n return [e['CR'], d['AL'],d['EC'],d['IA']]", "def get_transactions_sum_data(month_objects, amount_type):\n if (amount_type == 'expenses'):\n chart_data = [\n result['amount__sum'].quantize(D('0.01')).copy_abs()\n if result['amount__sum'] != None\n else 0\n for result in \n [month.transactions.filter(amount__lte=0).aggregate(Sum('amount')) for month in month_objects]\n ]\n elif (amount_type == 'incomes'):\n chart_data = [\n result['amount__sum'].quantize(D('0.01')).copy_abs() \n if result['amount__sum'] != None\n else 0\n for result in \n [month.transactions.filter(amount__gt=0).aggregate(Sum('amount')) for month in month_objects]\n ]\n \n return chart_data", "def get_chain_data(self, symbol: str): \n return self.trader.fetch_chain_data(symbol)", "def _load_transactions(self):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\ttry:\r\n\t\t\twith open(self._state_file, 'rb') as tmp:\r\n\t\t\t\tlogger.debug(\"There is a file.\")\r\n\t\t\t\ttmp_dict = pickle.load(tmp)\r\n\t\t\t\tlogger.debug(\"Dictionary loaded from file: %s\" % tmp_dict)\r\n\t\texcept IOError as e: # File doesn't exists\r\n\t\t\tlogger.debug(\"Exit - No file. Error message: %s\" % e)\r\n\t\t\ttmp_dict = {}\r\n\t\t\t\r\n\t\treturn tmp_dict", "def return_as_dictionary(self):\n output_dict = Inventory.return_as_dictionary(self)\n output_dict['brand'] = self.brand\n output_dict['voltage'] = self.voltage\n\n return output_dict", "def dictOfBetas(self, free=True, fixed=False):\n if fixed and self.status != 0:\n return {self.name: self}\n\n if free and self.status == 0:\n return {self.name: self}\n\n return dict()", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)", "def __init__(self):\n self.transaction_index = {}\n self.transaction_list = []" ]
[ "0.63048834", "0.602332", "0.5960751", "0.56639814", "0.56584823", "0.5563182", "0.5531879", "0.55191296", "0.55072176", "0.5477383", "0.54718333", "0.5458796", "0.5426807", "0.54197216", "0.541453", "0.5338547", "0.5327534", "0.53225285", "0.53161556", "0.5279584", "0.52619934", "0.5237087", "0.52224517", "0.52208483", "0.52202314", "0.52195555", "0.5216679", "0.5214561", "0.5192317", "0.5190232", "0.51868635", "0.51672924", "0.5163985", "0.51561975", "0.51450855", "0.5128815", "0.5127976", "0.51187205", "0.50943434", "0.50937515", "0.5092108", "0.50841814", "0.506848", "0.5064129", "0.5063724", "0.50629747", "0.50383264", "0.503129", "0.503129", "0.503129", "0.503129", "0.5001976", "0.50000143", "0.49497277", "0.49371666", "0.4936105", "0.4934925", "0.4934214", "0.49326798", "0.4931223", "0.49275663", "0.49220523", "0.49218124", "0.49053317", "0.4904125", "0.48903102", "0.4890083", "0.48821458", "0.48813823", "0.4880626", "0.48732132", "0.4867718", "0.48675153", "0.48642778", "0.48608243", "0.48523393", "0.4852034", "0.4846197", "0.48452893", "0.48409748", "0.48356566", "0.4831061", "0.4829988", "0.48288727", "0.48272905", "0.482643", "0.48256853", "0.48212123", "0.48201984", "0.48171985", "0.48094684", "0.48060596", "0.48027426", "0.48025575", "0.4800572", "0.47963676", "0.47922558", "0.47921076", "0.47916767", "0.47877666", "0.47861665" ]
0.0
-1
Add 1 to the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _increment_file_counter(self):
    self._add_to_file_counter(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if filename is None:\n return\n\n file_counter = self.file_counter\n # count = file_counter.get(filename, 0)\n # file_counter[filename] = count + i\n # if file_counter[filename] <= 0:\n count = file_counter.get(filename, 0) + i\n if count <= 0:\n # Remove the file from the dictionary if its count has\n # dropped to zero\n file_counter.pop(filename, None)\n else:\n file_counter[filename] = count\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass", "def __len__(self):\n return len(self.files[self.split])", "def _decrement_file_counter(self):\n self._add_to_file_counter(-1)", "def fileCount(self):\n pass", "def getFileCount(self) -> int:\n ...", "def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n", "def numberFiles(self):\n return self.n", "def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return", "def unique_files(self):\n self._tempfiles[-1].ctr = -1", "def __len__(self) -> int:\n return len(self.files)", "def stat_beg_file(self, filename):\n\n self.batchvals['numfiles'] += 1\n self.filevals['filename'] = filename\n self.filevals['start_time'] = time.time()\n\n return -1", "def __len__(self):\n return len(self.files)", "def file_num_increment(full_fpath):\r\n while os.path.isfile(full_fpath) == True:\r\n \r\n fpath, fext = os.path.splitext(full_fpath) #['C:\\Users\\Desktop\\file(1)', '.ext']\r\n\r\n if re.findall(\"[(]\\d+[)]\", fpath) != []: #Check if there is (x) in the path.\r\n for counter in range(1000): #Loop 1000 times\r\n if fpath.endswith(f\"({counter})\"): \r\n fpath = replace_last(fpath, f\"({counter})\", f\"({counter+1})\") #Replace the last occurence of (counter) in the string.\r\n full_fpath = fpath + fext\r\n break\r\n else: #here we pass for cases where (counter) is in the file/folder name itself. We skip them.\r\n continue\r\n else: #If there is no (counter), we create (1)\r\n counter = 1\r\n full_fpath = fpath + '(' + str(counter) + ')' + fext\r\n\r\n return full_fpath", "def __len__(self):\n\n return len(self._file_list)", "def sequential_files(self, ctr=0):\n self._tempfiles[-1].ctr = ctr", "def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. 
If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if", "def _write_array_on_file(self, pa_array):\n pa_batch = pa.RecordBatch.from_struct_array(pa_array)\n self._num_bytes += pa_array.nbytes\n self.pa_writer.write_batch(pa_batch)", "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n", "def __number_of_files(self):\n self.__get_files()\n return len(self.files)", "def fileCounter(directory):", "def n_total_files(self):\n return len(self.fileinfo)", "def setNextFile(self):\n\n if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):\n self.nReadFiles=self.nReadFiles+1\n if self.nReadFiles > self.nTotalReadFiles:\n self.flagNoMoreFiles=1\n raise schainpy.admin.SchainWarning('No more files to read')\n\n print('------------------- [Opening file] ------------------------------',self.nReadFiles)\n self.nReadBlocks = 0\n #if self.nReadBlocks==0:\n # self.readFirstHeader()", "def number_idx(self, filename):\n with open(filename) as fh:\n firstline = fh.readline()\n parts = firstline.split('\\t')\n # only add if there are 4 parts\n if len(parts) != 4:\n return\n\n count = 1\n def writeline(fho, line, count):\n fho.write(line.rstrip() + '\\t' + str(count) + '\\n')\n\n with open(filename + '.tmp', 'w+b') as fho:\n writeline(fho, firstline, count)\n count += 1\n for line in fh:\n writeline(fho, line, count)\n count += 1\n\n shutil.move(filename + '.tmp', filename)", "def to_disk(self, reopen=True):\n # try:\n tfa = CachedArray(self.array)\n # except Exception:\n # return False\n\n fd, _lock_file = mkstemp(\n prefix=tfa._partition_file + \"_\", dir=tfa._partition_dir\n )\n close(fd)\n\n self.subarray = tfa\n _temporary_files[tfa._partition_file] = (\n tfa._partition_dir,\n _lock_file,\n set(),\n )\n\n if reopen:\n # Re-open the partition\n self.open(self.config)\n\n return True", "def __len__(self):\n return len(self.file_paths)", "def increment_count(self):\n self.image_count +=1\n if self.image_count > self.max_count:\n self.image_count = self.count_start # overflow", "def totalfiles(self):\n return len([sz for sz in self.iterate()])", "def append_filepath(self, filepath):\n idx = len(self.t_sect['filepaths'])\n self.t_sect['filepaths'].append(filepath)\n return idx", "def incr_counter(self, path):\n res = self.read_counter(path)\n # print 'incr_counter:', path, res, '->', res + 1\n res += 1\n 
self.cursor.execute('REPLACE INTO counter(fullpath, count) VALUES(?, ?)', (path, res))\n self.conn.commit()\n pass", "def inc_size(self):\r\n self.__length += 1", "def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n", "def count_lines(file_obj):\n for idx, line in enumerate(file_obj):\n pass\n file_obj.seek(0)\n return idx + 1", "def __setitem__(self, filenr, data_arr):\n cvcfile = self.filenames[filenr]\n cvcpath = os.path.join(self.filefolder, cvcfile)\n data_arr.tofile(cvcpath)", "def files_processed(self) -> int:\n with self.lock:\n return self._files_processed", "def getnrfiles(self):\n return len(self.filenames)", "def __len__(self):\n return len(self.frame1_files)", "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def get_num_files(self, file_type):\n return self.file_type_counter.get(file_type, 0)", "def fileno(self):\n return 1", "def increment(self):\n self.pos += 1\n if self.pos == len(self.progress) - 1:\n self.pos = 0", "def getFileCount(self, startingWithPath=\"\"):\n return self.__controller._getRecordsCount(startingWithPath)", "def file_count(self) -> int:\n if self.dataset is None:\n raise ValueError('No known dataset found!')\n return self._max_file_count", "def inc(self):\n \n self.count += 1", "def __len__(self):\n return len(self.files_self_A_rgbd)", "def getFileCount(self):\n\n if self.filecount == -1:\n self.filecount = self.db.filecount()\n\n return self.filecount", "def increment(self):\n self.increments += 1\n if self.increments == self.length:\n self.finished = True", "def updateCounts(self):\n found = False\n fileName = \"counts\"\n if not os.access(fileName, os.F_OK):\n try:\n TFH = open(fileName, \"w\")\n TFH.close()\n except IOError as inst: # @UnusedVariable\n self.logIt(__name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str(\n inst.errno) + \":\" + str(inst.strerror) + \"\\n\")\n raise\n\n self.logIt(__name__ + \".updateCounts(): fileName=\" + fileName + \"\\n\")\n try:\n FH = open(fileName, \"rb+\")\n # FH = posixfile.open(fileName, \"rb+\") # posixfile has been deprecated.\n # FH.lock('w|')\n data = None\n while 1:\n data = str(FH.readline())\n if data is None or data == \"\": break\n data = re.sub(\"\\n\", \"\", data)\n self.debug(__name__ + \".updateCounts(): data is \" + str(data) + \"\\n\")\n ms = str(self.msgNum) + \"=\"\n self.debug(__name__ + \".updateCounts(): ms is\" + str(ms) + \"\\n\")\n if re.search(ms, data):\n found = True\n self.debug(__name__ + \".updateCounts(): DEBUG0.5\\n\")\n break\n self.debug(__name__ + \".updateCounts(): DEBUG1\\n\")\n if data and found:\n self.debug(__name__ + \".updateCounts(): DEBUG2\\n\")\n eloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): eloc=\" + str(eloc) + \"\\n\")\n sloc = eloc - len(data) - 1\n self.debug(__name__ + \".updateCounts(): sloc=\" + str(sloc) + \"\\n\")\n FH.seek(sloc, os.SEEK_SET)\n cloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): cloc=\" + str(cloc) + \"\\n\")\n myList = list()\n myList = data.split('=')\n icount = int(myList[1]) + 1\n FH.write(str(self.msgNum) + \"=\" + str(icount) + \"\\n\")\n else:\n self.debug(__name__ + \".updateCounts(): DEBUG3\\n\")\n FH.write(str(self.msgNum) + \"=1\" + \"\\n\")\n FH.lock('u')\n FH.close()\n except IOError as inst: # @UnusedVariable\n pass\n # self.logIt( __name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str( inst.errno ) + \":\" + str( inst.strerror ) + 
\"\\n\" )\n # Endtry", "def _get_file_length(self, file):\n self[file] = file.stat().st_size", "def fget(self):\n if not hasattr(self, \"_n\"):\n self._n = 0\n self._n += 1\n return self._n", "def incrementWriteCount(self):\n self.writeCount += 1", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def inc( self ):\n self.count += 1", "def _setcounter():\n fname = os.path.basename(camera.status.lastfile)\n tname = fname.split('.')[0]\n i = len(tname)-1\n if i > -1:\n while tname[i].isdigit() and i>-1:\n i = i - 1\n nname = fname[:-4]\n bname = tname[:i+1]\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9].cntr'):\n os.remove(file)\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9][0-9].cntr'):\n os.remove(file)\n f = open('/data/counters/'+nname+'cntr','w')\n f.close()", "def advance_image_count(self,file_id=None,image_num=None):\n # self.next_image = (self.next_image+1) % self.num_images\n if file_id is not None:\n self.file_id = file_id\n if image_num is None:\n self.next_image += 1\n else:\n self.next_image = image_num + 1\n if self.next_image >= self.num_images:\n self.next_image = 0\n self.file_id += 1\n self.signal_next_image_num.emit(self.next_image)\n self.signal_file_id.emit(self.file_id)", "def _register_temporary_file(self):\n _partition_file = self._subarray._partition_file\n _partition_dir = self._subarray._partition_dir\n if _partition_file not in _temporary_files:\n fd, _lock_file = mkstemp(\n prefix=_partition_file + \"_\", dir=_partition_dir\n )\n close(fd)\n _temporary_files[_partition_file] = (\n _partition_dir,\n _lock_file,\n set(),\n )\n else:\n _, _lock_file, _ = _temporary_files[_partition_file]\n\n return _lock_file", "def getNumTimeDataFiles(self):\n return self.nTimeDataFiles", "def file_len(f):\n\n for n, l in enumerate(f, 1):\n pass\n f.seek(0) # rewind\n return n", "def fs_files_total(self):\n return self._fs_files_total", "def increase_counter(self):\n self.values = self.values + 1", "def increment(self, features, fname, v=1):\n if fname not in features:\n features[fname] = 0\n features[fname] += v", "def get_nrof_aux(self):\n aux = 0\n for l in self.aux_array:\n if l:\n aux += 1\n return aux", "def fix_index(self):\n if self.record_size <= self.size:\n self.record_size += 1\n if self.index % self.size == 0:\n self.isFull = True if len(self._storage) == self.size else False\n if self.replace_flag:\n self.index = 1\n return self.index\n else:\n sys.stderr.write('Experience replay buff is full and replace is set to FALSE!\\n')\n return -1\n else:\n self.index += 1\n return self.index", "def fileno(self):\r\n raise NotImplementedError()", "def _setup_n_ints_in_file(self):\n self.n_ints_in_file = sigproc.calc_n_ints_in_file(self.filename)", "def increment_counter(self) -> None:", "def file_progress_sig_handler(self, bytes_read: int):\n # Increment the bytes read\n self.file_bytes_read += bytes_read\n\n # Update the progress bar\n self.fileAnalyzeProgressBar.setValue(self.file_bytes_read)\n\n logging.debug(\"Analyzing File Progress: \" + str(self.file_bytes_read))", "def getNumStatDataFiles(self):\n return self.nStatDataFiles", "def has_subfile(self) -> bool:\n\t\tself._update_subfiles()\n\t\treturn bool(len(self.subfiles))", "def size(self) -> int:\n return sum(p.size for p in self.iterfiles())", "def add_count(self):\n self.count += 1", "def file_num(self):\n command = \"SELECT COUNT(id) FROM files;\"\n return self.c.execute(command)", "def __len__(self):\n return 
int(np.floor(len(self.wav_paths)))", "def touched_files(self, parent):", "def file_count(self) -> str:\n return pulumi.get(self, \"file_count\")", "def len(self):\n # print(self.processed_file_names)\n return self.len_", "def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter", "def handle_put_progress(self, filegen):\n # print \"bytes so-far: \", filegen.bytes_read\n\n if self.maybe_touch():\n self.log(\"UPLOAD_PROGRESS\", level=INFO)\n self.touch()\n Backend.touch(\n self.current_upload,\n bytes_downloaded=filegen.bytes_read,\n location=self.location)", "def segment_counter(self, _):\n raise NotImplementedError(\n \"We do not support externally altering the segment counter\")", "def get_data_id(self):\n self.data_id = len(glob.glob(osp.join(self.save_dir, 'depth', '*npy')))\n return self.data_id", "def get_lenght(self):\n return len(self.filelist)", "def file_number(self, file_number):\n\n self._file_number = file_number", "def file_number(self, file_number):\n\n self._file_number = file_number", "def initiallize_buffer(self):\n assert os.path.isdir(self.directory)\n #sorting files topologically, files' format is -> data_num.h5 \n files_list = sorted(os.listdir(self.directory + '/' + self.name + '/'), key = lambda x: int(x.split(\"_\")[1].split(\".\")[0]))\n self.files_counter = 0\n if files_list != []: \n for file_name in files_list:\n self.memorize(name = file_name, error = 1)\n self.files_counter += 1\n self.files_tracker = file_name\n else:\n self.files_tracker = 'data_-1.h5'", "def add_file(self, letter, block_size):\n cluster = 1\n i = 0\n j = 0\n\n continuous = True\n while(i<self.size and j<block_size):\n if(self.disk_mem[i]==\".\"):\n self.disk_mem[i] = letter\n if not continuous:\n continuous = True\n cluster += 1\n j+=1\n else:\n continuous = False\n i+=1\n return cluster", "def upload_files(self):\n return 1 << 0", "def addFile(self, filePath): \n \n self.filePathDict[filePath] = [0,[]]", "def _inc_counter(self) -> None:\n self._state_storage.increment_counter()", "def fs_files_used(self):\n return self._fs_files_used", "def get_num_files(self):\n\t\tif self.num_files_in_set is None and self.set_type == FAST5SET_TARBALL:\n\t\t\tself.num_files_in_set = len(self.files)\n\t\treturn self.num_files_in_set", "def addPointCountToEdge(self, edge):\n attributes = edge.getAttributes()\n if self.edge_id__count.has_key(attributes.get(self.shapeFileUniqueId)):\n self.edge_id__count[attributes.get(self.shapeFileUniqueId)] = self.edge_id__count[attributes.get(self.shapeFileUniqueId)] + 1\n else:\n self.edge_id__count[attributes.get(self.shapeFileUniqueId)] = 1\n edge.setAttributes(attributes)", "def process(self):\n self.reader += 1", "def count(self):\n\t\treturn sum(read.copy for read in self.__iter__())", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = 
isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0", "def min_file_histogram(self):\n return self._min_file_histogram", "def _update_count(self):\n self._count = len(self._items)", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def reduce(self, array, index):\n\n return 0", "def __len__(self):\n return len(self.image_file_names)", "def fileno(self):\n return None" ]
[ "0.83802605", "0.6079216", "0.60073787", "0.5972287", "0.5871502", "0.58649766", "0.58356875", "0.5813897", "0.5656929", "0.5650024", "0.5628978", "0.5588129", "0.5575472", "0.55536973", "0.55466735", "0.55383646", "0.5529106", "0.5502226", "0.54975855", "0.5481056", "0.54359984", "0.5412819", "0.5331949", "0.52709615", "0.5260593", "0.52554363", "0.5216349", "0.5193865", "0.51864076", "0.51768965", "0.5171308", "0.5161445", "0.5160157", "0.5151611", "0.512528", "0.51065946", "0.51014495", "0.5088919", "0.50852406", "0.5068842", "0.5065401", "0.50386566", "0.50366586", "0.5024731", "0.5022408", "0.50217134", "0.501115", "0.50025594", "0.49950272", "0.4993197", "0.49873495", "0.49836847", "0.4965577", "0.4941178", "0.49391267", "0.4934542", "0.4930227", "0.49134943", "0.49080187", "0.49061012", "0.49033248", "0.4892175", "0.4879754", "0.48753807", "0.48634028", "0.48473728", "0.4834655", "0.48266846", "0.48257676", "0.48153964", "0.48047906", "0.48020706", "0.47917888", "0.47833127", "0.4780719", "0.47712538", "0.4770419", "0.47691157", "0.47683647", "0.47659448", "0.47630748", "0.47630462", "0.47630462", "0.47607407", "0.47483426", "0.47433466", "0.47384593", "0.47356316", "0.47345176", "0.47258684", "0.472575", "0.4718935", "0.4715791", "0.47103822", "0.46854302", "0.46809283", "0.46724072", "0.4670188", "0.46681052", "0.466746" ]
0.71174276
1
Subtract 1 from the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _decrement_file_counter(self):
    self._add_to_file_counter(-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if filename is None:\n return\n\n file_counter = self.file_counter\n # count = file_counter.get(filename, 0)\n # file_counter[filename] = count + i\n # if file_counter[filename] <= 0:\n count = file_counter.get(filename, 0) + i\n if count <= 0:\n # Remove the file from the dictionary if its count has\n # dropped to zero\n file_counter.pop(filename, None)\n else:\n file_counter[filename] = count\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass", "def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if", "def _increment_file_counter(self):\n self._add_to_file_counter(1)", "def __len__(self):\n return len(self.files[self.split])", "def unique_files(self):\n self._tempfiles[-1].ctr = -1", "def numberFiles(self):\n return self.n", "def getFileCount(self) -> int:\n ...", "def fileCount(self):\n pass", "def __len__(self) -> int:\n return len(self.files)", "def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n", "def __len__(self):\n return len(self.files)", "def stat_beg_file(self, filename):\n\n self.batchvals['numfiles'] += 1\n self.filevals['filename'] = filename\n self.filevals['start_time'] = time.time()\n\n return -1", "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return", "def __len__(self):\n\n return len(self._file_list)", "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n", "def sequential_files(self, ctr=0):\n self._tempfiles[-1].ctr = ctr", "def getFileCount(self):\n\n if self.filecount == -1:\n self.filecount = self.db.filecount()\n\n return self.filecount", "def __number_of_files(self):\n self.__get_files()\n return 
len(self.files)", "def __len__(self):\n return len(self.frame1_files)", "def file_count(self) -> int:\n if self.dataset is None:\n raise ValueError('No known dataset found!')\n return self._max_file_count", "def get_nrof_aux(self):\n aux = 0\n for l in self.aux_array:\n if l:\n aux += 1\n return aux", "def fix_index(self):\n if self.record_size <= self.size:\n self.record_size += 1\n if self.index % self.size == 0:\n self.isFull = True if len(self._storage) == self.size else False\n if self.replace_flag:\n self.index = 1\n return self.index\n else:\n sys.stderr.write('Experience replay buff is full and replace is set to FALSE!\\n')\n return -1\n else:\n self.index += 1\n return self.index", "def count_deleted_bytes(self): # FileObj.count_deleted_bytes\n if self.deleted:\n return self.bytes \n else:\n return 0", "def getnrfiles(self):\n return len(self.filenames)", "def reduce(self, array, index):\n\n return 0", "def n_total_files(self):\n return len(self.fileinfo)", "def __len__(self):\n return len(self.file_paths)", "def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(original))\n\n if not original:\n originally_on_disk = False\n original_subarray = None\n else:\n originally_on_disk = not original.in_memory\n original_subarray = original._subarray\n\n config = self.config\n logger.partitioning(\" config = {}\".format(config))\n\n if config[\"serial\"]:\n # --------------------------------------------------------\n # SERIAL\n # --------------------------------------------------------\n logger.partitioning(\" serial\")\n\n if config[\"readonly\"]:\n logger.partitioning(\" readonly=True\")\n\n if originally_on_disk:\n logger.partitioning(\" subarray originally on disk\")\n\n if config.get(\"to_disk\", False):\n # 1.1.1.1 The original subarray was on disk,\n # we don't want to keep the current\n # subarray in memory, and we are happy\n # to discard any changes that may have\n # been made to the subarray.\n logger.partitioning(\" 1.1.1.1 revert\")\n self.revert()\n elif free_memory() <= cf_fm_threshold():\n # 1.1.1.2 The original subarray was on disk,\n # we are happy to keep the current\n # subarray in memory, but there is not\n # enough free memory to do so.\n logger.partitioning(\n \" 1.1.1.2 revert ({} <= {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n self.revert()\n else:\n # 1.1.1.3 The original subarray was on disk\n # and there is enough memory to keep\n # the current subarray in memory\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # The original subarray was a temporary\n # file which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n del self.masked\n logger.partitioning(\n \" 1.1.1.3 del masked ({} > {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n\n else:\n logger.partitioning(\" subarray originally in memory\")\n if config.get(\"to_disk\", False):\n # 1.1.2.1 Original subarray was in memory and\n # we don't want to keep the current\n # subarray in memory\n logger.partitioning(\" 1.1.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.1.2.2 Original subarray was in memory and\n # unique but there is not enough\n # memory to keep the current subarray\n logger.partitioning(\" 1.1.2.2 to_disk\")\n 
self.to_disk(reopen=False)\n else:\n # 1.1.2.3 Original subarray was in memory and\n # unique and there is enough memory to\n # keep the current subarray in memory\n logger.partitioning(\" 1.1.2.3 pass\")\n pass\n else:\n # config['readonly'] is False\n if originally_on_disk:\n if config.get(\"to_disk\", False):\n # 1.2.1.1 Original subarray was on disk and\n # there and we don't want to keep the\n # array\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.1.2 Original subarray was on disk but\n # there is not enough memory to keep\n # it\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.1.3 Original subarray was on disk and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.1.3 pass\")\n del self.masked\n else:\n if config.get(\"to_disk\", False):\n # 1.2.2.1 Original subarray was in memory but\n # we don't want to keep it\n logger.partitioning(\" 1.2.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.2.2 Original subarray was an in memory\n # but there is not enough memory to\n # keep it\n logger.partitioning(\" 1.2.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.2.3 Original subarray was in memory and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.2.3 del masked\")\n del self.masked\n else:\n logger.partitioning(\"Partition.close: parallel\")\n # --------------------------------------------------------\n # PARALLEL\n # --------------------------------------------------------\n pass\n\n # if hasattr(self, '_original'):\n # del self._original\n\n # print(hasattr(self, 'config')),\n try:\n del self.config\n except AttributeError:\n pass", "def files_processed(self) -> int:\n with self.lock:\n return self._files_processed", "def file_pointer(self):\n\n try:\n self.__file.seek(self.__file.tell() - 1)\n except Exception as e:\n raise e", "def undo_scan(self, sub_array_id: int):", "def _get_file_length(self, file):\n self[file] = file.stat().st_size", "def get_num_files(self, file_type):\n return self.file_type_counter.get(file_type, 0)", "def files(self) -> _SeqNumSlicer:\n if self._seq_num_slicer is None:\n self._seq_num_slicer = _SeqNumSlicer(self)\n return self._seq_num_slicer", "def fileno(self):\n return 1", "def __len__(self):\n return int(np.floor(len(self.wav_paths)))", "def count_deleted(self): # FileObj.count_deleted\n if self.deleted:\n return 1\n else:\n return 0", "def __len__(self):\n return len(self.files_self_A_rgbd)", "def totalfiles(self):\n return len([sz for sz in self.iterate()])", "def touched_files(self, parent):", "def to_disk(self, reopen=True):\n # try:\n tfa = CachedArray(self.array)\n # except Exception:\n # return False\n\n fd, _lock_file = mkstemp(\n prefix=tfa._partition_file + \"_\", dir=tfa._partition_dir\n )\n close(fd)\n\n self.subarray = tfa\n _temporary_files[tfa._partition_file] = (\n tfa._partition_dir,\n _lock_file,\n set(),\n )\n\n if 
reopen:\n # Re-open the partition\n self.open(self.config)\n\n return True", "def fileno(self):\r\n raise NotImplementedError()", "def fileCounter(directory):", "def getNumTimeDataFiles(self):\n return self.nTimeDataFiles", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def count_lines(file_obj):\n for idx, line in enumerate(file_obj):\n pass\n file_obj.seek(0)\n return idx + 1", "def file_len(f):\n\n for n, l in enumerate(f, 1):\n pass\n f.seek(0) # rewind\n return n", "def fget(self):\n if not hasattr(self, \"_n\"):\n self._n = 0\n self._n += 1\n return self._n", "def min_file_histogram(self):\n return self._min_file_histogram", "def __len__(self):\n return self._fa.faidx.index[self.name].rlen", "def fileno(self):\n return None", "def fileno(self):\n return None", "def __len__(self):\n return len(self._current_block) - 1", "def _write_array_on_file(self, pa_array):\n pa_batch = pa.RecordBatch.from_struct_array(pa_array)\n self._num_bytes += pa_array.nbytes\n self.pa_writer.write_batch(pa_batch)", "def get_lenght(self):\n return len(self.filelist)", "def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n", "def file_num_increment(full_fpath):\r\n while os.path.isfile(full_fpath) == True:\r\n \r\n fpath, fext = os.path.splitext(full_fpath) #['C:\\Users\\Desktop\\file(1)', '.ext']\r\n\r\n if re.findall(\"[(]\\d+[)]\", fpath) != []: #Check if there is (x) in the path.\r\n for counter in range(1000): #Loop 1000 times\r\n if fpath.endswith(f\"({counter})\"): \r\n fpath = replace_last(fpath, f\"({counter})\", f\"({counter+1})\") #Replace the last occurence of (counter) in the string.\r\n full_fpath = fpath + fext\r\n break\r\n else: #here we pass for cases where (counter) is in the file/folder name itself. We skip them.\r\n continue\r\n else: #If there is no (counter), we create (1)\r\n counter = 1\r\n full_fpath = fpath + '(' + str(counter) + ')' + fext\r\n\r\n return full_fpath", "def getFileCount(self, startingWithPath=\"\"):\n return self.__controller._getRecordsCount(startingWithPath)", "def FreeFile():\n existing = VBFiles.getOpenChannels()\n if existing:\n return max(existing) + 1\n else:\n return 1", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def reset(self):\n self.all_files_idx = np.arange(self._div*self._nb_dir)\n\n if self.shuffle>1:\n np.random.shuffle(self.all_files_idx)\n\n self.idx_folder = self.all_files_idx//self._div\n self.idx_file = self.all_files_idx % self._div\n self.current_folder = self.idx_folder[0]\n self.current_file = self.idx_file[0]", "def fs_files_total(self):\n return self._fs_files_total", "def process(self):\n\n if len(self.files) == self._file_ptr:\n raise pipeline.PipelineStopIteration\n\n # Collect garbage to remove any prior CorrData objects\n gc.collect()\n\n # Fetch and remove the first item in the list\n file_ = self.files[self._file_ptr]\n self._file_ptr += 1\n\n # Set up product selection\n # NOTE: this probably doesn't work with stacked data\n prod_sel = None\n if self.only_autos:\n rd = andata.CorrReader(file_)\n prod_sel = np.array(\n [ii for (ii, pp) in enumerate(rd.prod) if pp[0] == pp[1]]\n )\n\n # Load file\n if (\n isinstance(self.freq_sel, slice)\n and (prod_sel is None)\n and (self.datasets is None)\n ):\n self.log.info(\n \"Reading file %i of %i. 
(%s) [fast io]\",\n self._file_ptr,\n len(self.files),\n file_,\n )\n ts = andata.CorrData.from_acq_h5_fast(\n file_, freq_sel=self.freq_sel, comm=self.comm\n )\n else:\n self.log.info(\n \"Reading file %i of %i. (%s) [slow io]\",\n self._file_ptr,\n len(self.files),\n file_,\n )\n ts = andata.CorrData.from_acq_h5(\n file_,\n datasets=self.datasets,\n distributed=True,\n comm=self.comm,\n freq_sel=self.freq_sel,\n prod_sel=prod_sel,\n )\n\n # Store file name\n ts.attrs[\"filename\"] = file_\n\n # Use a simple incrementing string as the tag\n if \"tag\" not in ts.attrs:\n tag = \"file%03i\" % self._file_ptr\n ts.attrs[\"tag\"] = tag\n\n # Add a weight dataset if needed\n if \"vis_weight\" not in ts.flags:\n weight_dset = ts.create_flag(\n \"vis_weight\",\n shape=ts.vis.shape,\n dtype=np.uint8,\n distributed=True,\n distributed_axis=0,\n )\n weight_dset.attrs[\"axis\"] = ts.vis.attrs[\"axis\"]\n\n # Set weight to maximum value (255), unless the vis value is\n # zero which presumably came from missing data. NOTE: this may have\n # a small bias\n weight_dset[:] = np.where(ts.vis[:] == 0.0, 0, 255)\n\n # Return timestream\n if self.use_draco_container:\n ts = containers.CHIMETimeStream.from_corrdata(ts)\n\n return ts", "def fill_values(self, array: List[float]) -> int:\n\n if self._max_collected_data_time <= 0:\n return 0\n\n current_time = self.start_time\n index = 0\n while current_time < self._max_collected_data_time and index < len(array):\n array[index] = self.feedback(current_time)\n index += 1\n current_time = index * self.time_step\n\n # subtract one since index - 1 is the actual last index that was\n # written to\n return index - 1", "def get_size(self, fileobject):\n # move the cursor to the end of the file\n fileobject.seek(0, 2)\n size = fileobject.tell()\n # move the cursor to the begin of the file\n fileobject.seek(0)\n return size", "def file_num(self):\n command = \"SELECT COUNT(id) FROM files;\"\n return self.c.execute(command)", "def number_idx(self, filename):\n with open(filename) as fh:\n firstline = fh.readline()\n parts = firstline.split('\\t')\n # only add if there are 4 parts\n if len(parts) != 4:\n return\n\n count = 1\n def writeline(fho, line, count):\n fho.write(line.rstrip() + '\\t' + str(count) + '\\n')\n\n with open(filename + '.tmp', 'w+b') as fho:\n writeline(fho, firstline, count)\n count += 1\n for line in fh:\n writeline(fho, line, count)\n count += 1\n\n shutil.move(filename + '.tmp', filename)", "def get_data_id(self):\n self.data_id = len(glob.glob(osp.join(self.save_dir, 'depth', '*npy')))\n return self.data_id", "def len(self):\n # print(self.processed_file_names)\n return self.len_", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n 
p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0", "def sacar_ficha(self, letra):\n \n self.__estado[letra][\"cantidad\"] = self.__estado[letra][\"cantidad\"] - 1\n self.__cant_fichas = self.__cant_fichas - 1\n self.__fichas_disponibles.remove(letra)\n self.__actualizar_letra(letra)", "def file_count(self) -> str:\n return pulumi.get(self, \"file_count\")", "def array(self):\n config = self.config\n\n unique_array = config[\"unique_subarray\"]\n\n p_axes = self.axes\n p_flip = self.flip\n p_part = self.part\n p_units = self.Units\n p_shape = self.shape\n p_location = self.location\n subarray = self._subarray\n\n len_p_axes = len(p_axes)\n\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is not in memory.\n #\n # It could be in a file on disk or implied by a FileArray\n # object, etc.\n # --------------------------------------------------------\n self._original = self.copy()\n\n unique_array = True\n update = True\n copy = False\n\n if not p_part:\n indices = Ellipsis\n else:\n indices = tuple(p_part)\n\n # Read from a file into a numpy array\n p_data = subarray[indices]\n\n # We've just copied p_data from disk, so in place changes\n # are not possible\n in_place_changes = False\n else:\n # --------------------------------------------------------\n # The subarray is in memory\n # --------------------------------------------------------\n update = config[\"update\"]\n\n if p_part:\n p_data = get_subspace(subarray, p_part)\n elif not unique_array:\n p_data = subarray.view()\n else:\n p_data = subarray\n\n copy = config[\"extra_memory\"]\n\n # In place changes to p_data might be possible if we're not\n # copying the data\n in_place_changes = not copy\n\n if not p_data.ndim and isinstance(p_data, (numpy_number, numpy_bool_)):\n # --------------------------------------------------------\n # p_data is a numpy number (like numpy.int64) which does\n # not support assignment, so convert it to a numpy array.\n # --------------------------------------------------------\n p_data = numpy_array(p_data)\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n\n masked = numpy_ma_isMA(p_data)\n if masked:\n # The p_data is a masked array\n if p_data.mask is numpy_ma_nomask or not numpy_ma_is_masked(\n p_data\n ):\n # There are no missing data points so recast as an\n # unmasked numpy array\n p_data = 
p_data.data\n masked = False\n # --- End: if\n\n if masked:\n # Set the hardness of the mask\n if config[\"hardmask\"]:\n p_data.harden_mask()\n else:\n p_data.soften_mask()\n # --- End: if\n\n self.masked = masked\n\n # ------------------------------------------------------------\n # Make sure that the data array has the correct units. This\n # process will deep copy the data array if required (e.g. if\n # another partition is referencing this numpy array), even if\n # the units are already correct.\n # ------------------------------------------------------------\n func = config.get(\"func\")\n units = config[\"units\"]\n if func is None:\n if not p_units.equals(units) and bool(p_units) is bool(units):\n func = Units.conform\n\n if func is not None:\n inplace = not copy\n p_data = func(p_data, p_units, units, inplace)\n p_units = units\n\n if not inplace:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n flip = config.get(\"flip\", None)\n if flip or p_flip:\n flip_axes = set(p_flip).symmetric_difference(flip)\n else:\n flip_axes = None\n\n axes = config[\"axes\"]\n\n if p_data.size > 1:\n # --------------------------------------------------------\n # Flip axes\n # --------------------------------------------------------\n if flip_axes:\n indices = [\n (\n slice(None, None, -1)\n if axis in flip_axes\n else slice(None)\n )\n for axis in p_axes\n ]\n p_data = p_data[tuple(indices)]\n\n # --------------------------------------------------------\n # Transpose axes\n # --------------------------------------------------------\n if p_axes != axes:\n iaxes = [p_axes.index(axis) for axis in axes if axis in p_axes]\n\n if len_p_axes > len(iaxes):\n for i in range(len_p_axes):\n if i not in iaxes:\n # iaxes.append(i)\n iaxes.insert(i, i)\n # --- End: if\n\n p_data = numpy_transpose(p_data, iaxes)\n # --- End: if\n\n # ------------------------------------------------------------\n # Remove excessive/insert missing size 1 axes\n # ------------------------------------------------------------\n if p_shape != p_data.shape:\n # if len_p_axes != len(p_shape):\n p_data = p_data.reshape(p_shape)\n\n # ------------------------------------------------------------\n # Apply the auxiliary mask\n # ------------------------------------------------------------\n auxiliary_mask = config[\"auxiliary_mask\"]\n if auxiliary_mask:\n for mask in auxiliary_mask:\n if mask.any():\n if not masked:\n p_data = p_data.view(numpy_ma_MaskedArray)\n masked = True\n\n p_data.mask = (mask | p_data.mask).array\n # --- End: for\n\n self.masked = True\n\n # ------------------------------------------------------------\n # Convert the array's data type\n # ------------------------------------------------------------\n p_dtype = p_data.dtype\n dtype = config.get(\"dtype\", None)\n if dtype is not None and dtype != p_dtype:\n try:\n p_data = p_data.astype(dtype) # Note: returns a copy\n except ValueError:\n raise ValueError(\n \"Can't recast partition array from {} to {}\".format(\n p_dtype.name, dtype.name\n )\n )\n else:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n # ------------------------------------------------------------\n # Copy the array\n # -----------------------------------------------------------\n if copy:\n if p_dtype.char != \"O\":\n if not masked or p_data.ndim > 0:\n p_data = p_data.copy()\n else:\n # This is because numpy.ma.copy doesn't work for\n # 
scalar arrays (at the moment, at least)\n p_data = numpy_ma_masked_all((), p_data.dtype)\n\n # We've just copied p_data, so in place changes are\n # not possible\n in_place_changes = False\n else:\n # whilst netCDF4.netcdftime.datetime is mucking bout,\n # don't copy!!!!\n # p_data = _copy(p_data)\n pass\n # --- End: if\n\n # ------------------------------------------------------------\n # Update the partition\n # ------------------------------------------------------------\n if update:\n self.subarray = p_data # ?? DCH CHECK\n self.Units = p_units\n self.part = []\n self.axes = axes\n self.flip = flip\n self.flatten = []\n self.shape = p_shape\n self.location = p_location\n\n self._in_place_changes = in_place_changes\n\n # ------------------------------------------------------------\n # Return the numpy array\n # ------------------------------------------------------------\n return p_data", "def len(self):\n return self._fsize - self._fp.tell()", "def files_processed(self) -> float:\n return pulumi.get(self, \"files_processed\")", "def DeleteFiles(self, min_size=0):\n\n ndeleted = 0\n for filename, counts in list(self.mCounts.items()):\n if counts < min_size:\n os.remove(filename)\n ndeleted += 1\n\n return ndeleted", "def setNextFile(self):\n\n if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):\n self.nReadFiles=self.nReadFiles+1\n if self.nReadFiles > self.nTotalReadFiles:\n self.flagNoMoreFiles=1\n raise schainpy.admin.SchainWarning('No more files to read')\n\n print('------------------- [Opening file] ------------------------------',self.nReadFiles)\n self.nReadBlocks = 0\n #if self.nReadBlocks==0:\n # self.readFirstHeader()", "def fileno(self):\n return self.file.fileno()", "def len(self):\n return self._fsize - self._tell", "def _setcounter():\n fname = os.path.basename(camera.status.lastfile)\n tname = fname.split('.')[0]\n i = len(tname)-1\n if i > -1:\n while tname[i].isdigit() and i>-1:\n i = i - 1\n nname = fname[:-4]\n bname = tname[:i+1]\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9].cntr'):\n os.remove(file)\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9][0-9].cntr'):\n os.remove(file)\n f = open('/data/counters/'+nname+'cntr','w')\n f.close()", "def fileno(self):\n raise io.UnsupportedOperation", "def getFirstObjectIndex(self):\n if not self.fileInfo.isEsp(): raise StateError(_('FileRefs.renumberObjects is for esps only.'))\n for cell in self.cells:\n objects = cell.getObjects()\n for object in objects.list():\n if object[0] == 0:\n return object[1]\n return 0", "def test_op_sub_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_r = offl_a - o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def __calculate_number_of_frames(self):\n # Save current position\n current_pos = self.__file_object.tell()\n\n # Go 
to start of first frame\n self.__file_object.seek(self.__first_frame_raw_data_position)\n self.number_of_frames = 0\n\n while True:\n if not self.__file_object.read(self.__frame_raw_data_size):\n break\n\n self.__file_object.readline()\n self.number_of_frames += 1\n\n # Restore file pointer\n self.__file_object.seek(current_pos)\n print('Number of frames:', self.number_of_frames)", "def trace_file_len(fname):\n try:\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i - 1\n except FileNotFoundError:\n return 0", "def get_file(self) -> int:\r\n return self.file", "def prune_empty(self): # EntryList.prune_empty\n prevCount = self.count_deleted()\n for name, e in allFiles.contents.iteritems():\n e.prune_empty()\n return allFiles.count_deleted() - prevCount", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def refreshSize(self):\n if self.isLoaded:\n return 0\n else:\n return self.fileInfo.size", "def Rear(self):\n return -1 if self.isEmpty() else self.__buffer[(self.__start+self.__size-1) % len(self.__buffer)]", "def seek(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_seek(self, *args)", "def in_cached_file(self):\n return isinstance(self._subarray, CachedArray)", "def getModificationNumber(self) -> long:\n ...", "def size(self) -> int:\n return sum(p.size for p in self.iterfiles())", "def tell(self):\n return self._upload_position", "def has_subfile(self) -> bool:\n\t\tself._update_subfiles()\n\t\treturn bool(len(self.subfiles))", "def __len__(self):\n return len(self.array)", "def __len__(self):\n return len(self.array)" ]
[ "0.7747842", "0.63648546", "0.6072333", "0.60351396", "0.5918244", "0.57059807", "0.56674904", "0.5664642", "0.56031275", "0.5573989", "0.5520654", "0.5453089", "0.5448838", "0.54281026", "0.5422772", "0.5378375", "0.5307265", "0.5244931", "0.52217174", "0.5220032", "0.5216291", "0.51331407", "0.5115629", "0.5106185", "0.50907373", "0.5089161", "0.5085614", "0.507725", "0.5066728", "0.50590724", "0.50359875", "0.5020529", "0.50147223", "0.49775988", "0.49749458", "0.49738547", "0.49649253", "0.49604303", "0.49502134", "0.49456447", "0.4944183", "0.49393135", "0.49268013", "0.49217892", "0.48981854", "0.48959568", "0.48843467", "0.48842198", "0.48836866", "0.4881064", "0.4877542", "0.48724556", "0.4844187", "0.4844187", "0.48379225", "0.48361284", "0.4816804", "0.48031083", "0.4801541", "0.47996238", "0.4778122", "0.47719982", "0.47565272", "0.47562754", "0.4756017", "0.47559637", "0.47401142", "0.4727482", "0.47270516", "0.4725971", "0.47178772", "0.47169724", "0.47143152", "0.4712884", "0.47011933", "0.47006902", "0.4695959", "0.46883664", "0.46880865", "0.46875745", "0.46849707", "0.46837255", "0.46647176", "0.4656943", "0.46559966", "0.465177", "0.46486926", "0.4644861", "0.46445367", "0.4642497", "0.46409014", "0.46387452", "0.4635584", "0.46266317", "0.4622208", "0.46218398", "0.46215257", "0.46171033", "0.4611914", "0.4611914" ]
0.6648334
1
Add the lock files listed in lock_files to the list of lock files managed by other ranks.
def _update_lock_files(self, lock_files):
    _, _lock_file, _other_lock_files = _temporary_files[
        self._subarray._partition_file
    ]
    _other_lock_files.update(set(lock_files))
    if _lock_file in _other_lock_files:
        # If the lock file managed by this rank is in the list of
        # lock files managed by other ranks, remove it from there
        _other_lock_files.remove(_lock_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LockFiles(self, entries):\n self._model.lock(entries)", "def add_mock_files(self, file_list):\n self._mock_file_list.extend(file_list)", "def thread_file_list(self):\n # Establish connection for this thread\n connection = self.connect()\n\n # Set working directory on server\n connection.chdir(self.settings.server_dir)\n\n while len(self.files) > 0:\n self.lock.acquire()\n file = self.files.pop()\n self.lock.release()\n\n # Pass popped file to function\n try:\n self.upload_file(connection, file)\n except EOFError as error:\n self._logger.log(logging.CRITICAL, \"Connection lost during \"\n \"file transfer\")\n self._logger.log(logging.CRITICAL, str(error))\n\n # Establish connection for this thread\n connection = self.connect()\n\n # Set working directory on server\n connection.chdir(self.settings.server_dir)\n\n # Lock and append filename to list to retry\n self.lock.acquire()\n self.files.append(file)\n self.lock.release()\n\n except FileNotFoundError as error:\n self._logger.log(logging.CRITICAL, \"File \" + file + \" not \"\n \"found\")\n self._logger.log(logging.CRITICAL, str(error))\n\n except IOError:\n self.lock.acquire()\n self.files.append(file)\n self.lock.release()\n\n connection.close()", "def get_file_list(self):\n try:\n for filename in os.listdir(SHARED_DIR):\n self.file_list.append(filename)\n except Exception as e:\n print \"Error: retriving file list, %s\" % e", "def _distribute_files(self, distribution='one'):\n for k, files in self.file_lists.items():\n self.idle[k] = False\n if distribution.lower() == 'single':\n self.distribution_comms[k] = None\n if self.comm.rank >= 1:\n self.local_file_lists[k] = None\n self.idle[k] = True\n else:\n self.local_file_lists[k] = files\n elif distribution.lower() == 'even':\n if len(files) <= self.comm.size:\n if self.comm.rank >= len(files):\n self.local_file_lists[k] = None\n self.distribution_comms[k] = None\n self.idle[k] = True\n else:\n self.local_file_lists[k] = [files[self.comm.rank],]\n self.distribution_comms[k] = self.comm.Create(self.comm.Get_group().Incl(np.arange(len(files))))\n else:\n files_per = int(np.floor(len(files) / self.comm.size))\n excess_files = int(len(files) % self.comm.size)\n if self.comm.rank >= excess_files:\n self.local_file_lists[k] = list(files[int(self.comm.rank*files_per+excess_files):int((self.comm.rank+1)*files_per+excess_files)])\n else:\n self.local_file_lists[k] = list(files[int(self.comm.rank*(files_per+1)):int((self.comm.rank+1)*(files_per+1))])\n self.distribution_comms[k] = self.comm", "def removeLocks():\n global lockFnames\n for lockFname in lockFnames:\n if isfile(lockFname):\n logging.debug('Removing lockfile %s' % lockFname)\n os.remove(lockFname)\n\n lockFnames = []", "def pipfile_lock_names(self):\n return ext_split(self.pipfile_locks, \"Pipfile.lock\")", "def add_filelist_to_cache(self, file_list=None):\n if file_list is None:\n return False\n for fileinfo in file_list:\n fn_ = fileinfo.filename\n self.cache_file_list_dict[fn_] = fileinfo\n return True", "def add(self, files, mask):\n pass", "def _assignUIDs(self):\n for messagePath in self.maildir:\n\n messageFile = os.path.basename(messagePath)\n\n if not messageFile in self.metadata['uids']:\n\n self.metadata['uids'][messageFile] = self.metadata['uidnext']\n\n self.metadata['uidnext'] += 1\n\n self.saveMetadata()", "def add_mock_files_after_load(self, file_list):\n self._mock_file_list_after.extend(file_list)", "def add_files(self, files, commit_msg):\n paths = []\n for rpath in files:\n path = os.path.join(self.repodir, 
rpath)\n paths.append(path)\n with open(path, 'w') as f:\n f.write(files[rpath])\n if paths:\n self.git_cmd(['add'] + paths)\n self.commit(commit_msg)", "def lock(self):\n logging.debug(\"Locking %s (and subdirectories)\" % self.directory)\n LOCK_ACL.append(target=self.directory)\n for subdirectory in self._subdirectories():\n LOCK_ACL.append(target=subdirectory)", "def create_files(self):\n self._do_action_under_lock(self._create_files)", "def _save_sync_list(self):\n\t\tfp = open(self.sync_file, 'w')\n\t\tself.sync_list.write(fp)\n\t\tfp.close()", "def _find_locked_by(self):\n fstat_flags = NTR('otherLock | otherOpen0 & headType=*+l')\n any_locked_files = {} # depot_path : user\n for branch_chunk in self.ctx.iter_writable_branch_chunks():\n # Skip any newly defined branches: they're new, won't contain any\n # files yet, and won't get a view definition until later at per-\n # commit preflight time.\n bvl = [b for b in branch_chunk if b.view_lines]\n if not bvl:\n continue\n with self.ctx.switched_to_union(bvl):\n r = self.ctx.p4run('fstat', '-F', fstat_flags, '-m1',\n '//{}/...'.format(self.ctx.p4.client),\n log_warnings=logging.DEBUG)\n # Collect a dictionary of the locked files from the writable union of branch views\n for lf in r:\n user = lf['otherOpen'][0] if 'otherOpen' in lf else NTR('<unknown>')\n any_locked_files[lf['depotFile']] = user\n return any_locked_files", "def add_list(self, files):\n if files:\n if not list:\n self.set_list(files)\n else:\n self.playlist.extend(files)", "def cmd_sync(self, args, list_only=False):\n if not list_only:\n log.info('synchronizing repository files...')\n for curdir, dirs, files in os.walk(self.files_path):\n for f in files:\n ignore_file = False\n repo_path = os.path.join(curdir, f).replace(self.files_path, '')\n for ignored in self.ignored_files:\n if ignored.startswith('/'):\n f = os.path.join(repo_path, f)\n if fnmatch(f, ignored):\n log.debug('ignored file ({}): {}'.format(ignored, repo_path[1:]))\n ignore_file = True\n break\n if ignore_file:\n continue\n fpath = os.path.join(curdir, f)\n linkpath = fpath.replace(self.files_path, self.homedir)\n if not os.path.exists(linkpath) and not os.path.islink(linkpath):\n log.info('synced: {}'.format(linkpath))\n if not list_only:\n log.debug('creating link: {}'.format(linkpath))\n os.symlink(fpath, linkpath)\n else:\n if os.path.islink(linkpath):\n # target path already exists\n frealpath = os.path.realpath(linkpath)\n if frealpath != fpath:\n log.warning('conflict (wrong link): {} -> {}'.format(linkpath, frealpath))\n if not list_only:\n if not args.force:\n if not log.ask_yesno('overwrite existing link?', default='n'):\n continue\n log.debug('installing link in place of existing link: {}'.format(linkpath))\n os.unlink(linkpath)\n os.symlink(fpath, linkpath)\n else:\n log.info('OK: {}'.format(linkpath))\n else: # linkpath is a regular file\n log.warning('conflict (file already exists): {}'.format(linkpath))\n if not list_only:\n if not args.force:\n if not log.ask_yesno('overwrite existing file?', default='n'):\n continue\n log.debug('installing link in place of existing file: {}'.format(linkpath))\n os.unlink(linkpath)\n os.symlink(fpath, linkpath)\n log.info('done')", "def loadFileList(self):\r\n try:\r\n data = open(self.filelist_file, 'rb')\r\n except IOError:\r\n '''print \"No SRTM cached file list. 
Creating new one!\"'''\r\n if self.offline == 0:\r\n self.createFileList()\r\n return\r\n try:\r\n self.filelist = pickle.load(data)\r\n except:\r\n '''print \"Unknown error loading cached SRTM file list. Creating new one!\"'''\r\n if self.offline == 0:\r\n self.createFileList()", "def add_files(self, filenames):\n for filename in filenames:\n self.add_file(filename)", "def half_sync(self,delay):\n self.count = 1\n while not self.shutdown and self.loggedin.autosync:\n time.sleep(delay)\n self.count += 1\n self.filelist = self.loggedin.list()\n print \"Pinged server for changes\"\n self.synced = []\n if self.filelist:\n for f in self.filelist:\n path = self.loggedin.sanitize_path(f['path'])\n path = os.path.join(self.onedirrectory, path)\n if not os.path.exists(path):\n os.makedirs(path)\n if f['name'] and not self.loggedin.exists(f):\n exists, data = self.loggedin.getfile(f)\n if exists:\n with open(self.loggedin.make_path(f), 'a') as new_file:\n new_file.write(data)\n new_file.close()\n elif f['name'] and str(self.loggedin.hash_file(f)) != str(f['hash']):\n self.loggedin.sendfile(f['name'], f['path'])\n if self.loggedin.make_path(f) not in self.synced:\n self.synced.append(self.loggedin.make_path(f))\n os_walk = os.walk(self.loggedin.onedirrectory)\n for directory in os_walk:\n for f in directory[2]:\n if f.startswith('.'):\n continue\n path = os.path.join(directory[0], f)\n if path not in self.synced:\n try:\n os.remove(path)\n except OSError, e:\n print (\"Error: %s - %s.\" % (e.filename,e.strerror))", "def ingest(self, files):\n for file in files:\n self.files.add(file)", "def updateFileList(self, fileList):\n\n if fileList == self.fileList:\n return 0\n\n self.mutex.acquire()\n # init = time.time()\n # \n # while(self.bussy):\n # sleep(0.1)\n # if time.time() - init > 2*self.period:\n # return 0\n \n self.fileList = fileList\n self.mutex.release()\n return 1", "def update_list(self):\n\t\ttry:\n\t\t\tassert(not self.master.TransactionInProgress)\n\t\t\tself.master.Vacuum()\n\n\t\t\tself.fetch_repo_file(\"/torrent\", self.config[\"daemon\"][\"rootdir\"] + \"/torrent\", \"wb\")\n\t\t\tself.master.master = json.loads(self.fetch_repo_file(\"/package-index.json\", True).decode('utf-8'))\n\t\t\tself.torrent_info = lt.torrent_info(self.config[\"daemon\"][\"rootdir\"] + \"/torrent\")\n\n\t\t\t\"\"\" Find pre-downloaded files \"\"\"\n\t\t\tpre_downloaded = {}\n\t\t\ti = 0\n\t\t\tfor f in self.torrent_info.files():\n\t\t\t\tif self.valid_tpkg_file(f.path):\n\t\t\t\t\tpre_downloaded[i] = f\n\t\t\t\ti += 1\n\n\n\t\t\t\"\"\" Default torrent params \"\"\"\n\t\t\tparams = {\n\t\t\t\t\"save_path\": self.config[\"daemon\"][\"rootdir\"],\n\t\t\t\t\"ti\": self.torrent_info\n\t\t\t}\n\t\t\t\n\t\t\t\"\"\" Set torrent handler \"\"\"\n\t\t\tself.handler = self.ses.add_torrent(params)\n\n\t\t\t\"\"\" Set chunk priority to 0 (don't download) \"\"\"\n\t\t\tfor p in range(self.torrent_info.num_pieces()):\n\t\t\t\tself.handler.piece_priority(p, 0)\n\n\t\t\tfor i in self.torrent_info.files():\n\t\t\t\tif i in pre_downloaded:\n\t\t\t\t\tpr = self.torrent_info.map_file(i, 0, pre_downloaded[i].size)\n\t\t\t\t\tn_pieces = pr.length / self.torrent_info.piece_length() + 1\n\n\t\t\t\t\tfor p in range(self.torrent_info.num_pieces()):\n\t\t\t\t\t\tif p in range(pr.piece, pr.piece + n_pieces):\n\t\t\t\t\t\t\tself.handler.piece_priority(p, 7)\n\n\t\texcept Exception as e:\n\t\t\tsys.stderr.write(\"Failed to update package list: {0}\\n\".format(e))\n\t\t\ttraceback.print_exc()\n\t\t\tself.write_line(\"Error: XXX - Failed to 
update package list.\")", "def _add_files(self, index_key, media_key,\n new_list, fundamentals):\n _index=fundamentals.get(index_key, {})\n _media=fundamentals.get(media_key, {})\n for _file in new_list:\n _data=self._item_from_index(_file, 'data', _media)\n if not _data:\n self.log('Failed to write file %s due to no data'%_file)\n continue\n if self._item_from_index(_file, None, _index) is None:\n _origin=self._item_from_index(_file, 'origin', _media)\n if _origin=='ringers':\n _path=self.protocolclass.RT_PATH\n elif _origin=='sounds':\n _path=self.protocolclass.SND_PATH\n elif _origin=='images':\n _path=self.protocolclass.PIC_PATH\n else:\n selg.log('File %s has unknown origin, skip!'%_file)\n continue\n _file_name=_path+'/'+_file\n try:\n self.writefile(_file_name, _data)\n except:\n self.log('Failed to write file '+_file_name)\n if __debug__:\n raise", "def __add_files(self, snapshot):\n\n # Why partition()?\n # Don't delete a parent after adding its child:\n # M 100644 deba01f cookbooks/apt/README\n # D cookbooks/apt <== BUG, would also delete/omit README\n\n partitioned = p4gf_util.partition(lambda x:x.is_delete(), snapshot)\n for p4file in partitioned:\n path = self.__relative_path(p4file)\n if not path:\n continue\n if path == p4gf_const.P4GF_EMPTY_CHANGELIST_PLACEHOLDER:\n # Perforce-only artifact. Never copy this into Git.\n continue\n if p4file.is_delete():\n self.__append(\"D {0}\\n\".format(path))\n else:\n if p4file.sha1 == \"\":\n LOG.debug(\"skipping missing revision {}#{}\".format(path, p4file.revision))\n continue\n if p4file.is_x_type():\n mode = \"100755\"\n elif p4file.is_symlink():\n mode = \"120000\"\n else:\n mode = \"100644\"\n self.__append(\"M {0} {1} {2}\\n\".\n format(mode, p4file.sha1, path))", "def addFiles(self, filePaths): \n \n for filePath in filePaths: \n self.addFile(filePath)", "def add(self,filelist):\n\n self.ws.execute('svn add %s' % (' '.join(filelist)))", "def add_files(self,count=None):\n message_buffer =[]\n if count is None:\n count = len(self.files)\n while count:\n count -= 1\n message_buffer.append((count,base64.b64encode(self.files.pop()),0)) # required to maintain compatibility with\n if len(message_buffer) > 9:\n self.queue.write_batch(message_buffer)\n message_buffer = []\n self.queue.write_batch(message_buffer)", "def write_manifests( file_lists, target_dir, output_dir ):\n for i, lst in enumerate( file_lists ):\n with open( os.path.join( output_dir, \"manifest-{}.txt\".format( i ) ), \"w\" ) as fout:\n for r in lst:\n fout.write( insert_rsync_marker( r, target_dir ) + \"\\n\" )", "def sync_dir(self):\n\n # mark the trajectories that we have seen\n trajectories = os.listdir(self.trajectory_dir)\n \n for trajectory_file in trajectories:\n\n if trajectory_file not in self.seen_trajectories:\n\n created = self.upload_trajectory(trajectory_file)\n self.seen_trajectories.add(trajectory_file)\n\n if created is True:\n print \"Total of %s solved trajectories\" % \\\n SolvedTrajectory.objects.count(), created", "def add_files(self, paths):\n for path in paths:\n self.add_file(path)", "def update_scores_list(list_file, score_file):\n fnames = []\n head, tail = os.path.split(score_file)\n if os.path.exists(list_file):\n with open(list_file, \"r\") as f:\n fnames = json.loads(f.read())\n if tail not in fnames:\n fnames.append(tail)\n fnames.sort()\n fnames.reverse()\n else:\n fnames.append(tail)\n\n with open(list_file, \"w\") as f:\n print \"writing %s...\" % list_file\n f.write(json.dumps(fnames))", "def _populate_index(self):\n 
os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))", "def test_multiple_file_locks(tmp_path, monkeypatch):\n monkeypatch.setenv(\"RAY_TMPDIR\", str(tmp_path))\n with TempFileLock(path=\"abc.txt\"):\n with TempFileLock(path=\"subdir/abc.txt\"):\n assert RAY_LOCKFILE_DIR in os.listdir(tmp_path)\n # We should have 2 locks, one for abc.txt and one for subdir/abc.txt\n assert len(os.listdir(tmp_path / RAY_LOCKFILE_DIR)) == 2", "def get_locker_room_assgnmnt_files_list():\n files = os.listdir(lrs_path)\n files.sort(reverse=True) # Sort list newest to oldest\n return files", "def SetLocks(self):\n\t\tthisRun = self.RunFlag\n\t\tself.CPUTempLock = FileLock(self.BuildFilePath(thisRun, \"cpu.png\"))\n\t\tself.MEMTempLock = FileLock(self.BuildFilePath(thisRun, \"mem.png\"))\n\t\tself.SWPTempLock = FileLock(self.BuildFilePath(thisRun, \"swp.png\"))\n\t\tself.LATTempLock = FileLock(self.BuildFilePath(thisRun, \"lat.png\"))\n\t\tself.LEGTempLock = FileLock(self.BuildFilePath(thisRun, \"legenda.png\"))\n\t\tself.CPUStatLock = FileLock(self.BuildFilePath(thisRun, \"cpu_stat.png\"))\n\t\tself.MEMStatLock = FileLock(self.BuildFilePath(thisRun, \"mem_stat.png\"))\n\t\tself.SWPStatLock = FileLock(self.BuildFilePath(thisRun, \"swp_stat.png\"))\n\t\tself.LATStatLock = FileLock(self.BuildFilePath(thisRun, \"lat_stat.png\"))", "def get_hash_curr_files(self):\n temp = None\n for f in self.file_list:\n if not os.stat(f).st_size:\n self.print_to_log('Skipping Zero Length File: ' + f)\n else:\n try:\n\n batch_file = open(f,'U')\n time_stamp = self.get_timestamp()\n temp = ['pass',\n time_stamp,\n self.get_hash(batch_file),\n '1',\n time_stamp,\n batch_file.name[batch_file.name.rfind('\\\\') + 1 :]]\n\n batch_file.close()\n self.hash_curr_files[temp[2]] = temp\n self.print_to_log(\"successfully hashed file: \" + temp[5])\n except IOError:\n self.print_to_log('Cannot Open File: ' + f)\n except:\n self.print_to_log('Unknown Error, Exiting')\n raise", "def update(self):\n if os.path.isdir(self.full_path):\n self.file_list = os.listdir(self.full_path)\n else:\n self.file_list = []", "def append_archive_to_already_processed_list(config: Config, file_name: str) -> None:\n with open(config.already_processed, 'a') as f:\n f.write(f\"{file_name}\\n\")", "def addfiles(self, filelist):\r\n for tmpc in filelist:\r\n self._filelist.append(tmpc)\r\n tmp_energy=self.readenergy(filelist)\r\n for tmpdat in tmp_energy:\r\n self._energy.append(tmpdat)\r\n return tmp_energy", "def _set_locks(locks):\n _local.entry_transaction = locks", "def copy_unmanaged_files(self, reader):\n for entry in reader.index:\n if (\n entry.path == \"l10n.toml\"\n or entry.path.startswith(\"templates/\")\n or entry.path.startswith(\"locales/\")\n ):\n continue\n\n self.index.add(entry)", "def updateDiskFileList(self):\n\n if self.m_curPath:\n # Get me just the files please.\n for _, _, files in os.walk(self.m_curPath):\n break\n else:\n files = []\n\n files.sort()\n if files != self.m_diskNames:\n self.m_diskNames[:] = files\n self.m_newNames[:] = []\n\n self.populateFileList()", "def _expireReadLocks(self):\n readLockFileName = ReadLock.fileName\n for name in os.listdir(self.dir):\n if not name.startswith(readLockFileName):\n continue\n LockDir = os.path.join(self.dir, name)\n ExclusiveLock(LockDir, self.readlocktimeout).expire()", "def handleFileNames(self):\n \n # expand the wild cards - but do not create the full 
directory path\n # as the work sub directories have yet to be created.\n if not os.path.exists(self.shareArea):\n m = 'Cannot set self.auxfiles due to non-existent share directory: %s' % self.shareArea\n self.logger.fatal(m)\n raise RTTCodingError(m)\n\n # resolve auxFile patterns to file names\n auxFiles = []\n for pattern in self.auxFilePatterns:\n base, fnpattern = os.path.split(pattern)\n srcDir = os.path.normpath(os.path.join(self.shareArea, base))\n filesInShare = os.listdir(srcDir)\n auxFiles.extend([os.path.join(base,file) for file in filesInShare if fnmatch.fnmatch(file, fnpattern)])\n\n self.auxFiles = unique(auxFiles)", "def _lock_all_partitions(self, shared=False):\n pass", "def add_files(*files):\n if _files is None:\n return\n\n for file in files:\n _files.add(file)", "def updateCache(self):\n for root, dirs, files in os.walk(cachedFilesPath):\n for file in files:\n if file.endswith(cachedFileExtensionSuffix):\n path = os.getcwd()+'/'+cachedFilesPath+file\n with open(path, mode='r') as f:\n payload_json = f.read()\n payload_obj=jsonpickle.decode(payload_json)\n r= self.upload(payload_obj)\n if isinstance(r, types.NoneType):\n #do nothing\n print(\"\")\n else:\n if r.status_code == 200 :\n #uploaded!\n if cacheArhive:\n #move it to archive\n dst=os.getcwd()+'/'+cachedArchivePath+file\n shutil.move(path, dst)\n print(\"archived log: \", file)\n else:\n #delete it\n os.remove(path)", "def _add_paths_to_queue(self, file_paths_to_enqueue: list[str], add_at_front: bool):\n new_file_paths = list(p for p in file_paths_to_enqueue if p not in self._file_path_queue)\n if add_at_front:\n self._file_path_queue.extendleft(new_file_paths)\n else:\n self._file_path_queue.extend(new_file_paths)\n Stats.gauge(\"dag_processing.file_path_queue_size\", len(self._file_path_queue))", "def _update_cfg_from_files(self, files):\n\t\tfor file in files:\n\t\t\twith open(self.SettingsFolder + file) as f:\n\t\t\t\tself._add_cfg_to_list(file[:-4], yaml.load(f))", "def resortFiles(fileList):\n if fileList is None or not len(fileList):\n print \"SRT:nofiles in the dictionary.\"\n sys.exit()\n\n new_file_list = list()\n for f in fileList:\n new_file_list.append(PFileStat(dir_source, f, os.lstat(dir_source + \"/\" + f)))\n\n new_file_list.sort(key=lambda i: i.st_mtime)\n return new_file_list", "def create_modad_lock(self):\n\n Logger.create_lock_file()\n\n with open(\"modad.lock\", \"w\") as file:\n file.write(json.dumps(self.commit_hashes))", "def load_installed_file_list(self):\n listpath = os.path.join(self._build_root, 'src', 'gromacs', 'installed-headers.txt')\n with open(listpath, 'r') as installedfp:\n for line in installedfp:\n path = line.strip()\n if not os.path.isabs(path):\n self._reporter.input_error(\n \"installed file not specified with absolute path: {0}\"\n .format(path))\n continue\n relpath = self._get_rel_path(path)\n if relpath not in self._files:\n self._reporter.input_error(\n \"installed file not in source tree: {0}\".format(path))\n continue\n self._files[relpath].set_installed()", "def insertfiles(self, pos, filelist):\r\n for i in range(0, len(filelist)):\r\n self._filelist.insert(pos+i, filelist[i])\r\n tmp_energy=self.readenergy(filelist)\r\n for i in range(0, len(tmp_energy)):\r\n self._energy.insert(pos+i, tmp_energy[i])\r\n return tmp_energy", "def sync_all_lists(self):\r\n print(\"Started syncing influencer master lists with DB\")\r\n screen_names_on_lists = []\r\n self._add_or_update(screen_names_on_lists)\r\n print(\"Removing entries which are no longer on any list\")\r\n 
self._delete_entries_not_in_list(screen_names_on_lists) # remove entries from DB if they are on no list\r\n print(\"Sync complete\")", "def AddInitiators(self, initiatorList):\n # Append the IQNs to the existing list\n full_iqn_list = self.initiators\n for iqn in initiatorList:\n if iqn.lower() in full_iqn_list:\n mylog.debug(iqn + \" is already in group \" + self.name)\n else:\n full_iqn_list.append(iqn)\n\n # Modify the VAG on the cluster\n params = {}\n params[\"volumeAccessGroupID\"] = self.ID\n params[\"initiators\"] = full_iqn_list\n libsf.CallApiMethod(self.mvip, self.username, self.password, \"ModifyVolumeAccessGroup\", params, ApiVersion=5.0)", "def add(self, transfer):\n assert transfer.lock.hashlock not in self.locked\n self.locked[transfer.lock.hashlock] = transfer\n self._cached_lock_hashes.append(sha3(transfer.lock.as_bytes))\n self._cached_root = None", "def add(args):\n files = []\n for path in args.files:\n if os.path.isdir(path):\n ft = filetree(path)\n files.extend(ft.filelist())\n else:\n files.append(path)\n for path in files:\n path = os.path.normpath(os.path.relpath(path, args.base))\n if path not in args.cache:\n args.cache.append(path)\n args.update = True\n return", "def update_my_ownership(self, add_comp_list):\n all_transfer_event_received = []\n transfer_comp_event = self.msg.get_transfer_cmp_event()\n acc_updater_map_version = self.msg.get_acc_updater_map_version()\n self.msg.load_gl_map()\n while float_comp(acc_updater_map_version, \\\n self.msg.get_acc_updater_map_version()) >= 0:\n time.sleep(20)\n self.logger.info(\"Account updater map version is not updated old\"\\\n \"map version:%s, new map version:%s\" %(acc_updater_map_version,\\\n self.msg.get_acc_updater_map_version()))\n self.msg.load_gl_map()\n\n transfer_comp_event.set()\n self.msg.load_ownership()\n self.updated_comp_list = self.msg.get_ownershipList()\n while not self.check_ownership_updated(add_comp_list):\n time.sleep(20)\n self.msg.load_ownership()\n self.updated_comp_list = self.msg.get_ownershipList()\n\n self.logger.info(\"Updating ownership :%s\" %self.updated_comp_list)\n self.msg.set_ownershipList(self.updated_comp_list)\n\n while len(all_transfer_event_received) != 4:\n all_transfer_event_received.append(self.msg.get_from_Queue())\n transfer_comp_event.clear()\n self.logger.info(\"transfer/accept component event is cleared\")\n add_comp_list = []", "def sync():\n for filename, requirements in _sync():\n _write_requirements(filename, requirements)", "def add_mock_dirs(self, dir_list):\n self._mock_dir_list.extend(dir_list)", "def whitelist_file(self, fkey):\n self.whitelist.update([fkey])", "def localfiles_for_update(self, localfiles, obsfiles):\n upload_local_files = []\n obs_dict = {}\n for key, mtime, size in obsfiles:\n obs_dict[key.strip('/')] = mtime\n\n for localfile in localfiles:\n filepath, key = localfile\n fullkey = key + '/' + os.path.basename(filepath)\n fullkey = fullkey.strip('/')\n if fullkey in obs_dict.keys():\n localfile_timestamp = os.path.getmtime(filepath)\n obsfile_timestamp = time.mktime(time.strptime(obs_dict[fullkey], \"%Y/%m/%d %H:%M:%S\"))\n\n if localfile_timestamp > obsfile_timestamp:\n upload_local_files.append(localfile)\n else:\n upload_local_files.append(localfile)\n return upload_local_files", "def svn_fs_access_add_lock_token(*args):\r\n return _fs.svn_fs_access_add_lock_token(*args)", "def files(self, only_unlocked=False):\n # pylint: disable=no-member\n xdd = builtins.__xonsh_env__.get('XONSH_DATA_DIR')\n xdd = expanduser_abs_path(xdd)\n\n fs = [f 
for f in glob.iglob(os.path.join(xdd, 'xonsh-*.json'))]\n files = []\n for f in fs:\n try:\n lj = LazyJSON(f, reopen=False)\n if only_unlocked and lj['locked']:\n continue\n # info: closing timestamp, number of commands, filename\n files.append((lj['ts'][1] or time.time(),\n len(lj.sizes['cmds']) - 1,\n f))\n lj.close()\n except (IOError, OSError, ValueError):\n continue\n files.sort()\n return files", "def addMasters(self,masterNames):\n #--Load Masters\n #--Master FileRefs\n proItems = []\n totSize = 0\n for masterName in masterNames:\n #--Don't have fileRef? FileRef out of date?\n masterInfo = modInfos[masterName]\n fileRefs = masterInfo.extras.get('FileRefs')\n if not fileRefs:\n fileRefs = masterInfo.extras['FileRefs'] = FileRefs(masterInfo,True,True)\n fileRefs.setDebrisIds()\n refreshSize = fileRefs.refreshSize()\n if refreshSize:\n proItems.append((fileRefs,refreshSize))\n totSize += refreshSize\n #--Refresh masters\n cumSize = 0\n for (fileRefs,size) in proItems:\n self.progress.setBaseScale(1.0*cumSize/totSize, 1.0*size/totSize)\n fileRefs.progress = self.progress\n fileRefs.refresh()\n cumSize += size\n #--Do Mapping\n del proItems[:]\n totSize = 0\n for masterName in masterNames:\n size = len(modInfos[masterName].extras['FileRefs'].cells)\n proItems.append((masterName,size))\n totSize += size\n cumSize = 0\n for (masterName,size) in proItems:\n if size: self.progress.setBaseScale(1.0*cumSize/totSize, 1.0*size/totSize)\n self.addMaster(masterName)\n cumSize += size", "def set_in_files():\r\n\tindatadir = '/nobackup/ejblom/reddit'\r\n\tcom_dir = '/comments'\r\n\tsubm_dir = '/submissions'\r\n\tglob_end = '/filtered*'\r\n\tcom_glob_str = indatadir + com_dir + glob_end\r\n\tsubm_glob_str = indatadir + subm_dir + glob_end\r\n\tinfilenames = sorted(glob.glob(com_glob_str)) + sorted(glob.glob(subm_glob_str))\r\n\treturn infilenames", "def add_files_and_directories_rename(self, **kwargs):\n self.rename_files_or_directories_objects.append(\n RenameFilesOrDirectories(\n root_directory=self.root_directory,\n exclude_directories=self.exclude_directories,\n exclude_files=self.exclude_files,\n **kwargs\n )\n )", "def synchronize(self):\n self.increment_pc()\n shares = [self._exchange_shares(player, GF256(0))\n for player in self.players]\n result = gather_shares(shares)\n result.addCallback(lambda _: None)\n return result", "def wingrep(self):\n for folder, files_ in self.walk():\n listed_files = self.list_appro_files(folder, files_)\n for file_o in self.open_files(listed_files=listed_files):\n self.search_in(file_o)", "def defineMergeJobs(self, mergeableFiles):\n mergeJobFileSize = 0\n mergeJobEvents = 0\n mergeJobFiles = []\n earliestInsert = 999999999999999\n\n mergeableFiles.sort(key=cmp_to_key(fileCompare))\n\n for mergeableFile in mergeableFiles:\n if mergeableFile[\"file_size\"] > self.maxMergeSize or \\\n mergeableFile[\"file_events\"] > self.maxMergeEvents:\n self.createMergeJob([mergeableFile])\n continue\n elif mergeableFile[\"file_size\"] + mergeJobFileSize > self.maxMergeSize or \\\n mergeableFile[\"file_events\"] + mergeJobEvents > self.maxMergeEvents:\n if mergeJobFileSize > self.minMergeSize or \\\n self.forceMerge == True or \\\n time.time() - mergeableFile['insert_time'] > self.maxWaitTime:\n self.createMergeJob(mergeJobFiles)\n mergeJobFileSize = 0\n mergeJobEvents = 0\n mergeJobFiles = []\n else:\n continue\n\n mergeJobFiles.append(mergeableFile)\n mergeJobFileSize += mergeableFile[\"file_size\"]\n mergeJobEvents += mergeableFile[\"file_events\"]\n if 
mergeableFile['insert_time'] < earliestInsert:\n earliestInsert = mergeableFile['insert_time']\n\n if mergeJobFileSize > self.minMergeSize or self.forceMerge == True or \\\n time.time() - earliestInsert > self.maxWaitTime:\n if len(mergeJobFiles) > 0:\n self.createMergeJob(mergeJobFiles)\n\n return", "def populateFileList(self):\n\n self.m_fileList.SetForegroundColour(wx.NullColour)\n\n # We'll need to track which file names are modified and which\n # file names duped.\n applicable, dupes = set(), set()\n\n if not self.m_validPatterns:\n # Regex's don't compile yet, just use the raw filename list.\n newNames = self.m_diskNames\n\n else:\n # Apply the substitution to the filename list to produce a\n # destination-name list, and identify whether the patterns\n # actually affect anything.\n #\n newNames, modifiedIndexes = [], []\n\n matcher = re.compile(self.m_reFromCtl.Value).subn\n subs = self.m_reToCtl.Value\n\n for filename in self.m_diskNames:\n # Perform the sub\n (filename, numChanges) = matcher(subs, filename)\n\n # Was there a modification?\n if numChanges:\n # Record the affected name.\n applicable.add(filename)\n if filename in newNames:\n dupes.add(filename)\n\n # Add to the primary list\n newNames.append(filename)\n\n # Does this produce a different list than we already had? If so,\n # clear the file list and replace it with the new one.\n #\n if newNames != self.m_newNames:\n\n self.m_fileList.Clear()\n\n # Figure out the longest name so we can create a cleanly-formatted\n # set of prefix/suffix characters for the modified/duped annotation.\n #\n maxLen = max(map(len, newNames))\n decorate = '{m} {fn:<{ml}} {m}'.format\n\n # Now build a list of display elements.\n for filename in newNames:\n mark = ' ' if filename not in applicable else '|'\n if filename in dupes:\n mark = '*'\n self.m_fileList.Append(decorate(m=mark, fn=filename, ml=maxLen))\n\n # Keep the list.\n self.m_newNames[:] = newNames\n\n # Update the apply button, we only want it enabled when the user\n # has a valid set of patterns that affect any files and have no\n # dupes produced as a result.\n #\n self.m_applyBtn.Enabled = bool(applicable) and not dupes\n\n if dupes:\n # Emphasize the presence of dupes.\n self.m_fileList.SetForegroundColour(wx.RED)\n\n # Draw the list.\n self.m_fileList.Refresh()", "def filter_filelist(files: list, hour_mod: int = 12, min_mod: int = 60) -> list:\n files_restricted = []\n if hour_mod == 0 and min_mod == 0:\n files_restricted.append(sorted(files)[-1])\n else:\n for file in files:\n hour = int(file.split(\"_\")[3][8:10])\n minute = int(file.split(\"_\")[3][10:12])\n if hour % hour_mod == 0 and minute % min_mod == 0:\n files_restricted.append(file)\n logging.debug(f'Remote file added: {file}')\n else:\n logging.debug(f'Remote file ignored: {file}')\n logging.info('Files to be downloaded has been reduced from {} to {}'.format(len(files), len(files_restricted)))\n return files_restricted", "def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None\n for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')):\n paths = [os.path.join(dirpath, filename) for filename in filenames]\n files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths)", "def _haveReadLocks(self): \n readLockFileName = ReadLock.fileName\n for name in os.listdir(self.dir):\n if name.startswith(readLockFileName):\n return True\n return False", "def merge_files():\n # abs path of data folder\n work_folder = os.path.join(CURRENT_FOLDER, 
\"..\\\\Data\\\\weather_data\\\\KORD\")\n file_list = os.listdir(work_folder)\n with open(os.path.join(work_folder, \"..\\\\merged_history_KORD.csv\"), \"w\") as outfile:\n for line in open(os.path.join(work_folder, file_list[0])):\n outfile.write(line)\n print \"write the first line\"\n for i in range(1, len(file_list)):\n with open(os.path.join(work_folder, file_list[i])) as infile:\n infile.next()\n for line in infile:\n outfile.write(line)", "def write_cache_file_list(self, file_list=None):\n if file_list:\n self.add_filelist_to_cache(file_list)\n cache_list = []\n for finfo in self.cache_file_list_dict.values():\n cache_list.append(finfo.output_cache_tuple())\n return self.write_pickle_object_to_file(tuple(cache_list))", "def setfiles(self, filelist):\r\n self._filelist=filelist\r\n self._energy=self.readenergy(filelist)", "def upload(self, folder_list, files):\n current_folder_id = self.top_folder_id\n for fname in folder_list:\n current_folder_id = self._fetch_or_create_folder(fname, current_folder_id)\n for file in files:\n self._upload_detail(file, current_folder_id)", "def main(root, filelist):\n #print \"got %s: %s\" % (root, filelist)\n rename(root, filelist)", "def getLockInfoOfNonDerivedFiles(self, ids, wspLockId):\n sql = \"\"\"\n SELECT cdb_file.cdb_lock,\n cdb_file.cdb_lock_id,\n cdb_file.cdbf_object_id,\n cdb_file.cdb_object_id,\n angestellter.name AS mapped_cdb_lock_name\n FROM\n cdb_file\n LEFT JOIN\n angestellter\n ON\n cdb_file.cdb_lock = angestellter.personalnummer\n WHERE\n cdb_file.cdb_classname = 'cdb_file'\n AND (cdb_file.cdb_belongsto='' OR cdb_file.cdb_belongsto IS NULL)\n \"\"\"\n records = partionedSqlQuery(sql, \"cdb_file.cdbf_object_id\", ids)\n res = defaultdict(dict)\n for r in records:\n status = u\"not\"\n lockerName = u\"\"\n locker = r.cdb_lock\n if locker:\n lockerName = r.mapped_cdb_lock_name\n if lockerName is None:\n misc.cdblogv(misc.kLogMsg, 0,\n \"WsObjectCache, warning: file '%s' of document '%s' is locked\"\n \" by unknown user '%s' (no matching name in 'angestellter')\"\n % (r.cdb_object_id, r.cdbf_object_id, locker))\n lockerName = u\"\"\n if locker == auth.persno:\n status = u\"self\"\n lockId = r.cdb_lock_id\n if lockId and wspLockId:\n if lockId != wspLockId:\n status = u\"other_ws\"\n else:\n status = u\"other\"\n res[r.cdbf_object_id][r.cdb_object_id] = {'status': status, 'locker': lockerName}\n return res", "def sync():\n _ownered_project = []\n _tmp_project_list = get_user_repo_list(current_user.username)\n if _tmp_project_list:\n for project in _tmp_project_list:\n _ownered_project.append((project, project))\n # Add upperstream_repo\n upperstream_repo = get_upperstream_repo(project)\n if upperstream_repo is not None:\n _ownered_project.append((upperstream_repo, upperstream_repo + \"(Upperstream of %s)\" % project))\n\n User.objects(username=current_user.username).update_one(set__owned_repo_sync_time=datetime.utcnow())\n\n # mongoDB don't support key value contains '.'\n for i in range(len(_ownered_project)):\n _ownered_project[i] = (_ownered_project[i][0].replace('.', '[dot]'), _ownered_project[i][1])\n User.objects(username=current_user.username).update_one(set__owned_repo=dict(_ownered_project))\n\n flash('Refresh your own GitHub repositories list successfully!', 'success')\n return redirect(url_for('main.load_from_github'))", "def _rnlst(self, path, filelist):\n path = self._cleanpath(path)\n dirdict = self.parsedir(path)\n print(dirdict)\n \n trycwds = dirdict.get('trycwds', [])\n names = dirdict.get('names', [])\n \n for 
trycwd, name in zip(trycwds, names): \n if trycwd: # name is a directory\n self._rnlst(self.remotepathsep.join([path, name]), filelist)\n else: \n filelist.append(self.remotepathsep.join([path, name]))\n \n return filelist", "def sync_ready_files(self, sync_config=None):\n\n if sync_config is None:\n sync_config = config.getSettingJson(config.CONSENT_SYNC_BUCKETS)\n\n hpos_sync_config = sync_config['hpos']\n orgs_sync_config = sync_config['orgs']\n\n filters = {\n hpo_name: {\n 'exclude_types': [\n ConsentType(excluded_type_str) for excluded_type_str in options['exclude_types']\n ]\n }\n for hpo_name, options in hpos_sync_config.items()\n if 'exclude_types' in options\n }\n file_list: List[ConsentFile] = self.consent_dao.get_files_ready_to_sync(\n hpo_names=hpos_sync_config.keys(),\n org_names=orgs_sync_config.keys(),\n additional_filters=filters\n )\n\n pairing_info_map = self._build_participant_pairing_map(file_list)\n\n # Build out a FileSync for each possible PairingInfo\n sync_pairing_map: Dict[ParticipantPairingInfo, BaseFileSync] = {}\n for pairing_info in pairing_info_map.values():\n if pairing_info not in sync_pairing_map:\n org_config = orgs_sync_config.get(pairing_info.org_name)\n if org_config:\n config_data = org_config\n else:\n config_data = hpos_sync_config.get(pairing_info.hpo_name)\n\n if not config_data:\n # No need to build sync handlers for anything not in the config\n continue\n\n sync_pairing_map[pairing_info] = self._build_sync_handler(\n zip_files=config_data['zip_consents'],\n bucket=config_data['bucket'],\n pairing_info=pairing_info\n )\n\n for file in file_list:\n pairing_info = pairing_info_map.get(file.participant_id, None)\n if not pairing_info:\n # Skip files for unpaired participants\n continue\n\n # Retrieve the sync handler based on the pairing information\n file_group = sync_pairing_map.get(pairing_info)\n if not file_group:\n # Ignore participants paired to an org or hpo we aren't syncing files for\n continue\n\n file_group.files_to_sync.append(file)\n\n with self.consent_dao.session() as session:\n for file_group in sync_pairing_map.values():\n files_synced = file_group.sync_file_list()\n\n # Update the database after each group syncs so ones\n # that have succeeded so far get saved if a later one fails\n if len(files_synced):\n self.consent_dao.batch_update_consent_files(session=session, consent_files=files_synced)\n session.commit()\n\n # Queue tasks to rebuild consent metrics resource data records (for PDR)\n dispatch_rebuild_consent_metrics_tasks([file.id for file in files_synced])", "def files_distribute(self):\n self._post('files/distribute')", "def merge_lock(poetry: Poetry) -> None:\n lock_data = load(poetry.locker)\n save(poetry.locker, lock_data, poetry.package)", "def _replace_files(self, index_key, media_key, new_list, fundamentals):\n _index=fundamentals.get(index_key, {})\n _media=fundamentals.get(media_key, {})\n for _file in new_list:\n _data=self._item_from_index(_file, 'data', _media)\n if not _data:\n self.log('Failed to write file %s due to no data'%_file)\n continue\n _file_name=self._item_from_index(_file, 'filename', _index)\n if _file_name:\n _stat=self.statfile(_file_name)\n if _stat and _stat['size']!=len(_data):\n try:\n self.writefile(_file_name, _data)\n except:\n self.log('Failed to write BREW file '+_file_name)\n if __debug__:\n raise", "def blocking_transfer(self, filelist):\n miscutils.fwdebug_print(\"\\tNumber files to transfer: %d\" % len(filelist))\n if miscutils.fwdebug_check(1, \"ARCHIVETRANSFER_DEBUG\"):\n 
miscutils.fwdebug_print(\"\\tfilelist: %s\" % filelist)\n\n srcroot = self.src_archive_info['root']\n dstroot = self.dst_archive_info['root']\n\n files2copy = copy.deepcopy(filelist)\n for _, finfo in files2copy.items():\n finfo['src'] = '%s/%s' % (srcroot, finfo['src'])\n finfo['dst'] = '%s/%s' % (dstroot, finfo['dst'])\n\n transresults = disk_utils_local.copyfiles(files2copy, None)\n\n return transresults", "def _lock(self):\n self._lockFile = open(self._lockFilename, \"w\")\n self._lockFile.write(\"%d\" % (os.getpid()))\n self._lockFile.flush()", "def prepare_list_of_files(kernel_name, kernel_file_list, params, grid, threads, block_size_names):\n temp_files = dict()\n\n kernel_string = get_kernel_string(kernel_file_list[0], params)\n name, kernel_string = prepare_kernel_string(kernel_name, kernel_string, params, grid, threads, block_size_names)\n\n if len(kernel_file_list) > 1:\n for f in kernel_file_list[1:]:\n #generate temp filename with the same extension\n temp_file = get_temp_filename(suffix=\".\" + f.split(\".\")[-1])\n temp_files[f] = temp_file\n #add preprocessor statements to the additional file\n _, temp_file_string = prepare_kernel_string(kernel_name, get_kernel_string(f, params), params, grid, threads, block_size_names)\n write_file(temp_file, temp_file_string)\n #replace occurences of the additional file's name in the first kernel_string with the name of the temp file\n kernel_string = kernel_string.replace(f, temp_file)\n\n return name, kernel_string, temp_files", "def create_lock_file():\n\n print(\"Creating lock file\")", "def add_files_to_zip(\n file_list, common_root_directory, zip_handler, put_all_files_in_shared_root_dir\n):\n for file_path in file_list:\n rel_path = file_path\n if common_root_directory is not None:\n rel_path = os.path.relpath(file_path, common_root_directory)\n else:\n # If we don't have a common root dir then, on Windows, path will begin with drive letter\n # e.g. 
'C:\\' - remove this for adding to the ZIP\n if platform.system() == \"Windows\":\n rel_path = rel_path.replace(\":\", \"\")\n try:\n if put_all_files_in_shared_root_dir and common_root_directory is not None:\n zip_handler.write(\n file_path,\n arcname=os.path.join(os.path.basename(common_root_directory), rel_path),\n )\n else:\n zip_handler.write(file_path, arcname=rel_path)\n except IOError:\n printer(\n \"'{}' no longer present in folder - zip creation aborted\".format(file_path),\n \"error\",\n True,\n )\n raise\n except OSError:\n printer(\"OSError on '{}' - zip creation aborted\".format(file_path), \"error\", True)\n raise", "def _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list):\n for checksum_algorithm in _get_checksum_algorithm_set(payload_info_list):\n _add_tag_file(\n zip_file,\n dir_name,\n tag_info_list,\n _gen_manifest_file_tup(payload_info_list, checksum_algorithm),\n )", "def upload_files(self, files):\n\n for f in files:\n self.scp.put(f, recursive=True)", "def process_files(exp_folders):\n pool = mp.Pool()\n results = pool.imap_unordered(read_and_serialize, exp_folders)\n\n stat = []\n for res in results:\n print(res)\n stat.append(res)\n\n pool.close()\n pool.join()", "def syncfolder():", "def loadInputFiles(self):\n\t\tfor filename in self.input_filename_list:\n\t\t\tfor module in self.modules:\n\t\t\t\tmodule.Add(filename)", "def add_files(self):\n file_paths = tkinter.filedialog.askopenfilenames(parent=self)\n\n if not file_paths:\n return\n for file_path in file_paths:\n self.files_treeview.insert(\"\", \"end\", values=(file_path,))\n self.files_treeview.selection_set(self.files_treeview.get_children()[-1])", "def createFileList(self):\r\n if self.childFileListDownload is None or not self.childFileListDownload.is_alive():\r\n self.childFileListDownload = multiprocessing.Process(target=self.createFileListHTTP, args=(self.server, self.directory))\r\n self.childFileListDownload.start()" ]
[ "0.5858453", "0.5857905", "0.5442941", "0.5427439", "0.5369485", "0.534254", "0.5242692", "0.5233011", "0.52133423", "0.51889026", "0.51700175", "0.51625514", "0.5118121", "0.5110031", "0.50916535", "0.5077471", "0.5056525", "0.5047517", "0.50415224", "0.50407803", "0.50310415", "0.50032467", "0.49873388", "0.49867135", "0.49853197", "0.49367034", "0.49278733", "0.49048927", "0.48945892", "0.48774955", "0.48757705", "0.4868424", "0.48330647", "0.48270535", "0.48059884", "0.4800957", "0.47996476", "0.47986957", "0.47923377", "0.4784794", "0.4770063", "0.4757243", "0.4756429", "0.4740982", "0.4740402", "0.47184512", "0.47064188", "0.4701494", "0.46890834", "0.4686908", "0.46833098", "0.46705067", "0.46669143", "0.4660952", "0.46540475", "0.46505398", "0.4643907", "0.46393213", "0.46392995", "0.4629129", "0.46290788", "0.46219367", "0.4620757", "0.46154115", "0.46036127", "0.45879766", "0.4587104", "0.45858932", "0.45855638", "0.45670807", "0.4564829", "0.45554876", "0.45554143", "0.45544338", "0.45447424", "0.45409322", "0.45369944", "0.45357588", "0.4534035", "0.45301998", "0.45283294", "0.45270038", "0.45166636", "0.45147595", "0.4512996", "0.45063812", "0.45049688", "0.45010754", "0.45004067", "0.44998848", "0.4498591", "0.44961262", "0.44917104", "0.44913235", "0.44774613", "0.447644", "0.44691503", "0.44686478", "0.4458599", "0.44559982" ]
0.785469
0
If we don't define this, it will use the regular dictionary __iter__ which does not call SortedDictionary.keys().
def __iter__(self):
    for each in list(self.keys()):
        yield each
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n return self.ordered_keys.__iter__()", "def __iter__(self):\n return iter(self.keys())", "def __iter__(self):\n if self._len_keys == 1:\n yield from self._dict.keys()\n else:\n for key in self._dict.keys():\n yield tuple(sorted(list(key)))", "def __iter__(self):\n for x in sorted(self.keys()):\n yield self[x]", "def __iter__(self):\n for key in sorted(self.keys):\n yield key, self[key]", "def __iter__(self):\n # Return an iterator for the keys in the underlying dictionary.\n return iter(self.data)", "def __iter__(self) -> iter:\n return iter(self._dict)", "def iterkeys(self):\n return DictKeysIterator(self)", "def __iter__(self):\n return iter(self._key_order)", "def __iter__(self):\n\t\treturn self.keys()", "def __iter__(self):\n return self.keys()", "def iterkeys(self):", "def iterkeys(self):", "def iterkeys(self):\n return self.__iter__()", "def __iter__(self):\n\n return iter([key for key in self._data.keys()])", "def _map___iter__(self):\n return self.iterkeys()", "def __iter__(self) -> Generator:\n for k in self.raw.keys():\n yield k", "def __iter__(self) -> Iterator[str]:\n return iter(self._keys)", "def __iter__(self):\n\t\tfor key, value in self.__dict__.iteritems():\n\t\t\tif key[0] != '_':\n\t\t\t\tyield value", "def iterkeys(d):\r\n return iter(getattr(d, _iterkeys)())", "def __iter__(self, *args, **kwargs):\n for key in self.keys(*args, **kwargs):\n yield key", "def __iter__(self):\n return iter({})", "def __iter__(self):\n\n return self._entries.__iter__()", "def __iter__(self):\n return iter(self._internals.values())", "def __iter__(self):\n for domain in self.keys():\n yield domain", "def iterkeys(d):\n return iter(getattr(d, _iterkeys)())", "def __iter__(self):\n return self._data_dict.__iter__()", "def __iter__(self):\n return iter(self.items)", "def __iter__(self):\n return iter(self.items)", "def __iter__(self):\n for value in self.__dict__.values():\n yield value", "def iteritems(self):\n return DictItemsIterator(self)", "def __iter__(self):\n cursor=0\n while cursor<len(self):\n yield self._item[cursor].key\n cursor+=1", "def iterator(self):\n return self.KeyIterator()", "def __iter__(self):\n alt_locs = self.keys()\n alt_locs.sort()\n for alt_loc in alt_locs:\n yield self[alt_loc]", "def __iter__(self):\n\n # For each key in set of keys\n for key in self.keys_set:\n\n # Yield that key and associated value\n yield key, self.__getitem__(key)", "def __iter__(self):\r\n return iter(self._items)", "def __iter__(self):\r\n return iter(self._items)", "def __iter__(self):\n for bucket in self._table:\n if bucket is not None:\n for key in bucket:\n yield key", "def iteritems(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n key = keys.next()\n yield (key, self[key])\n return make_iter()", "def __iter__(self):\n for acronym in self.keys:\n yield acronym, self.dict[acronym]", "def iteritems(self):", "def __iter__(self):\n yield from self._type_keys", "def iterkeys(self, *args, **kwargs):\n self.__iter__(*args, **kwargs)", "def __iter__(self):\n for v in self._items:\n yield v", "def __init__(self):\n super(KeyIterator, self).__init__()\n self.iterator = self.ValueIterator()", "def __iter__(self):\r\n for item in self._data:\r\n yield item # yield the KEY\r", "def __iter__(self):\n return iter(self._items)", "def __iter__(self):\n return iter(self._items)", "def __iter__(self):\n return iter(self._d)", "def __iter__(self):\n return self.in_order", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__():", "def 
iteritems(self):\n for key in self:\n yield key, self[key]", "def iteritems(self):\n for key in self:\n yield (key, self[key])", "def __iter__(self):\n for key in sorted(self._points):\n yield key", "def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def iterkeys(self):\n if PY2:\n return self._tagged.iterkeys()\n else:\n return self._tagged.keys()", "def __iter__(self):\n for value in dict.__iter__(self):\n for count in range(self[value]):\n yield value", "def __iter__ (self):\n return iter (self.containments.keys ())", "def iterkeys(self):\n return iter(self._sequence)", "def __iter__(self): # pragma: no cover\r\n return ((k, v) for k, v in vars(self).items() if not k.startswith(\"_\"))", "def __iter__(self):\n yield from chain.from_iterable(self.data.values())", "def __iter__(self):\n return iter(self.vert_dict.values())", "def __iter__(self):\n return iterkeys(self._ngrams)", "def __iter__( self ) :\n\n for entry in self.__entries : yield entry", "def __iter__(self):\n with SessionContext(self.SessionClass) as session:\n keys = session.query(PAW2_DBObject.key)\n keys = [c[0] for c in keys]\n random.shuffle(keys)\n return keys.__iter__()", "def __iter__(self):\n return iter(self.__iter())", "def __iter__(self):\n \n return iter(self.vert_dict.values())", "def iterkeys(d, **kw):\r\n return iter(getattr(d, _iterkeys)(**kw))", "def iterkeys(d, **kw):\r\n return iter(getattr(d, _iterkeys)(**kw))", "def __iter__(self) -> 'Dictionary':\n return copy.deepcopy(self)", "def __iter__(self):\n pass", "def __iter__(self):\n pass", "def __iter__(self):\n pass", "def __iter__(self):\n pass", "def __iter__(self):\n pass", "def __iter__(self):\n pass", "def __iter__(self):\n pass", "def __iter__(self):\n # This could be as simple as \"return self._getKeyList().__iter__()\"\n # but this performs some extra consistency checking to make sure the\n # key we iterate to actually exists, to keep us from crashing if\n # our db is a little out of sync with itself.\n\n # This is a nasty hack because our db seems prone to circular links\n nItems = 0\n for item in self._getKeyList():\n if item in self:\n yield item\n nItems += 1\n # NASTY HACK!\n if nItems > 1000:\n self.reindex()\n raise Exception(\"Circular link corrected, try again\")\n else:\n self._delKey(item)", "def Keys(self) -> NameObjectCollectionBase.KeysCollection:", "def __iter__(self):\n self._deduplicate()\n return iter(self._entries)", "def __iter__(cls):\n return iter(cls.__by_number.values())", "def iteritems(self):\r\n return six.iteritems(self._as_dict())", "def keys(self):\n return iter(k for k, _ in self._pairs())", "def __iter__(self):\n\n result = []\n\n # d - dict, p - path (keys sequence)\n def recurs_iter(d, p=None):\n p = p or []\n\n # k - key, v - value\n for k, v in iteritems(d):\n next_p = p + [k]\n if isinstance(v, dict):\n recurs_iter(v, next_p)\n else:\n result.append(tuple(next_p))\n\n recurs_iter(self.__dict__)\n\n return iter(result)", "def keys(self):\n raise NotImplementedError('keys() should have been replaced by a metaclass')", "def itervalues(self):\n for key in self:\n yield self[key]", "def __iter__(self):\n try:\n i = self.db[self._headKey]\n while True:\n yield i\n i = self.db[self._getNextKey(i)]\n except KeyError:\n pass", "def iterkeys(self):\n return iter(kvp.key for 
kvp in self.keyvaluepair_set.all())", "def itervalues(self):\n return DictValuesIterator(self)", "def iterkeys(self):\n return self._d.iterkeys()", "def __iter__(self):\n return iter(self._items)\n # to use a generator, it would look like this...\n # for item in self._items: yield item", "def iteroriginal(self):\n for key in self:\n vals = _dict_getitem(self, key)\n for val in vals[1:]:\n yield vals[0], val", "def __iter__(self):\n if self.empty():\n return\n for node in self.root:\n yield node.key", "def __iter__(self):\n return iter(self._cached)", "def __iter__(self):\n raise NotImplementedError(\"__iter__\")" ]
[ "0.8112754", "0.7881003", "0.7754839", "0.76607627", "0.764876", "0.7564836", "0.7504256", "0.75019187", "0.74793774", "0.7474488", "0.74243957", "0.7364466", "0.7364466", "0.73356414", "0.7253679", "0.72350866", "0.7233559", "0.7106118", "0.7090561", "0.7053434", "0.69872856", "0.6984348", "0.69795036", "0.6972673", "0.69265544", "0.6869416", "0.6839497", "0.6831577", "0.6831577", "0.680716", "0.6785401", "0.6784434", "0.67601216", "0.6744813", "0.6743477", "0.67005265", "0.67005265", "0.67004776", "0.6691876", "0.66811323", "0.6667628", "0.66597563", "0.66523415", "0.663862", "0.6625783", "0.66251564", "0.6622411", "0.6622411", "0.66188383", "0.6582584", "0.65758693", "0.65758693", "0.65758693", "0.65758693", "0.65719956", "0.6571139", "0.65688634", "0.656793", "0.65648437", "0.65648437", "0.65610987", "0.65422887", "0.6512388", "0.6477373", "0.6475892", "0.6474237", "0.6432229", "0.64262855", "0.63843524", "0.63768554", "0.6374607", "0.6357363", "0.6341298", "0.6341298", "0.6341179", "0.6336935", "0.6336935", "0.6336935", "0.6336935", "0.6336935", "0.6336935", "0.6336935", "0.63262063", "0.6315084", "0.63013166", "0.62942415", "0.62913424", "0.62866557", "0.62833667", "0.62828916", "0.6281372", "0.62757266", "0.6270366", "0.62667423", "0.62600166", "0.62467104", "0.62404144", "0.6238442", "0.6236912", "0.6223479" ]
0.7525102
6
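A minimal runnable sketch of the pattern described by the query above, assuming SortedDictionary subclasses dict and overrides keys() to return keys in sorted order; only the __iter__ method mirrors the document field, the rest of the class is illustrative.

class SortedDictionary(dict):
    """Assumed shape for illustration: keys() returns keys in sorted order."""

    def keys(self):
        return sorted(super().keys())

    def __iter__(self):
        # Without this override, iter(d) falls back to dict.__iter__,
        # which never goes through the sorted keys() defined above.
        for each in list(self.keys()):
            yield each

d = SortedDictionary(b=2, a=1, c=3)
print(list(d))   # ['a', 'b', 'c']: iteration now follows the sorted key order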
Create a CourseGraph, fetching unitary weights and edge weights from database, creating CourseNodes for each course, and adding weighted edges between adjacent courses.
def __init__(self, database, session, max_suggestions=5, max_courses=30, cache_mult=4):
    self._nodes = dict()  # dict with courseid keys, CourseNode vals
    self._max_suggestions = max_suggestions
    self._max_courses = max_courses
    self._cache_mult = cache_mult
    db = database

    # Get dict mapping courses to unitary weights
    unitary_dict = db.get_unitary_dict(session)

    # Get dict mapping courses to adjacent courses and weights
    edge_dict = db.get_edges_dict(session)

    # Create CourseNodes
    for courseid in unitary_dict:
        courseNode = CourseGraph.CourseNode(courseid=courseid, edges=dict(),
                                            popularity=unitary_dict[courseid])
        self._nodes[courseid] = courseNode

    # Create course edge dict for each CourseNode
    for courseid in edge_dict:
        node = self._nodes[courseid]  # get node of interest
        adj_courses = edge_dict[courseid]  # get inner dict {otherid: edge_weight}
        for otherid in adj_courses:
            other_node = self._nodes[otherid]
            node.addEdge(other_node, adj_courses[otherid])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()", "def prepare_graph(\n self,\n adjacency,\n weights,\n weighted=False,\n undirected=False,\n force_dense=True,\n noselfloop=True,\n verbose=True,\n ):\n\n # df_adj = pd.read_csv(in_folder + adj_name, index_col=0) # read adjacency file\n print(\"\\nAdjacency shape: {0}\".format(adjacency.shape), flush=True)\n\n # create the graph adding nodes and edges\n A = self.read_graph(\n adj=adjacency,\n weights=weights,\n weighted=weighted,\n undirected=undirected,\n noselfloop=noselfloop,\n verbose=verbose,\n )\n\n nodes = list(A[0].nodes)\n print(\"\\nNumber of nodes =\", len(nodes), flush=True)\n print(\"Number of layers =\", len(A), flush=True)\n if verbose:\n self.print_graph_stat(A)\n\n # save the multilayer network in a tensor with all layers\n if force_dense:\n B = self.build_B_from_A(A, nodes=nodes)\n else:\n B = self.build_sparse_B_from_A(A)\n\n return A, B, nodes", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def init_graph(self):\n import dgl\n\n adj_list = []\n for rel_type in range(1, self.n_relations, 1):\n edge_idxs = self.ckg.filter_edges(\n lambda edge: edge.data[\"relation_id\"] == rel_type\n )\n sub_graph = (\n dgl.edge_subgraph(self.ckg, edge_idxs, preserve_nodes=True)\n .adjacency_matrix(transpose=False, scipy_fmt=\"coo\")\n .astype(\"float\")\n )\n rowsum = np.array(sub_graph.sum(1))\n d_inv = np.power(rowsum, -1).flatten()\n d_inv[np.isinf(d_inv)] = 0.0\n d_mat_inv = sp.diags(d_inv)\n norm_adj = d_mat_inv.dot(sub_graph).tocoo()\n adj_list.append(norm_adj)\n\n final_adj_matrix = sum(adj_list).tocoo()\n indices = torch.LongTensor([final_adj_matrix.row, final_adj_matrix.col])\n values = torch.FloatTensor(final_adj_matrix.data)\n adj_matrix_tensor = torch.sparse.FloatTensor(indices, values, self.matrix_size)\n return adj_matrix_tensor.to(self.device)", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def __init__(self, nodes, edges, start_kind='Compound', end_kind='Disease', max_length=4, w=0.4, n_jobs=1):\n # Initialize important class variables\n self.w = w\n self.n_jobs = n_jobs\n self.metagraph = None\n self.start_kind = start_kind\n self.end_kind = end_kind\n\n # Placeholders for variables to be defined later\n self.node_file = None\n self.edge_file = None\n self.nodes = None\n self.metaedges = None\n self.adj_matrices = None\n self.out_degree = dict()\n self.in_degree = dict()\n self.degree_weighted_matrices = None\n\n # Mappers to be used later\n self.nid_to_index = None\n self.index_to_nid = None\n self.id_to_metanode = None\n self.metanode_to_ids = None\n self.nid_to_name = None\n self.metanode_to_edges = dict()\n self._modified_edges = None\n 
self._weighted_modified_edges = None\n self._orig_in_degree = dict()\n self._orig_out_degree = dict()\n\n # Read and/or store nodes as DataFrame\n if type(nodes) == str:\n self.node_file = nodes\n print('Reading file information...')\n self._read_node_file()\n elif type(nodes) == pd.DataFrame:\n self.node_df = gt.remove_colons(nodes).copy()\n self._validate_nodes()\n\n # Read and/or store edges as DataFrame\n if type(edges) == str:\n self.edge_file = edges\n self._read_edge_file()\n elif type(edges) == pd.DataFrame:\n self.edge_df = gt.remove_colons(edges).copy()\n self._validate_edges()\n\n # Process the Node and Edge information\n print('Processing node and edge data...')\n self._process_nodes()\n self._process_edges()\n\n # Initalize the metagraph and determine the metapaths available\n self._make_metagraph()\n self._determine_metapaths(start_kind, end_kind, max_length)\n self._map_metanodes_to_metaedges()\n\n # Generate the adjacency matrices.\n print('Generating adjacency matrices...')\n time.sleep(0.5)\n self._generate_adjacency_matrices()\n\n # Make Degree Weighted matrices.\n print('\\nDetermining degrees for each node and metaedge'.format(w))\n time.sleep(0.5)\n self._compute_node_degrees()\n\n # Make Degree Weighted matrices.\n print('\\nWeighting matrices by degree with dampening factor {}...'.format(w))\n time.sleep(0.5)\n self._generate_weighted_matrices()", "def neo4j_to_lkg():\n node_types = [\"judge\", \"keyword\", \"case\", \"catch\", \"act\", \"year\"]\n from backend.graph_formation.base.legal_knowledge_graph import LegalKnowledgeGraph\n\n lkg = LegalKnowledgeGraph()\n db = GraphDatabase(ENV[\"DB_URL\"], username=ENV[\"DB_USERNAME\"], password=ENV[\"DB_PASSWORD\"])\n # Authentication for NEO4J Browser\n\n for node_type in node_types:\n q = \"MATCH (c:{}) return c\".format(node_type) #Quering for all nodes in the graph\n results = db.query(q)\n for record in results:\n props={}\n node = record[0]\n if node:\n label = node[\"metadata\"][\"labels\"]\n node_id = node[\"data\"][\"id\"]\n node[\"data\"].pop(\"id\",None)\n props = node[\"data\"]\n props[\"type\"] = label\n lkg.add_node(id, **props)\n for node_type_1 in node_types:\n for node_type_2 in node_types:\n q = \"MATCH (c:{})-[r]->(m:{}) return c,m\".format(node_type_1, node_type_2) # Quering for all Relationships in the graph\n results = db.query(q)\n for record in results:\n node1 , node2 = record\n lkg.add_edge(node1[\"data\"][\"id\"], node2[\"data\"][\"id\"])\n return(lkg)", "def build_graph(self, name='', dump=None, nodes=None, depth_goal=1,\n filter_top=True, remove_isolates=True, add_years=True,\n fill_empty_years=True, model=None, dct=None,\n compute_core_periphery=True, compute_communities=True,\n compute_community_cores=True):\n self.graph = nx.DiGraph()\n self.graph.name = name\n if not dump:\n raise AttributeError('wiki.Net: Provide wiki.Dump object.')\n print('wiki.Net: traversing Wikipedia...')\n Net.bft(self.graph, dump, nodes, depth_goal=depth_goal, \n nodes=nodes, filter_top=filter_top)\n if remove_isolates:\n print('wiki.Net: removing isolates...')\n self.graph.remove_nodes_from(nx.isolates(self.graph))\n if add_years:\n print('wiki.Net: adding years...')\n for node in self.graph.nodes:\n dump.load_page(node)\n self.graph.nodes[node]['year'] = dump.years[0] if len(dump.years)>0 else []\n self.graph.graph['num_years'] = sum(\n [bool(y) for y in nx.get_node_attributes(self.graph, 'year').values()]\n )\n if fill_empty_years:\n print('wiki.Net: filling empty years...')\n nodes_filled = True\n while 
nodes_filled:\n nodes_filled = Net.fill_empty_nodes(self.graph, full_parents=True)\n nodes_filled = True\n while nodes_filled:\n nodes_filled = Net.fill_empty_nodes(self.graph, full_parents=False)\n for node in self.graph.nodes:\n if not self.graph.nodes[node]['year']:\n self.graph.nodes[node]['year'] = Net.MAX_YEAR\n if model and dct:\n print('wiki.Net: calculating weights...')\n self.graph.graph['tfidf'] = Net.compute_tfidf(self.nodes, dump, model, dct)\n Net.set_weights(self.graph)\n if compute_core_periphery:\n print('wiki.Net: computing core-periphery...')\n Net.assign_core_periphery(self.graph)\n if compute_communities:\n print('wiki.Net: computing communities...')\n Net.assign_communities(self.graph)\n if compute_community_cores:\n print('wiki.Net: computing cores within communities...')\n Net.assign_cores_to_communities(self.graph)", "def build_graph(self):\n edge_data_by_type, all_edges, all_nodes = self.load_training_data(\n self.train_edges_file,\n slf_loop=self.config['slf_loop'],\n symmetry_edge=self.config['symmetry_edge'])\n\n num_nodes = len(all_nodes)\n node_features = {\n 'index': np.array(\n [i for i in range(num_nodes)], dtype=np.int64).reshape(-1, 1)\n }\n\n self.graph = heter_graph.HeterGraph(\n num_nodes=num_nodes,\n edges=edge_data_by_type,\n node_types=None,\n node_feat=node_features)\n\n self.edge_types = sorted(self.graph.edge_types_info())\n logging.info('total %d nodes are loaded' % (self.graph.num_nodes))", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict", "def create_graph(self, lat, lon):\n # Open connection to the database (nodes)\n cur = armaps.model.get_db()\n\n # Get the waypoints\n cur.execute(\n \"SELECT * FROM waypoints WHERE venue_id = %s\", \n (self.venue_id,)\n )\n waypoints = cur.fetchall()\n\n # Get the paths (edges)\n cur.execute(\n \"SELECT * FROM paths WHERE venue_id = %s\",\n (self.venue_id,)\n )\n paths = cur.fetchall()\n\n # Transform list of waypoints into dictionary with key = waypoint_id\n for waypoint in waypoints:\n self.waypoints[int(waypoint[\"waypoint_id\"])] = {\n \"lat\": float(waypoint[\"latitude\"]),\n \"lon\": float(waypoint[\"longitude\"]),\n \"waypoint_id\": int(waypoint[\"waypoint_id\"])\n }\n\n # Calculate weights of edges in graph\n for path in paths:\n # Get two nodes (waypoints) associated with edge\n inNode = int(path[\"innode\"])\n outNode = int(path[\"outnode\"])\n\n # Get the coordinates of nodes\n inNode_coords = (self.waypoints[inNode][\"lat\"], self.waypoints[inNode][\"lon\"])\n outNode_coords = (self.waypoints[outNode][\"lat\"], self.waypoints[outNode][\"lon\"])\n distance = geopy.distance.distance(inNode_coords, outNode_coords).miles\n\n # Add to graph (both ways for undirected)\n self.graph.add_edge(inNode, outNode, distance)\n self.graph.add_edge(outNode, inNode, distance)", "def populate_graph(self):", "def gen_graph(self):", "def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = 
PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]", "def generate_graph(self):\n\t\tif self.joins == None:\n\t\t\tself.get_joins()\n\t\tprint('generating Networkx DiGraph object of {database} from query results'.format(**self.__dict__))\n\t\t# save distinct Child column values\n\t\tchilds = set([j.Child for j in self.joins])\n\t\t# save distinct Parent column values\n\t\tparents = set([j.Parent for j in self.joins])\n\t\t# save names of Leaf tables\n\t\tleafs = list(childs - parents)\n\t\tself._traverse_joins(leafs)", "def create_wiki_graph(self):\n\n print 'Creating wiki corpus graph representation'\n\n for path, subdirs, files in os.walk(self.wk_path):\n\n here = os.path.split(path)[1]\n parent = os.path.split(os.path.split(path)[0])[1]\n\n self.categories.add_edge(parent, here)\n\n self.categories[parent][\"path\"] = path\n self.categories[here][\"path\"] = path\n\n for name in files:\n if fnmatch(name, \"*.yaml\") and \"Index\" not in name and \"index\" not in name: # check if there is a text file\n \n category_name = name[0:-5]\n yaml_file_path = os.path.join(\n path, category_name + \".yaml\")\n\n # yaml\n yaml_file = open(yaml_file_path, \"r\")\n docs = yaml.load_all(yaml_file)\n\n # category_name\n for doc in docs:\n cat_parent = doc[\"CategoryPath\"][0]\n\n self.categories.add_edge(\n slugify(cat_parent), slugify(category_name))\n self.categories[slugify(cat_parent)][\"path\"] = path\n self.categories[slugify(category_name)][\"path\"] = path\n\n for cat in doc[\"Categories\"][0][self.language]:\n self.categories.add_edge(\n slugify(category_name), slugify(cat))\n self.categories[slugify(cat)][\"path\"] = path\n\n print(\"The categories graph %s has %d nodes with %d edges\"\n % (self.categories.name,\n nx.number_of_nodes(self.categories),\n nx.number_of_edges(self.categories)))\n for node in nx.nodes(self.categories):\n self.get_corpus_from_node(node)\n\n pickle.dump(self.categories, open(self.graph_path, 'w'))\n\n print \"Graph saved as %s\"%(self.graph_path)", "def generate_graph(self):\n glw = GraphLineWeights()\n\n node_id = 0\n last_key = list(self.storage.keys())[-1]\n\n for key in tqdm.tqdm(self.storage):\n for key_line in self.storage[key]:\n for node in self.storage[key][key_line]:\n # set unique node id and calculate centroid\n node.id = node_id\n node.center_x = node.left + int(node.width / 2)\n node.center_y = node.top + int(node.height / 2)\n node_id += 1\n for key in self.storage:\n for key_line in self.storage[key]:\n for node_with_id in self.storage[key][key_line]:\n # print(node_with_id.word)\n # print(node_with_id.left, node_with_id.top, node_with_id.width, node_with_id.height)\n # consider 4 sides: top, right, bottom, left\n # glw: 0 -> 1 -> 2 -> 3\n # 1. top, verified\n min_dist = self.get_top_node(node_with_id, key - 1, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 0, node_with_id.top_node_id, min_dist)\n # 2. bottom\n min_dist = self.get_bottom_node(node_with_id, key + 1, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 2, node_with_id.bottom_node_id, min_dist)\n # 3. 
left\n min_dist = self.get_left_node(node_with_id, key, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 3, node_with_id.left_node_id, min_dist)\n # 4. right\n min_dist = self.get_right_node(node_with_id, key, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 1, node_with_id.right_node_id, min_dist)\n\n return glw", "def generate_courses():\r\n for category in CourseCategory.objects.all():\r\n Course.objects.create(name=category.name, category=category, is_active=True,\r\n is_featured=True)", "def _build_graph(self):\n pass", "def _construct_graph(self):\n raise NotImplementedError", "def __init__(self, nodes, edges, weights='weight', start_kind='Compound', end_kind='Disease',\n scale_weights=True, max_length=4, w=0.4, n_jobs=1):\n\n super().__init__(nodes, edges, start_kind, end_kind, max_length, w, n_jobs)\n\n # Validate the weights\n if isinstance(weights, str):\n # Make sure that the weights is in the column\n assert weights in self.edge_df.columns\n # Ensure that weights are numberic\n assert np.issubdtype(self.edge_df[weights].dtype, np.number)\n # Store the column name\n self.weights = weights\n\n elif isinstance(weights, collections.Iterable):\n # Ensure that there's a weight for every edge\n assert len(weights) == len(self.edge_df)\n # Make sure the weights are numbers\n assert all(isinstance(w, (int, float)) for w in weights)\n # Store the weights and columname\n self.edge_df['weight'] = weights\n self.weights = 'weight'\n self.scale_weights = scale_weights\n if self.scale_weights:\n self.orig_weights = self.weights\n self._scale_weights_to_degree()\n self._scaling_skipped = False\n\n # Make special matrices required for weighted calculations\n self._generate_weighted_adj_matrices()\n self._degree_weight_weighted_matrices()\n self._modified_weighted_adj_matrices = None", "def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0", "def build_graph(self):\n pass", "def make_graph(self):\n\n # the root node\n self.graph.node(self.playbook_filename, style=\"dotted\", id=\"root_node\")\n\n # loop through the plays\n for play_counter, play in enumerate(self.playbook.get_plays(), 1):\n\n # the load basedir is relative to the playbook path\n if play._included_path is not None:\n self.data_loader.set_basedir(play._included_path)\n else:\n self.data_loader.set_basedir(self.playbook._basedir)\n self.display.vvv(\"Loader basedir set to {}\".format(self.data_loader.get_basedir()))\n\n play_vars = self.variable_manager.get_vars(play)\n play_hosts = [h.get_name() for h in self.inventory_manager.get_hosts(self.template(play.hosts, play_vars))]\n play_name = \"Play #{}: {} ({})\".format(play_counter, clean_name(play.get_name()), len(play_hosts))\n play_name = self.template(play_name, play_vars)\n\n self.display.banner(\"Graphing \" + play_name)\n\n play_id = \"play_\" + str(uuid.uuid4())\n\n self.graph_representation.add_node(play_id)\n\n with self.graph.subgraph(name=play_name) as play_subgraph:\n color, play_font_color = get_play_colors(play)\n # play node\n play_subgraph.node(play_name, id=play_id, style=\"filled\", shape=\"box\", color=color,\n fontcolor=play_font_color, tooltip=\" \".join(play_hosts))\n\n # edge from root node to plays\n play_edge_id = \"edge_\" + 
str(uuid.uuid4())\n play_subgraph.edge(self.playbook_filename, play_name, id=play_edge_id, style=\"bold\",\n label=str(play_counter), color=color, fontcolor=color)\n\n # loop through the pre_tasks\n self.display.v(\"Graphing pre_tasks...\")\n nb_pre_tasks = 0\n for pre_task_block in play.pre_tasks:\n nb_pre_tasks = self._include_tasks_in_blocks(current_play=play, graph=play_subgraph,\n parent_node_name=play_name, parent_node_id=play_id,\n block=pre_task_block, color=color,\n current_counter=nb_pre_tasks, play_vars=play_vars,\n node_name_prefix=\"[pre_task] \")\n\n # loop through the roles\n self.display.v(\"Graphing roles...\")\n role_number = 0\n for role in play.get_roles():\n # Don't insert tasks from ``import/include_role``, preventing duplicate graphing\n if role.from_include:\n continue\n\n # the role object doesn't inherit the tags from the play. So we add it manually.\n role.tags = role.tags + play.tags\n if not role.evaluate_tags(only_tags=self.options.tags, skip_tags=self.options.skip_tags,\n all_vars=play_vars):\n self.display.vv(\"The role '{}' is skipped due to the tags.\".format(role.get_name()))\n # Go to the next role\n continue\n\n role_number += 1\n role_name = \"[role] \" + clean_name(role.get_name())\n\n with self.graph.subgraph(name=role_name, node_attr={}) as role_subgraph:\n current_counter = role_number + nb_pre_tasks\n role_id = \"role_\" + str(uuid.uuid4())\n edge_id = \"edge_\" + str(uuid.uuid4())\n\n role_subgraph.node(role_name, id=role_id)\n # edge from play to role\n role_subgraph.edge(play_name, role_name, label=str(current_counter), color=color,\n fontcolor=color, id=edge_id)\n\n self.graph_representation.add_link(play_id, edge_id)\n self.graph_representation.add_link(edge_id, role_id)\n\n # loop through the tasks of the roles\n if self.options.include_role_tasks:\n role_tasks_counter = 0\n for block in role.compile(play):\n role_tasks_counter = self._include_tasks_in_blocks(current_play=play,\n graph=role_subgraph,\n parent_node_name=role_name,\n parent_node_id=role_id, block=block,\n color=color, play_vars=play_vars,\n current_counter=role_tasks_counter,\n node_name_prefix=\"[task] \")\n role_tasks_counter += 1\n self.display.v(\"{} roles added to the graph\".format(role_number))\n\n # loop through the tasks\n self.display.v(\"Graphing tasks...\")\n nb_tasks = 0\n for task_block in play.tasks:\n nb_tasks = self._include_tasks_in_blocks(current_play=play, graph=play_subgraph,\n parent_node_name=play_name, parent_node_id=play_id,\n block=task_block, color=color,\n current_counter=role_number + nb_pre_tasks,\n play_vars=play_vars, node_name_prefix=\"[task] \")\n\n # loop through the post_tasks\n self.display.v(\"Graphing post_tasks...\")\n for post_task_block in play.post_tasks:\n self._include_tasks_in_blocks(current_play=play, graph=play_subgraph, parent_node_name=play_name,\n parent_node_id=play_id, block=post_task_block, color=color,\n current_counter=nb_tasks, play_vars=play_vars,\n node_name_prefix=\"[post_task] \")\n\n self.display.banner(\"Done graphing {}\".format(play_name))\n self.display.display(\"\") # just an empty line\n # moving to the next play", "def create_nodes(nd=None):\n\n if not nd:\n raise ValueError(\"No nodes data provided.\")\n\n nodes = []\n\n # Create Bus objects from buses table\n busd = {}\n\n for i, b in nd[\"buses\"].iterrows():\n if b[\"active\"]:\n bus = solph.Bus(label=b[\"label\"])\n nodes.append(bus)\n\n busd[b[\"label\"]] = bus\n if b[\"excess\"]:\n nodes.append(\n solph.Sink(\n label=b[\"label\"] + \"_excess\",\n 
inputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"excess costs\"]\n )\n },\n )\n )\n if b[\"shortage\"]:\n nodes.append(\n solph.Source(\n label=b[\"label\"] + \"_shortage\",\n outputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"shortage costs\"]\n )\n },\n )\n )\n\n # Create Source objects from table 'commodity sources'\n for i, cs in nd[\"commodity_sources\"].iterrows():\n if cs[\"active\"]:\n nodes.append(\n solph.Source(\n label=cs[\"label\"],\n outputs={\n busd[cs[\"to\"]]: solph.Flow(\n variable_costs=cs[\"variable costs\"]\n )\n },\n )\n )\n\n # Create Source objects with fixed time series from 'renewables' table\n for i, re in nd[\"renewables\"].iterrows():\n if re[\"active\"]:\n # set static outflow values\n outflow_args = {\n \"nominal_value\": re[\"capacity\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == re[\"label\"]:\n outflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Source(\n label=re[\"label\"],\n outputs={\n busd[re[\"to\"]]: solph.Flow(**outflow_args)\n },\n )\n )\n\n # Create Sink objects with fixed time series from 'demand' table\n for i, de in nd[\"demand\"].iterrows():\n if de[\"active\"] and not pd.isnull(de['active']):\n # set static inflow values\n inflow_args = {\n \"nominal_value\": de[\"nominal value\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == de[\"label\"]:\n inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Sink(\n label=de[\"label\"],\n inputs={\n busd[de[\"from\"]]: solph.Flow(**inflow_args)\n },\n )\n )\n\n # Create Transformer objects from 'transformers' table\n for i, t in nd[\"transformers\"].iterrows():\n if t[\"active\"]:\n # set static inflow values\n inflow_args = {\"variable_costs\": t[\"variable input costs\"]}\n # get time series for inflow of transformer\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == t[\"label\"]:\n inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n # create\n nodes.append(\n solph.Transformer(\n label=t[\"label\"],\n inputs={busd[t[\"from\"]]: solph.Flow(**inflow_args)},\n outputs={\n busd[t[\"to\"]]: solph.Flow(nominal_value=t[\"capacity\"])\n },\n conversion_factors={busd[t[\"to\"]]: t[\"efficiency\"]},\n )\n )\n\n for i, s in nd[\"storages\"].iterrows():\n if s[\"active\"]:\n nodes.append(\n solph.components.GenericStorage(\n label=s[\"label\"],\n inputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity inflow\"],\n variable_costs=s[\"variable input costs\"],\n )\n },\n outputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity outflow\"],\n variable_costs=s[\"variable output costs\"],\n )\n },\n nominal_storage_capacity=s[\"nominal capacity\"],\n loss_rate=s[\"capacity loss\"],\n initial_storage_level=s[\"initial capacity\"],\n max_storage_level=s[\"capacity max\"],\n min_storage_level=s[\"capacity min\"],\n inflow_conversion_factor=s[\"efficiency inflow\"],\n outflow_conversion_factor=s[\"efficiency outflow\"],\n )\n )\n\n for i, p in nd[\"powerlines\"].iterrows():\n if p[\"active\"]:\n bus1 = busd[p[\"bus_1\"]]\n bus2 = busd[p[\"bus_2\"]]\n nodes.append(\n solph.custom.Link(\n label=\"powerline\" + \"_\" + p[\"bus_1\"] + \"_\" + p[\"bus_2\"],\n inputs={bus1: solph.Flow(), bus2: solph.Flow()},\n outputs={\n bus1: solph.Flow(nominal_value=p[\"capacity\"]),\n bus2: 
solph.Flow(nominal_value=p[\"capacity\"]),\n },\n conversion_factors={\n (bus1, bus2): p[\"efficiency\"],\n (bus2, bus1): p[\"efficiency\"],\n },\n )\n )\n\n return nodes", "def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()", "def generate_model(self):\n rootpath = 'c:\\\\Users\\\\Gamelab\\\\Desktop\\\\RT\\\\Others\\\\Thesis\\\\Thesis_coding\\\\ABM\\\\' \n \n df = pd.read_csv(rootpath+'data\\\\subset_initialized_latlonvalues.csv')\n df = df.drop(columns='Unnamed: 0')\n households_in_block = {}\n household_ids_in_block = {}\n # holds all the graphs indexed by blockid [geoid]\n \n def add_and_remove_edges(G, p_new_connection, p_remove_connection): \n\n new_edges = [] \n rem_edges = [] \n for node in G.nodes(): \n # find the other nodes this one is connected to \n connected = [to for (fr, to) in G.edges(node)] \n # and find the remainder of nodes, which are candidates for new edges \n unconnected = [n for n in G.nodes() if not n in connected] \n\n # probabilistically add a random edge \n if len(unconnected): # only try if new edge is possible \n if random.random() < p_new_connection: \n new = random.choice(unconnected) \n G.add_edge(node, new) \n #print(\"\\tnew edge:\\t {} -- {}\".format(node, new) \n new_edges.append( (node, new) ) \n # book-keeping, in case both add and remove done in same cycle \n unconnected.remove(new) \n connected.append(new) \n\n # probabilistically remove a random edge \n if len(connected): # only try if an edge exists to remove \n if random.random() < p_remove_connection: \n remove = random.choice(connected) \n G.remove_edge(node, remove) \n #print \"\\tedge removed:\\t {} -- {}\".format(node, remove) \n rem_edges.append( (node, remove) ) \n # book-keeping, in case lists are important later? 
\n connected.remove(remove) \n unconnected.append(remove) \n return rem_edges, new_edges\n\n\n\n\n #now i need to get number of geoids unique \n for block in df['geoid'].unique(): \n G_temp=nx.Graph()\n households_in_block[block] = df[df['geoid']==block] # contains all the information about the households \n household_ids_in_block[block] = df[df['geoid']==block]['CASE_ID'].values \n # contains only their ID\n # you only need id to initialize a node\n tempdf = households_in_block[block]\n for household in household_ids_in_block[block]:\n lon = tempdf.loc[tempdf['CASE_ID']==household,'lon'].values[0]\n lat = tempdf.loc[tempdf['CASE_ID']==household,'lat'].values[0] \n \n G_temp.add_node(str(household), pos=(lon,lat))\n self.G.add_node(str(household), pos=(lon,lat))\n \n ## add G to the dictionary\n self.graph_dict[block] = G_temp\n \n \n rem_edges, new_edges = add_and_remove_edges(self.G, 0.5, 0.5)\n self.G.remove_edges_from(rem_edges)\n self.G.add_edges_from(new_edges)\n\n \n\n self.grid= NetworkGrid(self.G)\n \n for _, row in df.iterrows(): # index, row in ...\n \n agent = Household(unique_id = str(row['CASE_ID']),\n model = self, \n income = row['income'],\n age= row['age'],\n size= row['household_'],\n ami_category = row['ami_categ'],\n elec_consumption= row['elec_consumption'],\n attitude = row['attitude'],\n pbc = row['pbc'],\n subnorms = row['subnorms'],\n geoid = row['geoid'],\n tract = row['tract'],\n bgid = row['bgid'],\n adoption_status = 0)\n \n \n\n if agent:\n self.schedule.add(agent)\n y = row['lat']\n x = row['lon']\n self.grid.place_agent(agent, node_id=agent.unique_id)\n #self.space.place_agent(agent, (x, y))\n #agent.pos = (x, y)", "def __create_graph(self):\n # create the nodes\n for h in range(self.height):\n row: List[JuncNode] = list()\n for w in range(self.width):\n jnodes: List[Node] = [self.add_node() for _ in range(4)]\n jn = JuncNode(jnodes, (h, w))\n row.append(jn)\n self.__juncs.append(row)\n # create all connections\n self.__create_connections()", "def createOptimizedGraph(routes):\n\n\tgraphClass = Graph(routes, directed=True)\n\n\treturn graphClass.getGraph()", "def iGraphFromTuples(association_tuples):\n \n# #get unique words\n# vocab = set()\n# uppercase_tuples = []\n# for (s,r), stren in association_tuples:\n# uppercase_tuples.append((s.upper(), r.upper(), stren))\n# vocab.update(word_pair)\n \n# vocab = list(vocab) #convert to ordered list\n# \n# \n# graph = Graph(len(vocab), directed=True)\n# graph.vs[\"name\"] = vocab #set vertex names\n# edges, _ = zip(*association_tuples)\n# graph.add_edges(edges)\n #association_tuples = [(s.upper(),r.upper(),stren) for (s,r), stren in association_tuples]\n association_tuples = [(s,r,stren) for (s,r), stren in association_tuples]\n graph = Graph.TupleList(association_tuples, directed=True, weights=True)\n \n graph.vs[\"id\"] = graph.vs[\"name\"]\n \n #add weights\n# for s, r , stren in association_tuples:\n# graph[(s,r)] = stren\n neg_log_proportions = []\n for e in graph.es:\n neg_log_proportions.append(-log10(e[\"weight\"]))\n \n graph.es[\"-log weight\"] = neg_log_proportions\n \n assoc_object = AssociationIGraph()\n assoc_object.graph = graph\n return assoc_object", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n 
graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph", "def generate_graph(number_of_nodes):\n cities = []\n size = int(math.sqrt(number_of_nodes))\n if size*size != number_of_nodes:\n raise ArgumentError(\"At the moment generate_graph() only takes perfect squares (3, 16, 25 etc.). Feel free to improve it.\")\n test = 0\n for position in range(0, number_of_nodes):\n city = City()\n city.x_position = (position) % size\n city.y_position = int(position / size)\n cities.append(city)\n\n for i_city in range(0, len(cities)):\n city = cities[i_city]\n x_pos = city.x_position\n y_pos = city.y_position\n\n if x_pos != 0:\n city.adjacent_cities.append(cities[i_city - 1])\n\n if x_pos != size-1:\n city.adjacent_cities.append(cities[i_city + 1])\n\n if y_pos != 0:\n city.adjacent_cities.append(cities[i_city - size])\n\n if y_pos != size-1:\n city.adjacent_cities.append(cities[i_city + size])\n\n return cities", "def buildGraph(self):\r\n\r\n print 'Building graph...'\r\n\r\n self.buildQ()\r\n self.buildP()\r\n self.buildReconstructionTerm()\r\n self.buildConditionalPriorTerm()\r\n self.buildWPriorTerm()\r\n self.buildZPriorTerm()\r\n\r\n self.buildObjective()\r\n self.buildGrad()", "def create_weighted_bipartite_graph(G,d):\n\n\tfor k in d.keys():\n\t\tfor v in d[k]:\n\t\t\tG.add_node(v[0],bipartite='code')\n\t\t\tG.add_edge(k,v[0],weight=v[1])\n\n\treturn G", "def __init__(self, graphs: List[Graph], graph_ids: Set[str]) -> None:\n self.graph_ids = graph_ids\n\n # count of link given source & object\n self.c_l_given_so: Dict[Tuple[bytes, bytes], Dict[bytes, int]] = {}\n # count of nodes\n self.c_n: Dict[bytes, int] = {}\n # count of link given source\n self.c_l_given_s: Dict[bytes, Dict[bytes, int]] = {}\n\n # COMPUTE counting\n for g in graphs:\n for link in g.iter_links():\n s = link.get_source_node().label\n o = link.get_target_node().label\n\n # COMPUTE c_l_given_s\n if s not in self.c_l_given_s:\n self.c_l_given_s[s] = {}\n if link.label not in self.c_l_given_s[s]:\n self.c_l_given_s[s][link.label] = 0\n self.c_l_given_s[s][link.label] += 1\n\n # COMPUTE c_l_given_so\n if link.get_target_node().is_data_node():\n # no need to estimate this prob, since it will be result from semantic labeling\n pass\n else:\n if (s, o) not in self.c_l_given_so:\n self.c_l_given_so[(s, o)] = {}\n if link.label not in self.c_l_given_so[(s, o)]:\n self.c_l_given_so[(s, o)][link.label] = 0\n self.c_l_given_so[(s, o)][link.label] += 1\n\n # COMPUTE c_n\n for n in g.iter_nodes():\n if n.label not in self.c_n:\n self.c_n[n.label] = 0\n self.c_n[n.label] += 1\n\n # cached\n self.p_critical_l_given_s = {}\n for s, counts in self.c_l_given_s.items():\n l, c_l = max(counts.items(), key=lambda x: x[1])\n self.p_critical_l_given_s[s] = (l, c_l / self.c_n[s])", "def prepare_graph(label, nodes, edges, graphID):\n features = {'label': label}\n\n G = nx.DiGraph()\n nodes[\"id\"] = nodes[\"id\"].apply(lambda x : str(x))\n features['num_nodes'] = nodes.shape[0]\n op_node = None\n times = []\n friends = []\n followers = []\n for 
index, row in nodes.iterrows():\n G.add_node(row['id'], time=row['time'], friends=row['friends'], followers = row['followers'])\n times.append(row['time'])\n friends.append(2**row['friends'])\n followers.append(2**row['followers'])\n if row['time'] == 0:\n features['poster_friend_cnt'] = 2**row['friends']\n features['poster_follower_cnt'] = 2**row['followers']\n tweeter_id = row['id']\n op_node = row['id']\n features['avg_time'] = np.mean(times)\n features['avg_friends'] = np.mean(friends)\n features['avg_followers'] = np.mean(followers)\n features['max_followers'] = max(followers)\n features['max_friends'] = max(friends)\n features['friends_25th_percentile'] = np.percentile(friends, 25)\n features['friends_75th_percentile'] = np.percentile(friends, 75)\n features['followers_25th_percentile'] = np.percentile(followers, 25)\n features['followers_75th_percentile'] = np.percentile(followers, 75)\n node_list = []\n edge_count = 0\n for pair in edges:\n node1, node2 = pair.split()[0], pair.split()[1]\n node_list.append(node1)\n node_list.append(node2)\n G.add_edge(node1, node2)\n edge_count += 1\n features['num_edges'] = edge_count\n sum_users_without_followers = sum([1 for (node, val) in G.in_degree() if val==0])\n features['ratio_users_w/out_followers'] = sum_users_without_followers / len(G.nodes)\n features['num_connected_components'] = nx.number_strongly_connected_components(G)\n features['number_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id)\n features['percentage_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id) / features['poster_follower_cnt']\n features['avg_clustering'] = nx.average_clustering(G)\n features['op_clustering'] = nx.clustering(G,op_node)\n features['transitivity'] = nx.transitivity(G)\n node_list = list(set(node_list))\n features['nodeID_list'] = np.array(node_list)\n features['graph_id'] = graphID\n return features, G", "def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict", "def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). 
Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict", "def gen_nodes(modelfile, starting_genes):\n # read json file with final model variables\n shape, top_genes, weights, output_key, biases = read_json(modelfile)\n\n # initialize database\n database = db.Database()\n\n # create list to store all layers\n NN = []\n\n # get input probe sequences\n input_seqs_df = inputs.probes_df(top_genes)\n # each layer is a dictionary with keys as names of strands and values as a list of seqs\n l_0 = {}\n probe_seqs = []\n for probe in input_seqs_df[\"Probe Sequences\"]:\n index = 0\n size = database.size\n while database.size < size + 1:\n try:\n database.database_insert(Seq(probe[index]))\n index += 1\n # except block handles case that NONE of the probe sequences were accepted into the database\n # ***TEMPORARY FIX***\n except IndexError:\n index -= 1\n break\n probe_seqs.append(Seq(probe[index]))\n l_0[\"Probe Sequence\"] = probe_seqs\n print(\"Layer 0: \", l_0)\n NN.append(l_0)\n\n # add the tether and promotor to the database\n database.database_insert(starting_genes[\"Tether\"])\n database.database_insert(starting_genes[\"T7 Promoter\"])\n\n # generate all the sequences for every node in each layer\n for layer in range(1, len(shape)):\n # add the cage and tether sequences to the layer dictionary\n l_i = {}\n l_i[\"Cage Sense\"] = [starting_genes[\"Cage Sense\"]] * shape[layer]\n l_i[\"Cage Antisense\"] = [starting_genes[\"Cage Antisense\"]] * shape[layer]\n l_i[\"Tether\"] = [starting_genes[\"Tether\"]] * shape[layer]\n\n print(\"getting anchor strands\")\n tether_length = len(starting_genes[\"Tether\"])\n size = database.size\n # generate anchor strands until all of them have been accepted into the database\n while database.size < size + shape[layer]:\n anchor = oligo.oligo(tether_length)\n database.database_insert(anchor)\n anchor_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n print(\"getting transcription factors\")\n threshold_energy = 9 # variable that can be changed, pos integer, see gen_tf for description\n static_tf_seqs = []\n tf_seqs = []\n for anchor in anchor_seqs:\n static_tf, tf = gen_tf(anchor, starting_genes[\"Tether\"], threshold_energy)\n static_tf_seqs.append(static_tf)\n tf_seqs.append(tf)\n print(\"DONE\")\n\n print(\"getting outputs\")\n output_length = 25 # length of dna transcript from one node\n size = database.size\n while database.size < size + shape[layer]:\n output = 
oligo.oligo(output_length).sequence\n database.database_insert(output)\n transcript_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n # assemble longer strands in the node\n l_i[\"Static TF + Transcript Sense\"] = [static_tf_seqs[i] + starting_genes[\"T7 Promoter\"] + transcript_seqs[i]\n for i in range(shape[layer])]\n l_i[\"Transcript Antisense + Anchor\"] = [\n oligo.complement(transcript_seqs[i]) + oligo.complement(starting_genes[\"T7 Promoter\"]) + anchor_seqs[i] for\n i in range(shape[layer])]\n\n # intermediates are the strands that determine weights in toehold-mediated displacement\n print(\"getting intermediate\")\n toe_length = 20 # standard length for all toehold sequences\n # get the 2D matrix for this layer and round the values to one decimal place\n weight_matrix = np.array(weights[layer - 1])\n weight_matrix = np.round(weight_matrix, 1)\n intermediate_seqs = []\n tf_appendage_seqs = []\n for i in range(shape[layer - 1]):\n if layer == 1:\n output = NN[0][\"Probe Sequence\"][i]\n else:\n output = NN[layer - 1][\"Static TF + Transcript Sense\"][i][-output_length:]\n inters = []\n top_toe = output[:toe_length]\n b_dom = output[toe_length:]\n tf_appendage_seqs.append(b_dom)\n # get all the possible sequences for toehold weights between 0 and 1\n weight_dict = quant.find_quanta(top_toe)\n for j in range(shape[layer]):\n w = weight_matrix[j, i]\n tf = tf_seqs[j]\n a_star_tf = tf[:len(tf) // 2]\n if w < 0:\n # negative weights\n inters.append(a_star_tf + oligo.complement(b_dom) + weight_dict[w * -1])\n else:\n # positive weights\n inters.append(oligo.complement(a_star_tf) + oligo.complement(b_dom) + weight_dict[w])\n\n intermediate_seqs.append(inters)\n # each list in the nested list is for one node in the layer, get nodes row-wise\n l_i[\"Intermediate\"] = np.array(intermediate_seqs).T.tolist()\n print(\"DONE\")\n\n # TF and TF Inhibitor are products of toehold-mediated displacement for pos and neg weights, respectively\n full_tf_seqs_2D = []\n attack_seqs_2D = []\n for tf in tf_seqs:\n full_tf_seqs = []\n attack_seqs = []\n for appendage in tf_appendage_seqs:\n full_tf_seq = appendage + tf\n attack_seq = appendage + oligo.complement(tf[:len(tf) // 2])\n full_tf_seqs.append(full_tf_seq)\n attack_seqs.append(attack_seq)\n full_tf_seqs_2D.append(full_tf_seqs)\n attack_seqs_2D.append(attack_seqs)\n l_i[\"TF\"] = full_tf_seqs_2D\n l_i[\"TF Inhibitor\"] = attack_seqs_2D\n\n print(\"Layer {}: \".format(layer), l_i)\n # add the completed layer to the NN list\n NN.append(l_i)\n\n return NN", "def build_graph(nodes):\n\n job_instances_map = {}\n\n # first create node structure\n nodes_map = {}\n root_nodes = []\n for node in nodes:\n new_node = JobGraphNode(node, job_instances_map)\n nodes_map[node.id] = new_node\n # check if it is root node\n try:\n node.relationships.next()\n except StopIteration:\n root_nodes.append(new_node)\n\n # then set relationships\n for _, child in nodes_map.iteritems():\n for relationship in child.cfy_node.relationships:\n parent = nodes_map[relationship.target_node.id]\n parent.add_child(child)\n child.add_parent(parent)\n\n return root_nodes, job_instances_map", "def create_initial_graph(self):\n # Initialise weights\n for link in self.gene_links:\n link.weight = random.uniform(weight_init_min, weight_init_max)\n # Initialise biases\n for node in self.gene_nodes:\n node.bias = random.uniform(bias_init_min, bias_init_max)\n if node.can_modify:\n node.act_func = self.act_set.get_random_activation_func()\n if node.act_func in 
[activations.gaussian, activations.sin]:\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)", "def build_graph(self):\n for node in self.nodes:\n self.graph.add_node(node.id, node_obj=node)\n edges = []\n for i in range(0, len(self.nodes)):\n for j in range(i+1, len(self.nodes)):\n if (self.nodes[i].distance(self.nodes[j]) < self.radio_range):\n edges.append((self.nodes[i].id, self.nodes[j].id,1))\n self.graph.add_weighted_edges_from(edges)", "def build_graph(self):\n raise NotImplementedError", "def __initialize_connection_strengths(G):\n G_prime = G.__deepcopy__() # construct a deepcopy of the graph\n # for every vertex in the graph, initialize the connection strength to zero\n for node in G_prime.get_nodeset(): node.add_attribute(StoerWagner.CONNECTION_STRENGTH_ATTRIBUTE, float(0))\n return G_prime # return the new graph", "def build(self):\n self.logger.info('Rebuilding adjacency information')\n self.edges = collections.defaultdict(list)\n\n topic_to_publisher = collections.defaultdict(list)\n topic_to_subscribers = collections.defaultdict(list)\n node_to_missing_deps = collections.defaultdict(list)\n\n result = True\n\n for node in self.nodes.values():\n for topic in node.provided_topics.keys():\n topic_to_publisher[topic].append(node)\n\n for topic in node.required_topics:\n topic_to_subscribers[topic].append(node)\n\n for dep in node.additional_dependencies:\n if dep not in self.nodes:\n node_to_missing_deps[node].append(dep)\n\n if len(node_to_missing_deps) > 0:\n result = False\n msg = io.StringIO()\n print('Found [{}] managed processes with missing dependencies'.format(len(node_to_missing_deps)), file=msg)\n fmt = ' Managed process [{}] is missing [{}]'\n\n for (node, missing) in node_to_missing_deps.items():\n print(fmt.format(node.name, ', '.join(missing)), file=msg)\n self.logger.error(msg.getvalue())\n\n missing_publishers = []\n for topic in topic_to_subscribers.keys():\n if topic not in topic_to_publisher:\n missing_publishers.append(topic)\n\n if len(missing_publishers) > 0:\n result = False\n msg = io.StringIO()\n print('Found [{}] topics that do not have publishers'.format(len(missing_publishers)), file=msg)\n fmt = ' Topic [{}] with subscribers [{}]'\n\n for topic in missing_publishers:\n print(fmt.format(topic, ', '.join([x.name for x in topic_to_subscribers[topic]])), file=msg)\n self.logger.error(msg.getvalue())\n\n if not result:\n self.logger.error('Found errors when building adjacency information')\n raise GraphBuildError(\n 'Found errors when building adjacency information / graph edges. Check log for details')\n\n # Now we have enough information to build our edges. 
Phase 1: pub/sub stuff\n for (topic, subscribers) in topic_to_subscribers.items():\n publishers = topic_to_publisher[topic]\n\n for p in publishers:\n for s in subscribers:\n self.edges[p].append(s)\n\n # Phase 2: additional dependencies\n for node in self.nodes.values():\n for dep in node.additional_dependencies:\n src = self.nodes[dep]\n self.edges[src].append(node)", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def generate_random_graph(num_nodes):\n root = Node()\n nodes = set([root])\n edge_count = 0\n num_edges = int(math.log(num_nodes, 1.7)) * num_nodes\n\n for i in range(1, num_nodes):\n node = Node()\n node.edges.add(random.sample(nodes, 1)[0])\n nodes.add(node)\n edge_count += 1\n\n # Generate edges until \n for j in range(edge_count, num_edges):\n tail, head = random.sample(nodes, 2)\n while head in tail.edges:\n tail, head = random.sample(nodes, 2)\n tail.edges.add(head)\n edge_count += 1\n \n # Convert our graph to CSR representation by first creating an adjacency\n # matrix and then transforming it to a CSR\n\n # Generating adjacency matrix\n adjacency_matrix = [[0] * num_nodes for i in range(num_nodes)]\n sums = defaultdict(int)\n stack = [root]\n visited = set()\n while stack:\n curr = stack.pop()\n if curr not in visited:\n visited.add(curr)\n for node in curr.edges:\n stack.append(node)\n adjacency_matrix[curr.id][node.id] = 1.0\n sums[curr.id] += 1\n\n # Adjacency matrix -> CSR\n offset = 0\n csr = [[] for i in range(3)]\n nonzeros = np.nonzero(adjacency_matrix)\n last_row = -1\n for i in range(len(nonzeros[0])):\n row = nonzeros[0][i]\n col = nonzeros[1][i]\n outdegree = sums[row]\n if last_row != row:\n csr[1].append(offset)\n csr[0].append(adjacency_matrix[row][col] / outdegree)\n csr[2].append(col)\n offset += 1\n last_row = row\n csr[1].append(offset)\n\n # Write to txt and pickle\n with open(generate_filepath_txt(num_nodes), \"w\") as fp:\n fp.write(' '.join(str(i) for i in csr[0]) + '\\n')\n fp.write(' '.join(str(i) for i in csr[1]) + '\\n')\n fp.write(' '.join(str(i) for i in csr[2]))\n with open(generate_filepath_pickle(num_nodes), \"wb\") as fp:\n pickle.dump(csr, fp)", "def makeGraph(self):\n self.floorGraph = graph.Graph()\n file = open(\"edges.csv\")\n edges = file.readlines()\n for edge in edges:\n params = edge.split(\",\")\n self.floorGraph.addEdge(params[0],params[1],float(params[2]))\n self.floorGraph.addEdge(params[1],params[0],float(params[2]))", "def create_graph_domain():\n \n \"\"\"\n Fetch data\n \"\"\"\n \n from input.read_input import read_item_data\n df = read_item_data()\n df['item_id'] = df.index\n dct_title = df['title'].to_dict()\n dct_domain = df['domain_id'].to_dict()\n dct_cat= df['category_id'].to_dict()\n \n dct_price = df['price'].to_dict()\n \n \"\"\" Ratio stuff \"\"\" \n from input.create_ratio import get_ratio\n dct_ratio_dom = get_ratio(which='domain_id')\n \n ratio_df = get_ratio(which='item_id',full=True)\n ratio_df['popularity'] = 100.0*ratio_df['bought'] + ratio_df['searched']\n dct_ratio_item_b = ratio_df['popularity'].to_dict()\n \n \n \n \"\"\"\n JSON\n \n \"\"\"\n check = lambda x: x <= np.round(413163*0.8).astype(np.int32)\n \n DATA_PATH = path.join(DATA_DIR,'train_dataset.jl')\n line_i = 0\n \n \n\n \"\"\"\n Create graph vertices\n \"\"\"\n g = ig.Graph() \n from input.read_input import get_mappings\n counter, f_map_func, r_map_func = get_mappings()\n \n num_items = df.shape[0]\n for k in dct_title.keys():\n 
g.add_vertex(value=k,deg=dct_ratio_item_b[k],domain_id=dct_domain[k],price=dct_price[k],cat='item_id')\n\n \"\"\" ['item_id','domain_id','category_id','product_id'] \"\"\"\n \n for k in pd.unique(df['domain_id']):\n g.add_vertex(value=k,cat='domain_id')\n\n\n for k in pd.unique(df['category_id']):\n g.add_vertex(value=k,cat='category_id')\n\n\n for k in pd.unique(df['product_id']):\n g.add_vertex(value=k,cat='product_id')\n\n \n \n \"\"\"\n Create edges\n \"\"\"\n E1 = []\n E2 = []\n \n with jsonlines.open(DATA_PATH) as reader:\n for line_i, obj in enumerate(reader):\n if check(line_i):\n print(line_i)\n L = []\n for h in obj['user_history']:\n if h['event_type'] == 'view':\n #print(\"Viewed {}\".format(dct[h['event_info']]))\n L.append(h['event_info'])\n elif h['event_type'] == 'search':\n #print(\"Searched {}\".format(h['event_info']))\n pass\n L_domain = [dct_domain[k] for k in L]\n L_domain = pd.unique(L_domain)\n L_cat = [dct_cat[k] for k in L]\n L_cat = pd.unique(L_cat)\n \n for i in range(len(L)):\n E1.append(dct_domain[L[i]])\n E2.append(dct_domain[obj['item_bought']] )\n\n \n \n E1 = f_map_func['domain_id'](E1)\n E2 = f_map_func['domain_id'](E2)\n \n \n E = pd.Series(list(zip(E1,E2))).value_counts()\n g.add_edges(E.index)\n g.es[\"weight\"] = E.values\n \n \n g.write_pickle(fname=path.join(DATA_DIR,'graph_domain_to_domain.pkl'))", "def populate_graph(self):\n if self.edges and self.vertices:\n graph = Graph()\n for edge in self.edges:\n graph.add_edge(edge)\n self.graph = graph\n else:\n print(\"Populate edges & vertices first, then populate graph!\")", "def createWeightedGraph(contourdf, file_list, column_name):\r\n start_time_creating_weighted_graph = time.time()\r\n weights = np.full((len(contourdf)), 1) # initialize weights to one\r\n contourdf['weights'] = weights\r\n # group the dataframe to count path_length(number of nodes in the path)\r\n path_length_df = contourdf.groupby(['level', 'path']).size().reset_index(name='path_length')\r\n path_length_1_df = path_length_df[path_length_df['path_length'] == 1]\r\n cntr_data_weight_0 = contourdf[(np.isin(contourdf['level'], path_length_1_df['level'])) &\r\n (np.isin(contourdf['path'], path_length_1_df['path']))]\r\n cntr_data_weight_0['weights'] = 0\r\n cntr_data__weight_1 = contourdf[~(np.isin(contourdf['level'], path_length_1_df['level'])) |\r\n ~(np.isin(contourdf['path'], path_length_1_df['path']))]\r\n\r\n cntr_data_weight_1_diffrence = (cntr_data__weight_1.shift() - cntr_data__weight_1)\r\n cntr_data_weight_1_diffrence['calculated_weight'] = (np.sqrt(\r\n (cntr_data_weight_1_diffrence['node_x'].values) ** 2 + (\r\n cntr_data_weight_1_diffrence['node_y'].values) ** 2).tolist())\r\n\r\n cntr_data__weight_1['calculated_weight'] = cntr_data_weight_1_diffrence['calculated_weight'].tolist()\r\n cntr_data__weight_1['path_diff'] = cntr_data_weight_1_diffrence['path'].tolist()\r\n weight_list = cntr_data__weight_1['calculated_weight'].tolist()\r\n # for index,row in cntr_data__weight_1.iterrows():\r\n # if(row['path_diff'] != 0):\r\n # weight_list[index] = weight_list[index + 1]\r\n indices = cntr_data__weight_1.loc[cntr_data__weight_1['path_diff'] != 0]\r\n for index, row in indices.iterrows():\r\n weight_list[index] = weight_list[index + 1]\r\n cntr_data__weight_1['act2'] = weight_list\r\n cntr_data__weight_1['actual_weight'] = weight_list\r\n cntr_data__weight_1 = cntr_data__weight_1[['level', 'node_x', 'node_y', 'path', 'actual_weight']]\r\n cntr_data_weight_0['actual_weight'] = cntr_data_weight_0['weights']\r\n 
cntr_data_weight_0 = cntr_data_weight_0[['level', 'node_x', 'node_y', 'path', 'actual_weight']]\r\n weighted_df = pd.concat([cntr_data_weight_0, cntr_data__weight_1])\r\n weighted_df = weighted_df.sort_values(['level', 'path'])\r\n weighted_df['aggregated_weight'] = weighted_df.groupby(['level', 'path'])['actual_weight'].transform('sum')\r\n weighted_df = weighted_df[['level', 'node_x', 'node_y', 'path', 'aggregated_weight', 'actual_weight']]\r\n weighted_df['normalized'] = (weighted_df['aggregated_weight'] - weighted_df['aggregated_weight'].min()) / (\r\n weighted_df['aggregated_weight'].max() - weighted_df['aggregated_weight'].min())\r\n\r\n data = fetch_direction(file_list, column_name)\r\n\r\n data['node_x_1'] = data['longitude']\r\n data['node_y_1'] = data['latitude']\r\n\r\n weighted_df['node_x_1'] = weighted_df['node_x'] // 1\r\n weighted_df['node_y_1'] = weighted_df['node_y'] // 1\r\n\r\n merged_df = weighted_df.merge(data, how='left')\r\n merged_df = merged_df[['res_x', 'res_y', 'node_x_1', 'node_y_1']]\r\n\r\n weighted_df['res_dir_x'] = merged_df['res_x'].tolist()\r\n weighted_df['res_dir_y'] = merged_df['res_y'].tolist()\r\n\r\n weighted_df['res_dir_x_1'] = weighted_df['res_dir_x'] * weighted_df['actual_weight']\r\n weighted_df['res_dir_y_1'] = weighted_df['res_dir_y'] * weighted_df['actual_weight']\r\n\r\n weighted_df['res_dir_x_1'] = weighted_df.groupby(['level', 'path'])['res_dir_x_1'].transform('sum') / weighted_df[\r\n 'aggregated_weight']\r\n weighted_df['res_dir_y_1'] = weighted_df.groupby(['level', 'path'])['res_dir_y_1'].transform('sum') / weighted_df[\r\n 'aggregated_weight']\r\n\r\n weighted_df['resultant'] = weighted_df['res_dir_x_1'] + weighted_df['res_dir_y_1']\r\n weighted_df['mag'] = np.sqrt(np.square(weighted_df['res_dir_x_1']) + np.square(weighted_df['res_dir_y_1']))\r\n\r\n print(\"For creating a weighted graph %s seconds\" % (time.time() - start_time_creating_weighted_graph))\r\n\r\n return weighted_df", "def get_data(nodes=[]):\n\n # get nodes\n if not nodes:\n nodes = mc.ls(sl=1)\n\n # decipher if the nodes are constraints themselves or are driven by constraints\n nodes = mc.ls(nodes)\n constraints = [n for n in nodes if mc.nodeType(n) in constraint_types]\n non_con_nodes = [n for n in nodes if n not in constraints]\n constraints.extend(utils.get_constraints(non_con_nodes))\n\n data = {}\n\n for constraint in constraints:\n\n # get driven target nodes\n ntype = mc.nodeType(constraint)\n constraint_func = get_constraint_func(ntype)\n driven = mc.listConnections(constraint+'.constraintParentInverseMatrix') or []\n drivers = constraint_func(constraint, q=1, tl=1)\n\n if not ntype in constraint_types or not driven or not drivers:\n continue\n\n driven = list(set(driven))\n weight_alias_list = constraint_func(constraint, q=1, wal=1)\n\n con_data = {\n 'con_type': ntype,\n 'drivers': drivers,\n 'driven': driven,\n 'weight_list': [mc.getAttr(constraint+'.'+w) for w in weight_alias_list]\n }\n\n # Create dict entry for constrant types with upvectors\n if ntype in ['aimConstraint', 'tangentConstraint', 'normalConstraint']:\n\n aim = constraint_func(constraint, q=1, aim=1)\n upv = constraint_func(constraint, q=1, u=1)\n wupv = constraint_func(constraint, q=1, wu=1)\n wut = constraint_func(constraint, q=1, wut=1)\n wuo = constraint_func(constraint, q=1, wuo=1)\n\n if type(wuo) == list:\n wuo = wuo[0]\n\n con_data['aim'] = aim\n con_data['u'] = upv\n con_data['wu'] = wupv\n con_data['wut'] = wut\n con_data['wuo'] = wuo\n\n if mc.objExists(constraint+'.interpType'):\n 
con_data['interp_type'] = mc.getAttr(constraint+'.interpType')\n\n data[constraint] = con_data\n\n return data", "def build_square_test_graph_with_costs(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n\n graph.new_node()\n graph.new_node()\n graph.new_node()\n graph.new_node()\n graph.new_edge(1, 2, 2)\n graph.new_edge(1, 4, 10)\n graph.new_edge(2, 3, 3)\n graph.new_edge(3, 4, 1)\n\n return graph", "def get(self):\n self.network = gt.load_graph(self.dotfile)\n\n if self.strongcomponent:\n self.network=gt.extract_largest_component(\n self.network, directed=True, prune=True)\n\n if self.removeselfloops:\n gt.remove_self_loops(self.network)\n\n self.nm = self.network.new_vertex_property(\"string\")\n nm2 = self.network.new_vertex_property(\"string\")\n self.hl = self.network.new_vertex_property(\"bool\")\n self.network.vertex_properties[\"text\"] = self.nm\n self.network.vertex_properties[\"text\"] = nm2\n names=[]\n for v in self.network.vertices():\n if v.out_degree() > -1:\n self.nm[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n nm2[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n self.hl[v]=False\n else:\n nm2[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n self.nm[v]=''\n self.hl[v]=False\n names=names+[nm2[v]]\n\n NAMES=pd.Series(list(set(names)),\n name='varclass').reset_index().set_index('varclass')\n self.varclass = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"varclass\"] = self.varclass\n for v in self.network.vertices():\n self.varclass[v]=NAMES.loc[nm2[v]].values[0]\n\n self.od = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"size\"] = self.od\n for v in self.network.vertices():\n self.od[v]=self.f(v.out_degree(),\n A=self.minsize,\n E=self.exponentialscaling,\n exponent=self.exponent)+5\n self.ods = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"size\"] = self.ods\n for v in self.network.vertices():\n self.ods[v]=1*self.f(v.out_degree(),\n A=self.minsize,\n E=self.exponentialscaling,\n exponent=1)+2\n\n self.ew = self.network.new_edge_property(\"float\")\n self.network.edge_properties[\"eweight\"] = self.ew\n for e in self.network.edges():\n self.ew[e]=float(self.network.ep.weight[e])**1\n\n self.ew_pen = self.network.new_edge_property(\"float\")\n self.network.edge_properties[\"eweight_pen\"] = self.ew_pen\n for e in self.network.edges():\n self.ew_pen[e]=4/(1 + np.exp(-.05-np.fabs(float(self.network.ep.weight[e]))))\n\n self.e_marker = self.network.new_edge_property(\"string\")\n self.network.edge_properties[\"e_marker\"] = self.e_marker\n for e in self.network.edges():\n if float(self.network.ep.weight[e]) < 0:\n self.e_marker[e]='bar'\n else:\n self.e_marker[e]='arrow'\n\n self.deg = self.network.degree_property_map(\"out\")\n\n self.ecol = self.network.new_edge_property(\"vector<double>\")\n self.network.edge_properties[\"ecol\"] = self.ecol\n for e in self.network.edges():\n col=cm.ScalarMappable(mpl.colors.Normalize(vmin=-self.edgecollim,\n vmax=self.edgecollim),\n cmap=self.edgecolmap).to_rgba(float(self.ew[e]))\n col=list(col)\n col[3]=self.edgealpha\n self.ecol[e]=tuple(col)\n\n self.pos = gt.graphviz_draw(self.network,\n overlap=False,\n vsize=20,\n sep=self.nodesep,\n output=None)\n\n self.control = self.network.new_edge_property(\"vector<double>\")\n for e in self.network.edges():\n d = np.sqrt(np.sum((self.pos[e.source()].a\n - self.pos[e.target()].a) ** 2))\n 
d=d/2\n self.control[e] = [0.0,0.0,0, .2*d, 0.5, d,1,0]\n\n if self.outfile is not None:\n gt.graph_draw(self.network,nodesfirst=False,\n pos=self.pos,\n vertex_halo=self.hl,\n vertex_halo_color=[.2,.2,.2,.1],\n edge_pen_width=self.ew_pen,\n edge_end_marker=self.e_marker,\n vorder=self.deg,\n edge_marker_size=10,\n vertex_color=self.varclass,#[.5,.5,.5,.3],\n edge_color=self.ecol,#[.5,.5,.5,.5],\n vertex_pen_width=1.5,\n vertex_size=self.od,\n vertex_text=self.nm,\n vcmap=(self.cmap,self.alpha),\n edge_control_points=self.control,\n vertex_fill_color=self.varclass,#deg,\n vertex_font_size=self.ods,\n vertex_text_color=[.1,.1,.1,.8],\n #vertex_text_position=0,\n output=self.outfile)", "def bgll(self, graph, node_count, min_mod, max_pass):\n\n #the belonging of the node\n bl = [i for i in range(node_count)]\n #the node's weight in community\n _in = [0.0] * node_count\n #the node's weight in graph\n _tot = []\n #total weight of a node, just a copy of _tot\n k = []\n #the total weight of the graph\n m = []\n\n #inital the in-param\n network = [[0.0] * node_count for n in range(node_count)]\n for node, tag, weight in graph:\n network[node][tag] = weight\n for node in network:\n k.append(sum(node))\n _tot = k[:]\n m = sum(k)\n #inital the in-param\n\n def modularity():\n \"\"\"\n This function mainly computes the modularity of the network\n Return:\n mod->the modularity value\n \"\"\"\n\n q = 0.0\n for idx in range(0, node_count):\n if _tot[idx] > 0.0:\n q += (_in[idx] / m - math.pow(_tot[idx] / m, 2))\n return q\n\n def modularity_gain(n, c, dnodecomm):\n \"\"\"\n This function mainly computes the modularity gain of a node moving\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n Return:\n gain->modularity gain\n \"\"\"\n\n totc = _tot[c]\n degc = k[n]\n return dnodecomm - (totc * degc) / m\n\n def neigh_comm(n):\n \"\"\"\n This function mainly computes the weight between the node and it's neighbour community\n Param:\n n->node id\n Return:\n nc->the map of the weight between the node and it's neighbour community\n nc=>{cid,weight}\n \"\"\"\n\n nc = {bl[n]: 0.0}\n for idx in range(0, node_count):\n neigh = idx\n ncomm = bl[neigh]\n nei_weight = network[n][idx]\n if (neigh != n) & (nei_weight > 0.0):\n if ncomm in nc:\n nc[ncomm] += nei_weight\n else:\n nc[ncomm] = nei_weight\n return nc\n\n def insert(n, c, dnodecomm):\n \"\"\"\n This function mainly get the effect of insert the node into community\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n \"\"\"\n\n _tot[c] += k[n]\n _in[c] += 2 * dnodecomm + network[n][n]\n bl[n] = c\n\n def remove(n, c, dnodecomm):\n \"\"\"\n This function mainly get the effect of remove the node off community\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n \"\"\"\n\n _tot[c] -= k[n]\n _in[c] -= 2 * dnodecomm + network[n][n]\n bl[n] = -1\n\n def detect():\n \"\"\"\n This function mainly detect the community of the graph.\n \"\"\"\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre 
> best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print(new_mod)\n\n detect()\n return bl", "def build_graph(self):\n self.import_tree(ZOO_PATH, self.import_zoo, self.verify_zoos)\n self.import_tree(WILD_PATH, self.import_wild, self.verify_wilds)\n self.import_tree(PANDA_PATH, self.import_redpanda, self.verify_pandas)\n self.import_tree(MEDIA_PATH, self.import_media, self.verify_media)", "def build_graph(self):\n self.graph = tf.Graph()\n temp_connections = self.connections\n with self.graph.as_default():\n operations = {}\n\n # create Variables for input vertices\n for neuron_id in self.input_neurons:\n self.inputs[neuron_id] = tf.get_variable(name=str(neuron_id), shape=(),\n initializer=tf.zeros_initializer)\n deletion = []\n while len(temp_connections) > 0:\n for neuron_id in deletion:\n temp_connections.pop(neuron_id, None)\n deletion = []\n keys = list(temp_connections)\n random.shuffle(keys)\n # create input & output vertices\n for neuron_id in temp_connections:\n input_neuron_ids = temp_connections[neuron_id]\n if self.check(input_neuron_ids, operations):\n # weights\n v_weights = tf.constant(self.weights[neuron_id])\n # input vertices\n v_inputs = []\n\n for input_neuron_id in input_neuron_ids:\n if self.is_input_neuron(input_neuron_id):\n vertex = self.inputs[input_neuron_id]\n else:\n vertex = operations[input_neuron_id]\n\n v_inputs.append(vertex)\n\n deletion.append(neuron_id)\n\n # multiply weights and inputs\n mul = tf.multiply(v_inputs, v_weights, str(neuron_id))\n # sum multiplied values\n sum = tf.reduce_sum(mul, name='sum_' + str(neuron_id))\n # apply activation function\n if self.is_output_neuron(neuron_id):\n activation = tf.sigmoid(sum, name=\"output\")\n else:\n activation = tf.nn.leaky_relu(sum, alpha=0.2, name=\"relu_\" + str(neuron_id))\n\n operations[neuron_id] = activation\n if self.is_output_neuron(neuron_id):\n self.output = activation\n return self.graph, self.inputs, self.output", "def compose_school_graph(school_type, N_classes, class_size, N_floors, \n\t\tstudent_p_children, student_p_parents, teacher_p_adults,\n\t\tteacher_p_children, r_teacher_conversation, r_teacher_friend):\n\tassert N_classes % 2 == 0, 'number of classes needs to be even'\n\n\tG = nx.MultiGraph()\n\n\n\t# add students and their household members as nodes to the graph\n\tfamily_member_counter, family_counter = generate_students(G, school_type, \n\t\t\t\t N_classes, class_size, student_p_children, student_p_parents)\n\n\t# assign students to classes based on their age\n\tassign_classes(G, school_type, class_size, N_classes, N_floors)\n\n\t# add teachers and their household members as nodes to the graph\n\tgenerate_teachers(G, school_type, N_classes, family_member_counter, \n\t\t\t\t\t family_counter, teacher_p_adults, teacher_p_children)\n\n\t# set all contacts between members of families\n\tset_family_contacts(G)\n\n\t# generate intra-class contacts between all students in the same class and\n\t# additional (closer) contacts between table neighbours\n\tset_student_student_intra_class_contacts(G, N_classes)\n\n\t# add short (conversations) and long (meetings, friendships) contacts \n\t# between teachers and other teachers\n\tset_teacher_teacher_social_contacts(G, school_type, N_classes,\n\t\t\t\tr_teacher_conversation, r_teacher_friend)\n\n\t# generate the teacher teaching schedule based on the school type\n\tteacher_schedule = get_scheduler(school_type)(N_classes)\n\t# generate 
the student schedule based on whether or not there is daycare\n\t# for the given school type\n\tstudent_schedule = generate_student_schedule(school_type, N_classes,\n\t\t\t\t\t\tclass_size)\n\n\t# create teacher links due to team-teaching (currently only relevant for\n\t# lower secondary and upper secondary)\n\tset_teacher_teacher_teamteaching_contacts(G, school_type, teacher_schedule)\n\n\t# create links between teachers and students based on the teaching schedule\n\tset_teacher_student_teaching_contacts(G, school_type, N_classes, \n\t\tteacher_schedule, student_schedule)\n\n\t# generate links between teachers that supervise groups during daycare\n\t# together\n\tset_teacher_teacher_daycare_supervision_contacts(G, school_type, \n\t\tteacher_schedule)\n\n\t# create links between the teachers supervising the afternoon groups and\n\t# all students in the afternoon groups. Note: the information about \n\t# which students are in which afternoon group are taken from the student\n\t# schedule, because students are assigned to afternoon groups at random.\n\tset_teacher_student_daycare_supervision_contacts(G, school_type, N_classes, \n\t\tteacher_schedule, student_schedule)\n\n\t# add student contacts based on the groups they belong to druing the \n\t# afternoon daycare. Only relevant for schools with daycare\n\tset_student_student_daycare_contacts(G, school_type, student_schedule)\n\n\t#teacher_schedule = teacher_schedule.reset_index()\n\t#student_schedule = student_schedule.reset_index() \n\treturn G, teacher_schedule, student_schedule", "def generate_weighted_graph():\n \n Adj_Matrix = np.array([\n [0.0, 0.2, 0.2, 0.3, 0.2, 0.1],\n [0.1, 0.0, 0.3, 0.3, 0.1, 0.2],\n [0.3, 0.2, 0.0, 0.1, 0.2, 0.2],\n [0.1, 0.4, 0.2, 0.0, 0.2, 0.1],\n [0.2, 0.2, 0.2, 0.2, 0.0, 0.2],\n [0.2, 0.1, 0.1, 0.3, 0.3, 0.0]\n ])\n\n return Adj_Matrix", "def _instantiate_learning_graph(self, context=None):\n\n self.learningGraph = OrderedDict()\n self.learningExecutionGraph = OrderedDict()\n\n def build_dependency_sets_by_traversing_projections(sender_mech, process):\n\n # MappingProjections are legal recipients of learning projections (hence the call)\n # but do not send any projections, so no need to consider further\n from PsyNeuLink.Components.Projections.MappingProjection import MappingProjection\n if isinstance(sender_mech, MappingProjection):\n return\n\n # All other sender_mechs must be either a MonitoringMechanism or an ObjectiveMechanism with role=LEARNING\n elif not (isinstance(sender_mech, LearningMechanism) or\n (isinstance(sender_mech, ObjectiveMechanism) and sender_mech.role is LEARNING)):\n raise SystemError(\"PROGRAM ERROR: {} is not a legal object for learning graph;\"\n \"must be a LearningMechanism or an ObjectiveMechanism\".\n format(sender_mech))\n\n\n # MODIFIED 3/12/17 NEW:\n\n # MANAGE TARGET ObjectiveMechanism FOR INTERNAL or TERMINAL CONVERGENCE of PATHWAYS\n\n # If sender_mech is an ObjectiveMechanism, and:\n # - none of the mechanisms that project to it are are a TERMINAL mechanism for the current process, or\n # - all of the mechanisms that project to it already have an ObjectiveMechanism, then:\n # - do not include the ObjectiveMechanism in the graph;\n # - be sure that its outputState projects to the ERROR_SIGNAL inputState of a LearningMechanism\n # (labelled \"learning_mech\" here -- raise an exception if it does not;\n # - determine whether learning_mech's ERROR_SIGNAL inputState receives any other projections\n # from another ObjectiveMechanism or LearningMechanism (labelled 
\"error_signal_projection\" here)\n # -- if it does, be sure that it is from the same system and if so return;\n # (note: this shouldn't be true, but the test is here for completeness and sanity-checking)\n # - if learning_mech's ERROR_SIGNAL inputState does not receive any projections from\n # another objectiveMechanism and/or LearningMechanism in the system, then:\n # - find the sender to the ObjectiveMechanism (labelled \"error_source\" here)\n # - find the 1st projection from error_source that projects to the ACTIVATION_INPUT inputState of\n # a LearningMechanism (labelled \"error_signal\" here)\n # - instantiate a MappingProjection from error_signal to learning_mech\n # projected\n # IMPLEMENTATION NOTE: Composition should allow 1st condition if user indicates internal TARGET is desired;\n # for now, however, assuming this is not desired (i.e., only TERMINAL mechanisms\n # should project to ObjectiveMechanisms) and always replace internal\n # ObjectiveMechanism with projection from a LearningMechanism (if it is available)\n\n # FIX: RELABEL \"sender_mech\" as \"obj_mech\" here\n\n if isinstance(sender_mech, ObjectiveMechanism) and len(self.learningExecutionGraph):\n\n # TERMINAL CONVERGENCE\n # All of the mechanisms that project to sender_mech\n # project to another ObjectiveMechanism already in the learning_graph\n if all(\n any(\n (isinstance(receiver_mech, ObjectiveMechanism) and\n # its already in a dependency set in the learningExecutionGraph\n receiver_mech in set.union(*list(self.learningExecutionGraph.values())) and\n not receiver_mech is sender_mech)\n # receivers of senders to sender_mech\n for receiver_mech in [proj.receiver.owner for proj in\n mech.outputState.sendsToProjections])\n # senders to sender_mech\n for mech in [proj.sender.owner\n for proj in sender_mech.inputStates[SAMPLE].receivesFromProjections]):\n\n # Get the ProcessingMechanism that projected to sender_mech\n error_source_mech = sender_mech.inputStates[SAMPLE].receivesFromProjections[0].sender.owner\n\n # Get the other ObjectiveMechanism to which the error_source projects (in addition to sender_mech)\n other_obj_mech = next((projection.receiver.owner for projection in\n error_source_mech.outputState.sendsToProjections if\n isinstance(projection.receiver.owner, ObjectiveMechanism)), None)\n sender_mech = other_obj_mech\n\n # INTERNAL CONVERGENCE\n # None of the mechanisms that project to it are a TERMINAL mechanism\n elif not all(all(projection.sender.owner.processes[proc] is TERMINAL\n for proc in projection.sender.owner.processes)\n for projection in sender_mech.inputStates[SAMPLE].receivesFromProjections):\n\n # Get the LearningMechanism to which the sender_mech projected\n try:\n learning_mech = sender_mech.outputState.sendsToProjections[0].receiver.owner\n if not isinstance(learning_mech, LearningMechanism):\n raise AttributeError\n except AttributeError:\n raise SystemError(\"{} does not project to a LearningMechanism in the same process {}\".\n format(sender_mech.name, process.name))\n\n from PsyNeuLink.Components.Mechanisms.AdaptiveMechanisms.LearningMechanisms.LearningAuxilliary \\\n import ACTIVATION_INPUT, ERROR_SIGNAL\n\n # Get the ProcessingMechanism that projected to sender_mech\n error_source_mech = sender_mech.inputStates[SAMPLE].receivesFromProjections[0].sender.owner\n\n # Get the other LearningMechanism to which the error_source projects (in addition to sender_mech)\n error_signal_mech = next((projection.receiver.owner for projection in\n error_source_mech.outputState.sendsToProjections 
if\n projection.receiver.name is ACTIVATION_INPUT), None)\n\n\n # Check if learning_mech receives an error_signal_projection\n # from any other ObjectiveMechanism or LearningMechanism in the system;\n # If it does, get the first one found\n error_signal_projection = next ((projection for projection\n in learning_mech.inputStates[ERROR_SIGNAL].receivesFromProjections\n if (isinstance(projection.sender.owner,(ObjectiveMechanism,\n LearningMechanism)) and\n not projection.sender.owner is sender_mech and\n self in projection.sender.owner.systems.values())), None)\n # If learning_mech receives another error_signal projection,\n # reassign sender_mech to the sender of that projection\n if error_signal_projection:\n if self.verbosePref:\n warnings.warn(\"Although {} a TERMINAL mechanism for the {} process, it is an \"\n \"internal mechanism for other proesses in the {} system; therefore \"\n \"its ObjectiveMechanism ({}) will be replaced with the {} LearningMechanism\".\n format(error_source_mech.name,\n process.name,\n self.name,\n sender_mech.name,\n error_signal_mech))\n sender_mech = error_signal_projection.sender.owner\n\n # FIX: FINISH DOCUMENTATION HERE ABOUT HOW THIS IS DIFFERENT THAN ABOVE\n if error_signal_mech is None:\n raise SystemError(\"Could not find projection to an {} inputState of a LearningMechanism for \"\n \"the ProcessingMechanism ({}) that projects to {} in the {} process\"\n \"\".format(ACTIVATION_INPUT,\n error_source_mech.name,\n sender_mech.name,\n process.name))\n else:\n mp = MappingProjection(sender=error_signal_mech.outputStates[ERROR_SIGNAL],\n receiver=learning_mech.inputStates[ERROR_SIGNAL],\n matrix=IDENTITY_MATRIX)\n if mp is None:\n raise SystemError(\"Could not instantiate a MappingProjection \"\n \"from {} to {} for the {} process\".\n format(error_signal_mech.name, learning_mech.name))\n\n sender_mech = error_signal_mech\n # MODIFIED 3/12/17 END\n\n\n # Delete any projections to mechanism from processes or mechanisms in processes not in current system\n for input_state in sender_mech.inputStates.values():\n for projection in input_state.receivesFromProjections:\n sender = projection.sender.owner\n system_processes = self.processes\n if isinstance(sender, Process):\n if not sender in system_processes:\n del projection\n elif not all(sender_process in system_processes for sender_process in sender.processes):\n del projection\n\n # If sender_mech has no projections left, raise exception\n if not any(any(projection for projection in input_state.receivesFromProjections)\n for input_state in sender_mech.inputStates.values()):\n raise SystemError(\"{} only receives projections from other processes or mechanisms not\"\n \" in the current system ({})\".format(sender_mech.name, self.name))\n\n for outputState in sender_mech.outputStates.values():\n\n for projection in outputState.sendsToProjections:\n receiver = projection.receiver.owner\n try:\n self.learningGraph[receiver].add(sender_mech)\n except KeyError:\n self.learningGraph[receiver] = {sender_mech}\n\n # Use toposort to test whether the added dependency produced a cycle (feedback loop)\n # Do not include dependency (or receiver on sender) in learningExecutionGraph for this projection\n # and end this branch of the traversal if the receiver has already been encountered,\n # but do mark for initialization\n # Notes:\n # * This is because it is a feedback connection, which introduces a cycle into the learningGraph\n # that precludes use of toposort to determine order of execution;\n # however, the feedback 
projection will still be used during execution\n # so the sending mechanism should be designated as INITIALIZE_CYCLE\n # * Check for receiver mechanism and not its tuple,\n # since the same mechanism can appear in more than one tuple (e.g., with different phases)\n # and would introduce a cycle irrespective of the tuple in which it appears in the learningGraph\n\n if receiver in self.learningExecutionGraph:\n # if receiver in self.learning_execution_graph_mechs:\n # Try assigning receiver as dependent of current mechanism and test toposort\n try:\n # If receiver already has dependencies in its set, add sender_mech to set\n if self.learningExecutionGraph[receiver]:\n self.learningExecutionGraph[receiver].add(sender_mech)\n # If receiver set is empty, assign sender_mech to set\n else:\n self.learningExecutionGraph[receiver] = {sender_mech}\n # Use toposort to test whether the added dependency produced a cycle (feedback loop)\n list(toposort(self.learningExecutionGraph))\n # If making receiver dependent on sender produced a cycle, remove from learningGraph\n except ValueError:\n self.learningExecutionGraph[receiver].remove(sender_mech)\n receiver.systems[self] = CYCLE\n continue\n\n else:\n # Assign receiver as dependent on sender mechanism\n try:\n # FIX: THIS WILL ADD SENDER_MECH IF RECEIVER IS IN GRAPH BUT = set()\n # FIX: DOES THAT SCREW UP ORIGINS?\n self.learningExecutionGraph[receiver].add(sender_mech)\n except KeyError:\n self.learningExecutionGraph[receiver] = {sender_mech}\n\n if not sender_mech.systems:\n sender_mech.systems[self] = MONITORING\n\n # Traverse list of mechanisms in process recursively\n build_dependency_sets_by_traversing_projections(receiver, process)\n\n # Sort for consistency of output\n sorted_processes = sorted(self.processes, key=lambda process : process.name)\n\n # This assumes that the first mechanism in process.monitoringMechanisms is the last in the learning sequence\n # (i.e., that the list is being traversed \"backwards\")\n for process in sorted_processes:\n if process.learning and process._learning_enabled:\n build_dependency_sets_by_traversing_projections(process.monitoringMechanisms[0], process)\n\n # FIX: USE TOPOSORT TO FIND, OR AT LEAST CONFIRM, TARGET MECHANISMS, WHICH SHOULD EQUAL COMPARATOR MECHANISMS\n self.learningExecutionList = toposort_flatten(self.learningExecutionGraph, sort=False)\n # self.learningExecutionList = self._toposort_with_ordered_mech_tuples(self.learningExecutionGraph)\n\n # Construct monitoringMechanisms and targetMechanisms MechanismLists\n\n # MODIFIED 3/12/17 NEW: [MOVED FROM _instantiate_graph]\n self._monitoring_mech_tuples = []\n self._target_mech_tuples = []\n\n from PsyNeuLink.Components.Projections.MappingProjection import MappingProjection\n for item in self.learningExecutionList:\n if isinstance(item, MappingProjection):\n continue\n\n # If a learning_rate has been specified for the system, assign that to all LearningMechanisms\n # for which a mechanism-specific learning_rate has NOT been assigned\n if (isinstance(item, LearningMechanism) and\n self.learning_rate is not None and\n item.function_object.learning_rate is None):\n item.function_object.learning_rate = self.learning_rate\n\n mech_tuple = self._allMechanisms._get_tuple_for_mech(item)\n if not mech_tuple in self._monitoring_mech_tuples:\n self._monitoring_mech_tuples.append(mech_tuple)\n if isinstance(item, ObjectiveMechanism) and not mech_tuple in self._target_mech_tuples:\n self._target_mech_tuples.append(mech_tuple)\n self.monitoringMechanisms = 
MechanismList(self, self._monitoring_mech_tuples)\n self.targetMechanisms = MechanismList(self, self._target_mech_tuples)\n # MODIFIED 3/12/17 END\n\n # Instantiate TargetInputStates\n self._instantiate_target_inputs()", "def generate_computational_graph(RHS, schema):\n computational_graph=dict()\n for level in range(3):\n #use brute force to generate candidates for each level\n computational_graph[level]=[]\n if level== 0:\n for attribute in schema:\n if attribute !=RHS:\n computational_graph[level].append(set([attribute]))\n\n else:\n for element1 in computational_graph[level-1]:\n for element2 in computational_graph[0]:\n newelement = element1.union(element2)\n if newelement not in computational_graph[level]:\n if len(newelement)==level+1:\n computational_graph[level].append(newelement)\n\n return computational_graph", "def __init__(self, data=None, cliques=None, taxons=None, namesMap=None, **attr):\n \n if cliques is not None:\n if namesMap:\n nmap = namesMap.getMap()\n cliques = [ [ nmap[n] for n in nset ] for nset in cliques ]\n \n # prevent self-loops\n cliques = [ list(set(nset)) for nset in cliques ]\n \n # attributes dicts\n hyperb_weight = lambda ts: 1/(ts-1) \n e_attr_hyperbWeight=dict()\n e_attr_taxon=dict()\n e_attr_count=dict()\n \n # build edges from records\n if taxons is None: cliques_taxons = map( lambda c: (c,None), cliques)\n else: cliques_taxons = zip(cliques,taxons)\n for clique,taxon in cliques_taxons:\n teamsize=len(clique)\n edgesFromClique = itertools.combinations(clique,2)\n for e in edgesFromClique:\n e = tuple(sorted(e))\n e_attr_count[e] = e_attr_count.get(e,0)+1\n e_attr_taxon[e] = e_attr_taxon.get(e,[])+[taxon] if taxons is not None else None\n e_attr_hyperbWeight[e] = e_attr_hyperbWeight.get(e,0)+hyperb_weight(teamsize)\n \n edges = e_attr_count.keys()\n data = list(edges)\n \n super().__init__(incoming_graph_data=data,**attr)\n \n # insert nodes and set count attribute\n nodes_counts = Counter( col for clique in cliques for col in clique )\n nodes = nodes_counts.keys()\n \n self.add_nodes_from(nodes)\n networkx.set_node_attributes(self,values=nodes_counts,name='count')\n \n # set edges attributes\n networkx.set_edge_attributes(self,e_attr_count,'count')\n networkx.set_edge_attributes(self,e_attr_taxon,'taxons')\n networkx.set_edge_attributes(self,e_attr_hyperbWeight,'weight_hyperbolic')", "def _build_graph(self):\n self.X = tf.placeholder(tf.float32, [self.N, None])\n self.y = tf.placeholder(tf.float32, [self.C, None])\n\n self.W = tf.get_variable(\"W\", shape=[self.C, self.N], initializer=tf.truncated_normal_initializer)\n self.b = tf.get_variable(\"b\", shape=[self.C, 1], initializer=tf.zeros_initializer)\n\n self.z = tf.matmul(self.W, self.X) + self.b\n self.y_hat = tf.nn.softmax(self.z, dim=0)\n\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.z, dim=0))\n\n self.train = tf.train.AdamOptimizer().minimize(self.loss)\n\n self.correct_pred = tf.equal(tf.argmax(self.y, 0), tf.argmax(self.y_hat, 0))\n\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))\n\n self.model = tf.global_variables_initializer()", "def fetch_from_sqlite(self):\n conn = get_sqlite()\n c = conn.cursor()\n c.execute('SELECT * FROM vertices ORDER BY id')\n vertices =c.fetchall()\n c.execute('SELECT * FROM edges')\n edges =c.fetchall()\n conn.commit()\n\n self.graph.add_vertices(len(vertices))\n for one in vertices:\n id =int(one[0])\n self.graph.vs[id][\"name\"] = one[1]\n self.graph.vs[id][\"parent\"] = one[2]\n 
self.graph.vs[id][\"size\"] = one[3]\n self.graph.vs[id][\"last_modified\"] = one[4]\n self.graph.vs[id][\"last_accessed\"] = one[5]\n\n for one in edges:\n self.graph.add_edges([(one[0],one[1])])", "def draw_relation_graph(database_name, table_name, primary_key, group_name) -> Graph:\n\n nodes = []\n links = []\n disease_list = get_icd_diseasegroup_diseaseinfo(database_name, table_name, primary_key, group_name)[1]\n disease_list = disease_list.split(',')\n # print(disease_list)\n\n for disease in disease_list:\n disease_node = {\n \"name\": disease,\n \"symbolSize\": 50\n }\n\n if disease_node not in nodes:\n nodes.append(disease_node)\n\n gene_list = get_mesh_disease_info(database_name, 'mesh_gene', disease, 'DISEASE_ID')[1]\n gene_list = gene_list.split(',')\n for gene in gene_list:\n gene_node = {\n 'name': gene,\n 'symbolSize': 10\n }\n\n if gene_node not in nodes:\n nodes.append(gene_node)\n\n for gene in gene_list:\n links.append({\"source\": disease, \"target\": gene})\n\n print(nodes)\n print(links)\n\n c = (\n Graph(init_opts=opts.InitOpts(width=\"1440px\", height=\"900px\")).add(\"\", nodes, links, repulsion=3000)\n .set_global_opts(title_opts=opts.TitleOpts(title=\"gene-disease association network\"))\n )\n\n return c", "def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n self._encode()\n self._match()\n self._fuse()\n self._decode()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = total_params(tf.trainable_variables())\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def _generate_weighted_matrices(self):\n self.degree_weighted_matrices = dict()\n mes = []\n args = []\n for metaedge, matrix in self.adj_matrices.items():\n mes.append(metaedge)\n args.append({'matrix': matrix, 'w': self.w, 'degree_fwd': self.out_degree[metaedge],\n 'degree_rev': self.in_degree[metaedge]})\n res = parallel_process(array=args, function=mt.weight_by_degree, use_kwargs=True, n_jobs=self.n_jobs,\n front_num=0)\n for metaedge, matrix in zip(mes, res):\n self.degree_weighted_matrices[metaedge] = matrix", "def metis(W, levels, rid=None):\n # Function written by M. 
Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L34\n\n N, N = W.shape\n if rid is None:\n rid = np.random.permutation(range(N))\n parents = []\n degree = W.sum(axis=0) - W.diagonal()\n graphs = []\n graphs.append(W)\n #supernode_size = np.ones(N)\n #nd_sz = [supernode_size]\n #count = 0\n\n #while N > maxsize:\n for _ in range(levels):\n\n #count += 1\n\n # CHOOSE THE WEIGHTS FOR THE PAIRING\n # weights = ones(N,1) # metis weights\n weights = degree # graclus weights\n # weights = supernode_size # other possibility\n weights = np.array(weights).squeeze()\n\n # PAIR THE VERTICES AND CONSTRUCT THE ROOT VECTOR\n idx_row, idx_col, val = scipy.sparse.find(W)\n perm = np.argsort(idx_row)\n rr = idx_row[perm]\n cc = idx_col[perm]\n vv = val[perm]\n cluster_id = metis_one_level(rr,cc,vv,rid,weights) # rr is ordered\n parents.append(cluster_id)\n\n # TO DO\n # COMPUTE THE SIZE OF THE SUPERNODES AND THEIR DEGREE \n #supernode_size = full( sparse(cluster_id, ones(N,1) ,\n #\tsupernode_size ) )\n #print(cluster_id)\n #print(supernode_size)\n #nd_sz{count+1}=supernode_size;\n\n # COMPUTE THE EDGES WEIGHTS FOR THE NEW GRAPH\n nrr = cluster_id[rr]\n ncc = cluster_id[cc]\n nvv = vv\n Nnew = cluster_id.max() + 1\n # CSR is more appropriate: row,val pairs appear multiple times\n W = scipy.sparse.csr_matrix((nvv,(nrr,ncc)), shape=(Nnew,Nnew))\n W.eliminate_zeros()\n # Add new graph to the list of all coarsened graphs\n graphs.append(W)\n N, N = W.shape\n\n # COMPUTE THE DEGREE (OMIT OR NOT SELF LOOPS)\n degree = W.sum(axis=0)\n #degree = W.sum(axis=0) - W.diagonal()\n\n # CHOOSE THE ORDER IN WHICH VERTICES WILL BE VISTED AT THE NEXT PASS\n #[~, rid]=sort(ss); # arthur strategy\n #[~, rid]=sort(supernode_size); # thomas strategy\n #rid=randperm(N); # metis/graclus strategy\n ss = np.array(W.sum(axis=0)).squeeze()\n rid = np.argsort(ss)\n\n return graphs, parents", "def create_computational_graph(node):\n graph = CompGraph()\n graph.build_graph(node)\n return graph", "def gen_W(users, items, ratings):\n\n # initiate graph\n user = users.tolist()\n item = items.tolist()\n rating = ratings.tolist()\n B = nx.Graph()\n B.add_nodes_from(user, bipartite=0)\n B.add_nodes_from(item, bipartite=1)\n\n # create edges\n for i in range(len(user)):\n B.add_edges_from([(user[i], item[i])])\n B[user[i]][item[i]]['weight'] = rating[i]\n\n users_unique = users.unique() \n items_unique = items.unique()\n\n # BiAdjacency matrix - for bipartite network\n W = biadjacency_matrix(B, users_unique,items_unique).toarray()\n\n # sparce form of Biadjacency matrix\n W = spa.csr_matrix(W)\n print('Shape of W: '+str(W.shape))\n\n return W, users_unique, items_unique", "def create(self):\n\n self.init_nodes_db()\n\n # group data\n if len(self.depends) == 0:\n grouped = [((), self.data)]\n else:\n grouped = self.data.groupby(self.depends)\n\n # create all the pymc nodes\n for uniq_elem, grouped_data in grouped:\n if not isinstance(uniq_elem, tuple):\n uniq_elem = (uniq_elem,)\n\n # create new kwargs to pass to the new pymc node\n kwargs = self.kwargs.copy()\n\n # update kwarg with the right parent\n for name, parent in self.parents.items():\n kwargs[name] = parent.get_node(self.depends, uniq_elem)\n\n # get node name\n tag, subj_idx = self.create_tag_and_subj_idx(self.depends, uniq_elem)\n node_name = self.create_node_name(tag, subj_idx=subj_idx)\n\n # get value for observed node\n if self.observed:\n if self.pass_dataframe:\n kwargs[\"value\"] = grouped_data[\n self.col_name\n ] # 
.to_records(index=False)\n else:\n kwargs[\"value\"] = grouped_data[\n self.col_name\n ].values # .to_records(index=False)\n\n # Deterministic nodes require a parent argument that is a\n # dict mapping parent names to parent nodes. Knode wraps\n # this; so here we have to fish out the parent nodes from\n # kwargs, put them into a parent dict and put that back\n # into kwargs, which will make pm.Determinstic() get a\n # parent dict as an argument.\n if self.pymc_node is pm.Deterministic:\n parents_dict = {}\n for name, parent in self.parents.items():\n parents_dict[name] = parent.get_node(self.depends, uniq_elem)\n kwargs.pop(name)\n kwargs[\"parents\"] = parents_dict\n\n if self.observed:\n kwargs[\"parents\"][\"value\"] = kwargs[\"value\"]\n\n # Deterministic nodes require a doc kwarg, we don't really\n # need that so if its not supplied, just use the name\n if self.pymc_node is pm.Deterministic and \"doc\" not in kwargs:\n kwargs[\"doc\"] = node_name\n\n node = self.create_node(node_name, kwargs, grouped_data)\n\n if node is not None:\n self.nodes[uniq_elem] = node\n self.append_node_to_db(node, uniq_elem)", "def make_ws_graph(num_nodes, clockwise_neighbours, rewiring_prob):\r\n #initialize empty graph\r\n ws_graph = {}\r\n for vertex in range(num_nodes): ws_graph[vertex] = []\r\n #add each vertex to clockwise neighbours\r\n for vertex in range(num_nodes):\r\n for neighbour in range(vertex + 1, vertex + clockwise_neighbours + 1):\r\n neighbour = neighbour % num_nodes\r\n ws_graph[vertex] += [neighbour]\r\n ws_graph[neighbour] += [vertex]\r\n for vertex in range(num_nodes):\r\n for neighbour in ws_graph[vertex]:\r\n if random.random() < rewiring_prob:\r\n ws_graph[vertex].remove(neighbour)\r\n ws_graph[neighbour].remove(vertex)\r\n randNode = random.randint(0, num_nodes-1)\r\n while(vertex == randNode):\r\n randNode = random.randint(0, num_nodes - 1)\r\n ws_graph[vertex] += [randNode]\r\n ws_graph[randNode] += [vertex]\r\n\r\n\r\n return ws_graph\r\n #rewire each edge with probability rewiring_prob\r\n\r\n #consider each vertex\r\n\r\n #consider each neighbour\r\n\r\n #decide whether to rewire and join to a random node\r\n\r\n #update if necessary\r", "def nodes_from_dict(nd=None,**kwargs):\n\n if not nd:\n err_msg = \"ERROR: No nodes data provided\"\n print(err_msg)\n return 1\n \n nodes = []\n\n ####################\n #Create BUS objects#\n ####################\n busd = {}\n for i, row in nd[\"buses\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n logger.info(\"bus {} will be created\".format(row[\"label\"]))\n bus = solph.Bus(label=row[\"label\"])\n nodes.append(bus)\n busd[row[\"label\"]] = bus\n \n if row[\"excess\"] and not pd.isnull(row[\"excess\"]):\n # Automatically add Sink for curtailment (excess)\n # Add variable cost for excess cost --> minimise curtailment\n nodes.append(\n solph.Sink(\n label=row[\"label\"] + \"_excess\",\n inputs={\n busd[row[\"label\"]]:solph.Flow(\n variable_costs = row[\"excess costs\"]\n )\n },\n )\n )\n # Automatically add Source for shortage\n # Add variable cost for shortage --> minimize shortage\n if row[\"shortage\"] and not pd.isnull(row[\"shortage\"]):\n nodes.append(\n solph.Source(\n label = row[\"label\"] + \"_shortage\",\n outputs={\n busd[row[\"label\"]]:solph.Flow(\n variable_costs=row[\"shortage costs\"]\n )\n },\n )\n )\n ########################\n # Create Source objects#\n ########################\n for i, row in nd[\"commodity_sources\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n 
nodes.append(\n solph.Source(\n label=row[\"label\"],\n outputs={\n busd[row[\"to\"]]: solph.Flow(\n variable_costs = row[\"variable costs\"]\n )\n },\n )\n )\n ########################\n # Create Source objects with fixed time series from 'renewables' table\n ########################\n \"\"\"\n A source can represent a pv-system, a wind power plant, an import of natural gas or a slack variable to avoid creating an in-feasible model.\n While a wind power plant will have an hourly feed-in depending on the weather conditions the natural_gas import might be restricted by \n maximum value (nominal_value) and an annual limit (summed_max). As we do have to pay for imported gas we should set variable costs. \n Comparable to the demand series an fix is used to define a fixed the normalised output of a wind power plant. \n Alternatively, you might use max to allow for easy curtailment. The nominal_value sets the installed capacity.\n \"\"\"\n for i, row in nd[\"renewables\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n # set static outflow values\n outflow_args = {}\n\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == row[\"label\"]:\n outflow_args[col.split(\".\")[1]]=nd[\"timeseries\"][col]\n # outflow_args[\"fix\"]=nd[\"timeseries\"][col]\n \n # TODO add NON-CONVEX to outflow_args\n if row[\"capex\"] and not pd.isnull(row[\"capex\"]):\n # with investment mode, nominal_value must be None\n logger.info(\"Invest {} capacity\".format(row[\"label\"]))\n invest_args = {}\n if not row[\"epc_invest\"] or pd.isnull(row[\"epc_invest\"]):\n epc_invest = economics.annuity(row[\"capex\"],20,0.08)\n else:\n epc_invest=row[\"epc_invest\"]\n invest_args[\"ep_costs\"] = epc_invest\n\n if row[\"max\"] and not pd.isnull(row[\"max\"]):\n invest_args[\"maximum\"] = row[\"max\"]\n\n if row[\"min\"] and not pd.isnull(row[\"min\"]):\n invest_args[\"minimum\"]=row[\"min\"]\n\n if row[\"existing\"] and not pd.isnull(row[\"existing\"]):\n invest_args[\"existing\"]=row[\"existing\"]\n \n outflow_args[\"investment\"] = solph.Investment(**invest_args) \n else: \n outflow_args[\"nominal_value\"] = row[\"capacity\"]\n \n # create\n nodes.append(\n solph.Source(\n label=row[\"label\"],\n outputs = {\n busd[row[\"to\"]]:solph.Flow(**outflow_args)\n }\n )\n )\n #######################\n # Create Sink objects # \n #######################\n \"\"\"\n A sink is normally used to define the demand within an energy model but it can also be used to detect excesses.\n\n The example shows the electricity demand of the electricity_bus defined above.\n - 'nd['timeseries'][col]' should be sequence of normalised values\n - 'nominal_value' is the maximum demand the normalised sequence is multiplied with.\n - Giving 'nd['timeseries'][col]' as parameter 'fix' means that the demand cannot be changed by the solver. 
\n \n In contrast to the 'demand sink' the 'excess sink' has normally less restrictions but is open to take the whole excess.\n \"\"\"\n for i, de in nd[\"demand\"].iterrows():\n if de[\"active\"] and not pd.isnull(de[\"active\"]):\n # set static inflow values\n inflow_args = {\n \"nominal_value\":de[\"nominal value\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0]==de[\"label\"]:\n # inflow_args[col.split(\".\")[1]]=nd[\"timeseries\"][col]\n # TODO: veriry other key than 'fix'?????\n inflow_args[\"fix\"]=nd[\"timeseries\"][col] \n \n # Create Sink object and append to nodes\n nodes.append(\n solph.Sink(\n label=de[\"label\"],\n inputs={\n busd[de[\"from\"]]:solph.Flow(**inflow_args)\n }\n )\n )\n #############################\n # Create Transformer object #\n #############################\n \"\"\"\n An instance of the Transformer class can represent a node with multiple input and output flows such as:\n - a power plant\n - a transport line \n - or any kind of a transforming process as electrolysis, a cooling device or a heat pump. \n The efficiency has to be constant within one time step to get a linear transformation.\n You can define a different efficiency for every time step (e.g. the thermal powerplant efficiency according \n to the ambient temperature) but this series has to be predefined and cannot be changed within the optimisation.\n\n A condensing power plant can be defined by a transformer with one input (fuel) and one output (electricity)\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_el = solph.Bus(label='electricity')\n solph.Transformer(\n label=\"pp_gas\",\n inputs={bgas: solph.Flow()},\n outputs={b_el: solph.Flow(nominal_value=10e10)},\n conversion_factors={electricity_bus: 0.58})\n ```\n\n A CHP power plant would be defined in the same manner but with two outputs:\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_el = solph.Bus(label='electricity')\n b_th = solph.Bus(label='heat')\n\n solph.Transformer(\n label='pp_chp',\n inputs={b_gas: Flow()},\n outputs={b_el: Flow(nominal_value=30),\n b_th: Flow(nominal_value=40)},\n conversion_factors={b_el: 0.3, b_th: 0.4})\n ```\n A CHP power plant with 70% coal and 30% natural gas can be defined with two inputs and two outputs:\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_coal = solph.Bus(label='hard_coal')\n b_el = solph.Bus(label='electricity')\n b_th = solph.Bus(label='heat')\n\n solph.Transformer(\n label='pp_chp',\n inputs={b_gas: Flow(), b_coal: Flow()},\n outputs={b_el: Flow(nominal_value=30),\n b_th: Flow(nominal_value=40)},\n conversion_factors={b_el: 0.3, b_th: 0.4,\n b_coal: 0.7, b_gas: 0.3})\n ```\n \"\"\"\n for i, row in nd[\"transformers\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n # set static inflow values\n inflow_args = {\n \"variable_costs\":row[\"variable input costs\"]\n }\n # inflow_args = {}\n outflow_args = {}\n # get time series for inflow transformer\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0]==row[\"label\"]:\n # inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n inflow_args[\"fix\"] = nd[\"timeseries\"][col]\n \n #TODO: multi inputs/outputs and add investment\n\n if row[\"capex inflow\"] and not pd.isnull(row[\"capex inflow\"]):\n logger.info(\"Invest {} inflow capacity\".format(row[\"label\"])) \n invest_args = {}\n invest_args[\"ep_costs\"] = economics.annuity(row[\"capex inflow\"],20,0.08)\n\n if row[\"max inflow\"] and not pd.isnull(row[\"max 
inflow\"]):\n invest_args[\"maximum\"] = row[\"max inflow\"]\n\n if row[\"min inflow\"] and not pd.isnull(row[\"min inflow\"]):\n invest_args[\"minimum\"] = row[\"min inflow\"]\n\n if row[\"existing inflow\"] and not pd.isnull(row[\"existing inflow\"]):\n invest_args[\"existing\"] = row[\"existing inflow\"]\n\n inflow_args[\"investment\"] = solph.Investment(**invest_args)\n else: \n outflow_args[\"nominal_value\"] = row[\"capacity\"] # should be specify capacity inflow or outflow\n\n # create\n nodes.append(\n solph.Transformer(\n label=row[\"label\"],\n inputs = {\n busd[row[\"from\"]]:solph.Flow(**inflow_args)\n },\n outputs={\n busd[row[\"to\"]]:solph.Flow(**outflow_args)\n },\n conversion_factors = {\n busd[row[\"to\"]]:row[\"efficiency\"]\n }\n )\n )\n ##################################\n # Create Transformer CHP objects #\n ##################################\n for i, row in nd[\"transformers_chp\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n\n inflow_args = {}\n outflow_elec_args = {}\n outflow_heat_args = {}\n\n inflow_args[\"variable_costs\"] = row[\"variable input costs\"]\n\n if row[\"capex elec\"] and not pd.isnull(row[\"capex elec\"]):\n logger.info(\"Invest {} inflow capacity\".format(row[\"label\"])) \n invest_args = {}\n invest_args[\"ep_costs\"] = economics.annuity(row[\"capex elec\"],20,0.08)\n if row[\"max elec\"] and not pd.isnull(row[\"max elec\"]):\n invest_args[\"maximum\"] = row[\"max elec\"]\n if row[\"min elec\"] and not pd.isnull(row[\"min elec\"]):\n invest_args[\"minimum\"] = row[\"min elec\"]\n if row[\"existing elec\"] and not pd.isnull(row[\"existing elec\"]):\n invest_args[\"existing\"] = row[\"existing elec\"]\n \n outflow_elec_args[\"investment\"] = solph.Investment(**invest_args)\n investment = solph.Investment(**invest_args)\n else:\n # inflow_args[\"nominal_value\"] = row[\"capacity_el\"]\n outflow_elec_args[\"nominal_value\"] = row[\"capacity_el\"]\n outflow_heat_args[\"nominal_value\"] = row[\"capacity_heat\"]\n\n # Create\n nodes.append(\n solph.Transformer(\n label = row[\"label\"],\n inputs ={\n busd[row[\"from\"]]:solph.Flow(**inflow_args)\n },\n outputs={\n busd[row[\"to_el\"]]:solph.Flow(**outflow_elec_args),\n busd[row[\"to_heat\"]]:solph.Flow(**outflow_heat_args)\n },\n conversion_factors={\n busd[row[\"to_el\"]]:row[\"efficiency_el\"],\n busd[row[\"to_heat\"]]:row[\"efficiency_heat\"]\n }\n )\n )\n\n ##########################\n # Create Storage objects #\n ##########################\n for i, row in nd[\"storages\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n\n inflow_args = {}\n outflow_args = {}\n\n if row[\"capex\"] and not pd.isnull(row[\"capex\"]):\n logger.info(\"Invest {} storage capacity\".format(row[\"label\"]))\n\n invest_args = {}\n invest_args[\"ep_costs\"] = economics.annuity(row[\"capex\"],20,0.08)\n if row[\"max\"] and not pd.isnull(row[\"max\"]):\n invest_args[\"maximum\"] = row[\"max\"]\n if row[\"min\"] and not pd.isnull(row[\"min\"]):\n invest_args[\"minimum\"] = row[\"min\"]\n if row[\"existing\"] and not pd.isnull(row[\"existing\"]):\n invest_args[\"existing\"] = row[\"existing\"]\n\n investment=solph.Investment(\n **invest_args\n )\n nominal_capacity=None\n \n #TODO add if row[\"capex inflow\"] and if row[\"capex outflow\"]\n #TODO read relation_capacity_inflow/outflow from excel\n \n else:\n investment = None\n nominal_capacity = row[\"nominal capacity\"] \n \n if row[\"capacity inflow\"] and row[\"capacity inflow ratio\"]:\n logger.error(\"{} is overdetermined, only 
capacity inflow or capacity inflow ratio shoul be set\".format(row[\"label\"]))\n return 1\n if row[\"capacity inflow\"]:\n inflow_args[\"nominal_value\"] = row[\"capacity inflow\"]\n if row[\"capacity inflow ratio\"]:\n capacity_inflow_ratio = row[\"capacity inflow ratio\"]\n else:\n capacity_inflow_ratio = None\n inflow_args[\"variable_costs\"] = row[\"variable input costs\"]\n\n \n if row[\"capacity outflow\"] and row[\"capacity outflow ratio\"]:\n logger.error(\"{} is overdetermined, only capacity outflow or capacity outflow ratio shoul be set\".format(row[\"label\"]))\n return 1\n if row[\"capacity outflow\"]:\n outflow_args[\"nominal_value\"] = row[\"capacity outflow\"]\n if row[\"capacity outflow ratio\"]:\n capacity_outflow_ratio = row[\"capacity outflow ratio\"]\n else:\n capacity_outflow_ratio = None\n\n outflow_args[\"variable_costs\"] = row[\"variable output costs\"]\n\n nodes.append(\n solph.components.GenericStorage(\n label=row[\"label\"],\n inputs = {\n busd[row[\"bus\"]]:solph.Flow(**inflow_args)\n },\n outputs = {\n busd[row[\"bus\"]]:solph.Flow(**outflow_args)\n },\n investment=investment,\n nominal_storage_capacity=nominal_capacity,\n loss_rate = row[\"capacity loss\"],\n initial_storage_level = row[\"initial capacity\"],\n max_storage_level=row[\"capacity max\"],\n min_storage_level=row[\"capacity min\"],\n invest_relation_input_capacity = capacity_inflow_ratio,\n invest_relation_output_capacity = capacity_outflow_ratio,\n inflow_conversion_factor = row[\"efficiency inflow\"],\n outflow_conversion_factor = row[\"efficiency outflow\"]\n )\n )\n #######################\n # Create Link objects #\n #######################\n \"\"\"\n A Link object with 1...2 inputs and 1...2 outputs\n Note: This component is experimental. Use it with care\n \"\"\"\n for i, p in nd[\"powerlines\"].iterrows():\n if p[\"active\"] and not pd.isnull(p[\"active\"]):\n bus1 = busd[p[\"bus_1\"]]\n bus2 = busd[p[\"bus_2\"]]\n nodes.append(\n solph.custom.Link(\n label = \"powerline\" + \"_\" + p[\"bus_1\"] + \"_\" + p[\"bus_2\"],\n inputs = {\n bus1:solph.Flow(),\n bus2:solph.Flow()\n },\n outputs = {\n bus1: solph.Flow(nominal_value = p[\"capacity\"]),\n bus2: solph.Flow(nominal_value=p[\"capacity\"]),\n },\n conversion_factors={\n (bus1,bus2):p[\"efficiency\"],\n (bus2,bus1):p[\"efficiency\"]\n }\n )\n ) \n return nodes", "def __build_graph(self):\n all_matches = self.___matches()\n # make new matrix of the student classes and their general studies matrix\n split_reqs = [reqs.split(', ') for reqs in all_matches['ALL'].as_matrix() ]\n rep_values = [line.count(\",\") + 1 for line in all_matches['ALL']]\n CLS = np.repeat(all_matches['FULL'].as_matrix(), rep_values )\n REQ = np.array(list(chain.from_iterable(split_reqs)))\n graph = pd.DataFrame([CLS, REQ]).T\n graph.columns = ['CLS','REQ']\n graph = graph.drop_duplicates()\n return graph", "def __init__(self, no_vertices=0):\r\n self.__neighbours = {}\r\n self.__cost = {}\r\n for i in range(no_vertices):\r\n self.__neighbours[i] = []", "def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n self._encode()\n self._match()\n self._fuse()\n self._decode()\n self._passage_rank()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def _build_graph(self):\n start_t = time.time()\n 
self.load_data()\n self.get_train_data()\n self.plot_data()\n self._setup_placeholders()\n self.lstm()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def add_communites(self):\n\n query = '''\n MATCH (c1:)-[r:INTERACTS]->(c2:)\n RETURN c1.name, c2.name, r.weight AS weight\n '''\n ig = IGraph.TupleList(self.graph.run(query), weights=True)\n\n clusters = IGraph.community_walktrap(ig, weights=\"weight\").as_clustering()\n\n nodes = [{\"name\": node[\"name\"]} for node in ig.vs]\n for node in nodes:\n idx = ig.vs.find(name=node[\"name\"]).index\n node[\"community\"] = clusters.membership[idx]\n\n write_clusters_query = '''\n UNWIND {nodes} AS n\n MATCH (c:) WHERE c.name = n.name\n SET c.community = toInt(n.community)\n '''\n\n self.graph.run(write_clusters_query, nodes=nodes)", "def gexf_graph():\n # you must replace these lines and supply your own graph\n gexf = Gexf(\"author\", \"title\")\n mygraph = gexf.addGraph(\"undirected\", \"static\", \"A web network\")\n atr_type = mygraph.addNodeAttribute('Type', type='string')\n atr_id = mygraph.addNodeAttribute('id', type='string')\n atr_label = mygraph.addNodeAttribute('label', type='string')\n atr_color_r = mygraph.addNodeAttribute('color_r', type='string', defaultValue='0')\n atr_color_g = mygraph.addNodeAttribute('color_g', type='string', defaultValue='0')\n atr_color_b = mygraph.addNodeAttribute('color_b', type='string', defaultValue='0')\n k = 0\n for i in range(min_parts()):\n tmp = mygraph.addNode(set_num[i], name[i], r=\"0\", g=\"0\", b=\"0\")\n tmp.addAttribute(atr_type, \"set\")\n tmp.addAttribute(atr_id, set_num[i])\n tmp.addAttribute(atr_label, name[i])\n for j in range(len(Parts[i][\"Parts\"])):\n if mygraph.nodeExists(Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"])==0:\n temp = mygraph.addNode((Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), Parts[i][\"Parts\"][j][\"name\"], r=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2], 16)), g=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4], 16)), b=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6], 16)))\n temp.addAttribute(atr_type, \"part\")\n temp.addAttribute(atr_id, (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]))\n temp.addAttribute(atr_label, Parts[i][\"Parts\"][j][\"name\"])\n temp.addAttribute(atr_color_r, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2])\n temp.addAttribute(atr_color_g, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4])\n temp.addAttribute(atr_color_b, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6])\n mygraph.addEdge(str(k), set_num[i], (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), weight=Parts[i][\"Parts\"][j][\"quantity\"])\n k = k+1\n output_file = open(\"bricks_graph.gexf\", \"wb\")\n gexf.write(output_file)\n return -1", "def add_graph(self):\n \n self.cd_sampling = None\n \n if \"CD\" in self.algorithm:\n\n self.add_cd_samples()\n \n if self.num_hidden ==0:\n \n self.cd_sampling = self.get_cd_samples()\n \n if \"CSS\" in self.algorithm and self.mf_steps > 0: \n \n self.add_mf_updates()\n \n elif \"CSS\" in self.algorithm and self.gibbs_steps > 0:\n \n self.add_cd_samples()\n \n if self.num_hidden ==0:\n \n self.cd_sampling = self.get_cd_samples() \n \n 
self.add_objective()\n\n self.add_grad_updates() \n \n if self.report_p_tilda:\n \n self.add_p_tilda()\n \n self.add_pseudo_cost_measure()\n\n self.optimize = self.optimization_step()", "def make_graph(VERTICES, CLUSTERS, DENSITY):\n CLUSTER_SIZE = VERTICES / CLUSTERS\n clusters = []\n\n for i in xrange(CLUSTERS):\n if i == CLUSTERS - 1:\n vertex_set = range(i * CLUSTER_SIZE, VERTICES)\n else:\n vertex_set = range(i * CLUSTER_SIZE, (i + 1) * CLUSTER_SIZE)\n\n cluster = make_connected(vertex_set, DENSITY * CLUSTER_SIZE)\n clusters += [cluster]\n\n graph = joinClusters(clusters)\n return graph", "def algorithm(df, params):\n\n output = {}\n\n # algorithm specific code\n # construct network analysis\n NW = Network(df, params['relations'])\n output['d3js'] = NW.export_json()\n output['gephi'] = NW.export_gephi()\n output['pajek'] = NW.export_pajek()\n output['assortativity'] = NW.assortativity()\n output['node_attributes'] = NW.node_attributes()\n output['edge_attributes'] = NW.edge_attributes()\n output['strong_components'] = NW.strong_components()\n output['weak_components'] = NW.weak_components()\n output['triads'] = NW.triads()\n\n # plot network\n pruned_network = NW.prune_network()\n output['div'] = plot.plot_network(pruned_network, params['layout'],\n params['relations'],\n title=params['relations']\n + ' Network graph of 500 nodes with highest degree centrality')\n\n return output", "def build_graph(self):\n n_classes = self.n_classes\n\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n self.compute_rDeRF() # dummy\n\n # Classification\n (self.feed('conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool6')\n .reshape(shape=(-1, 7, 7, 512), name='pool6_reshape')\n .fc(4096, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(4096, name='fc7')\n .dropout(0.5, name='drop7')\n # .make_time(name='drop7_reduced')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n pass", "def _setup_graph(self):\n pass", "def _setup_graph(self):\n pass", "def build_graph(self):\n assert self.n_features is not None, 'Number of features is unknown. 
It can be set explicitly by .core.set_num_features'\n self.graph = tf.Graph()\n self.graph.seed = self.seed\n with self.graph.as_default():\n with tf.name_scope('learnable_params') as scope:\n self.init_learnable_params()\n with tf.name_scope('input_block') as scope:\n self.init_placeholders()\n with tf.name_scope(\"cosine_similarity\"):\n self.init_similarity_computation()\n with tf.name_scope('main_block') as scope:\n self.init_main_block()\n with tf.name_scope('optimization_criterion') as scope:\n self.init_regularization()\n self.init_loss()\n self.init_target()\n self.trainer = self.optimizer.minimize(self.target)\n self.init_all_vars = tf.global_variables_initializer()\n self.summary_op = tf.summary.merge_all()\n self.saver = tf.train.Saver()", "def create_cost_unitary(graph, gamma):\n\n cost_unitary = QuantumCircuit(len(graph.nodes), name=\"Cost Unitary\")\n weights = nx.get_edge_attributes(graph, 'weight').values() # Get weights from graph\n\n # Add corresponding gates for each edge\n for edge, weight in zip(graph.edges, weights):\n cost_unitary.cx(int(edge[0]), int(edge[1]))\n cost_unitary.rz(2*gamma*weight, int(edge[1]))\n cost_unitary.cx(int(edge[0]), int(edge[1]))\n cost_unitary.barrier() # Visually the unitary for each edge\n #cost_unitary.to_gate()\n return cost_unitary", "def graph_course(self):\n group = self.__data[\"filted_general_groupby\"]\n graph = {}\n if self.analysis[\"courses\"] is None:\n self.courses_list()\n\n # inicializa o dicionario que vai guardar o grafico\n for course in self.analysis[\"courses\"].index:\n graph[course] = []\n\n for i in range(18):\n min_v = i * 5\n max_v = min_v + 4.99\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n min_v = 95\n max_v = 100\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n self.analysis[\"graph_course\"] = graph", "def build_graph(self):\n start_time = time.time()\n\n # init temp node\n for value in self.domain:\n node = TempDepNode(value)\n self.nodes[value] = node\n\n attr_data = self.graph_data[self.attr_name]\n print(f'{len(attr_data)} records in data')\n\n # init temp edge\n for source_ix, value_i in tqdm(attr_data.items()):\n visited = set()\n for target_ix, value_j in attr_data[source_ix+1:].items():\n if value_j in visited:\n continue\n else:\n visited.add(value_j)\n time_diff = self.graph_data[self.time][target_ix] - \\\n self.graph_data[self.time][source_ix]\n if time_diff > self.time_diff_threshold:\n break\n if (value_i, value_j) not in self.edges or (value_j, value_i) not in self.edges:\n self.edges[(value_i, value_j)] = TempDepEdge(value_i, value_j)\n self.edges[(value_j, value_i)] = TempDepEdge(value_j, value_i)\n self.edges[(value_i, value_j)].add_event(time_diff)\n if value_i != value_j:\n self.edges[(value_j, value_i)].add_event(time_diff)\n end_time = time.time()\n print(f'{end_time-start_time} seconds for graph building')", "def calc_assn_weights():\n\t\n\t\t\t#\n\t\t\t#\n\ttext(\"\"\"INSERT INTO assignments (mentor_id, course_id, cost)\n\t\t\tSELECT M.mentor_id, C.course_id, SUM(COALESCE(PW.weight_value,PT.def_weight_val))\n\t\t\tFROM mentors M, courses C\n\t\t\tJOIN course2pref C2P ON C2P.course_id = C.course_id\n\t\t\tJOIN prefs P ON P.pref_id = C2P.pref_id\n\t\t\tJOIN pref_types PT ON PT.pref_type_id = P.pref_type_id\n\t\t\tJOIN pref_weights PW ON PW.pref_type_id = P.pref_type_id\n\t\t\tLEFT JOIN choices Ch ON Ch.mentor_id = M.mentor_id AND Ch.weight_id = PW.pref_id", "def make_graph(self, list_of_paths, view_progress=False):\n\n for i, path in enumerate(list_of_paths):\n\n # No 
constraints required for first path\n if i == 0:\n self.add_all_edges(path)\n\n else:\n constraint = {}\n\n for (source, dest) in zip(path, path[1:]): \n\n # Add edge to graph if it is new\n if not self.has_edge(source, dest):\n self.add_edge(source, dest)\n\n # Check if source node is a \"branching\" node\n # i.e. has more than one destination node \n if len(self.G[source]) >= 2: \n\n # If path constraints exist, add to edge\n if len(constraint) >= 1:\n\n self.update_edge_constraint(source, dest, constraint)\n self.update_old_branches(source, constraint)\n\n # Add this edge to path constraints\n constraint.update({source: {dest}})\n\n # Once path fully added, update backup copy of Graph\n self.G_backup = copy.deepcopy(self.G)\n\n # Show progress after each new path is added\n if view_progress:\n print('\\nCurrent graph is: ')\n pprint(self.G)\n input(\"Press Enter to continue.\\n\")", "def finalize_graph(self) -> None:\n digraph = nx.MultiDiGraph()\n\n for node in self.graph.iternodes():\n attributes = self.get_attributes(node)\n attributes[\"schema\"] = node.type.name\n if node.caption is not None:\n attributes[\"label\"] = node.caption\n if node.is_entity and node.schema is not None:\n attributes[\"schema\"] = node.schema.name\n digraph.add_node(node.id, **attributes)\n\n for edge in self.graph.iteredges():\n attributes = self.get_attributes(edge)\n attributes[\"schema\"] = edge.type_name\n attributes[\"weight\"] = str(edge.weight)\n digraph.add_edge(edge.source_id, edge.target_id, key=edge.id, **attributes)\n\n for line in generate_gexf(digraph, prettyprint=True):\n self.fh.write(line)\n self.fh.write(\"\\n\")", "def build_triangle_graph_with_costs(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n\n graph.new_node()\n graph.new_node()\n graph.new_node()\n\n graph.new_edge(1, 2, 1)\n graph.new_edge(2, 3, 2)\n graph.new_edge(3, 1, 10)\n\n return graph", "def create_basic_adjacency_map_3():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\", \"E\"],\n \"D\": [\"X\", \"Y\"],\n \"E\": [\"X\"],\n \"X\": [\"Z\"],\n \"Y\": [\"Z\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def __init__(self, n=1):\n vertices = [Vertex(i) for i in range(n)]\n for vertex in vertices:\n self.add_vertex(vertex)\n self.populate_graph()", "def main():\n\n #Courses\n years = [2016, 2017, 2018, 2019, 2020]\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE \"course\" (\n \"course_number\"\tint NOT NULL,\n \"dept_id\"\tvarchar(4) NOT NULL,\n \"title\"\tvarchar(100) NOT NULL,\n \"instructor_fname\"\tvarchar(35) DEFAULT NULL,\n \"instructor_lname\"\tvarchar(35) DEFAULT NULL,\n \"student_work_products\"\tjson DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n PRIMARY KEY(\"course_number\", \"term\", \"year\")) \n \"\"\"\n )\n conn.commit()\n courses = [\n (1370, \"CPSC\", \"Computer Literacy\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (1375, \"CPSC\", \"Programming I\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2376, \"CPSC\", \"Intro to Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2380, \"CPSC\", \"Algorithms\", names.get_first_name(), names.get_last_name(), 
json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2482, \"CPSC\", \"Computer Organization\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3377, \"CPSC\", \"Advanced Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3380, \"CPSC\", \"Operating Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3383, \"CPSC\", \"Programming Languages\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3384, \"CPSC\", \"Computer Networks\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\"),\n (4360, \"CPSC\", \"Computer Security\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\")\n ]\n #Adding years\n upload_courses = []\n for year in years:\n upload_courses += [x + (year,) for x in courses]\n #Making a few instructors teach multiple course\n new_courses = [\n (4557, \"CPSC\", \"Natural Language Processing\", ),\n (2375, \"CPSC\", \"Programming II\",),\n (2776, \"CPSC\", \"Data Structures and Algorithms\",),\n (4862, \"CPSC\", \"Image Recognition\", ),\n ]\n for i in range(0,len(new_courses)):\n year = choice(years)\n for y in range(0,2): #Number of times new course is taught\n c = upload_courses[i]\n new_data = (c[3], c[4], c[5], choice([\"Fall\", \"Spring\", \"Summer\"]), year+y)\n data = new_courses[i] + new_data\n upload_courses.append(data)\n #Adding solo instructors and solo courses\n upload_courses += [\n (4672, \"CPSC\", \"Programming Memes\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\", choice(years)),\n (1872, \"CPSC\", \"Information Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\", choice(years)),\n (1123, \"CPSC\", \"Microsoft Office\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\", choice(years))\n ]\n\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.executemany('''INSERT INTO course (course_number, dept_id, title, instructor_fname, instructor_lname, student_work_products, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?)''', upload_courses)\n conn.commit()\n\n #SWP\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE `student_work_product` (\n `id` INTEGER PRIMARY KEY,\n `product` varchar(250) NOT NULL,\n `course_id` int NOT NULL,\n `dept_id` int NOT NULL,\n `student_fname` varchar(35) NOT NULL,\n `student_lname` varchar(35) NOT NULL,\n `student_outcome` int DEFAULT NULL,\n `score` int DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n CONSTRAINT `course` FOREIGN KEY (`course_id`) REFERENCES `course` (`course_number`)\n CONSTRAINT `course` FOREIGN KEY (`dept_id`) REFERENCES `course` (`dept_id`)\n )\n \"\"\"\n )\n conn.commit()\n \n swps = []\n with sqlite3.connect(\"determined.db\") as conn:\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute (\"Select * from course\")\n records = [dict(x) for x in 
c.fetchall()]\n #Generating 20 student records for each swp in each course\n for i, course in enumerate(records):\n student_names = []\n for _ in range(20):\n student_names.append({'fname': names.get_first_name(),\n 'lname': names.get_last_name()})\n for product in json.loads(course['student_work_products'])['swp']:\n for student in student_names:\n if i%7 == 0:\n score = int(triangular(50, 85))\n else:\n score = int(triangular(50, 100))\n if score >= 90: outcome = 4\n elif score >= 80: outcome = 3\n elif score >= 70: outcome = 2\n elif score >= 60: outcome = 1\n else: outcome = 0 \n swps.append((\n product,\n course['course_number'],\n \"CPSC\",\n student['fname'],\n student['lname'],\n outcome,\n score, \n course['term'], \n course['year']\n ))\n \n c.executemany('''INSERT INTO student_work_product (product, course_id, dept_id, student_fname, student_lname, student_outcome, score, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)''', swps)\n conn.commit()", "def add_course(graph, course, parent, color):\n if course[-1] == '*':\n # Concurrent course\n color = 'green'\n course = course[:-1]\n child = pydot.Node(course)\n graph.add_node(child)\n edge = pydot.Edge(parent, course, color=color)\n graph.add_edge(edge)", "def __init__(self):\n self.tree = nx.Graph() \n self.orig_dist_matrix = pd.DataFrame()\n self.work_dist_matrix = pd.DataFrame() \n self.cluster_map = {} \n self.class_map = {}", "def rebuild_graph_data(self, consolidator=None):\n\n if len(self.nodes) == 0:\n raise DomainException(\"No nodes supplied to graph!\")\n\n if consolidator != None:\n for node in self.nodes.values():\n na = set()\n nat = set()\n no = set()\n nr = set()\n ni = set()\n nir = set()\n nrc = Counter()\n for atype, attribute in node.attributes:\n try:\n atype = consolidator(atype)\n except ConsolidatorException:\n continue\n na.add((atype, attribute))\n nat.add(atype)\n\n for rtype, dest in node.outgoing_relations:\n try:\n rtype = consolidator(rtype)\n except ConsolidatorException:\n continue\n no.add((rtype, dest))\n nr.add(rtype)\n nrc[rtype] += 1\n \n for rtype, pred in node.incoming_relations:\n try:\n rtype = consolidator(rtype)\n except ConsolidatorException:\n continue\n ni.add((rtype, pred))\n nir.add(rtype)\n nrc[rtype] += 1\n\n #update values\n node.attributes = na\n node.outgoing_relations = no\n node.incoming_relations = ni\n node.rtypes = nr\n node.i_rtypes = nir\n node.atypes = nat\n node.rtype_count = nrc\n\n # ==== compute member variables ====\n self.usage_map = self.map_uses()\n self.usage_counts = {x:len(y) for x,y in self.usage_map.items()}\n self.rtype_vectors = self.index_rtypes()\n self.node_vectors = self.index_nodes()\n self.rkdtree_keys, _rvalues = zip(*self.rtype_vectors.items())\n self.rkdtree = cKDTree(_rvalues)\n self.nkdtree_keys, _nvalues = zip(*self.node_vectors.items())\n self.nkdtree = cKDTree(_nvalues)\n\n # ==== precompute some vector constructs ====\n for node in self.nodes.values():\n node.compute_dicts(self)\n\n # ==== compute tf-idf weights for all nodes ====\n\n #calculate number of nodes containing rtype and \n #find maximum frequency rtype for any single node\n maxftd = 0\n c2 = Counter()\n for y in self.nodes.values():\n for k,z in y.rtype_count.items():\n c2[k] += 1\n if z > maxftd:\n maxftd = z\n\n #calculate augmented term frequency\n tf = Counter()\n for x,y in self.nodes.items():\n for z,v in y.rtype_count.items():\n tf[(x,z)] = 0.5 + 0.5*(v/maxftd)\n\n #calculate inverse document frequency\n idf = Counter()\n N = len(self.nodes)\n for x in c2:\n idf[x] = 
log(N / c2[x])\n\n tfidf = {}\n for x,y in self.nodes.items():\n for z in y.rtype_count:\n tmp = tfidf.setdefault(x,{})\n tmp[z] = tf[(x,z)] * idf[z]\n\n self.tfidf = tfidf\n self.dirty = False" ]
[ "0.610946", "0.6068693", "0.5970327", "0.59139115", "0.5910554", "0.59065074", "0.5842171", "0.58402723", "0.5831642", "0.5821705", "0.58180076", "0.5813589", "0.5796841", "0.57905227", "0.5715409", "0.5708784", "0.56940794", "0.5655929", "0.5635845", "0.5629975", "0.56149757", "0.56129605", "0.56017387", "0.5577498", "0.5569718", "0.55530834", "0.5528388", "0.55136675", "0.55126846", "0.5476829", "0.5466921", "0.54540855", "0.54415584", "0.5439747", "0.5427362", "0.54155636", "0.5415547", "0.5387961", "0.5387961", "0.5382741", "0.53613484", "0.5361319", "0.5342517", "0.53360564", "0.5328969", "0.53162646", "0.53122735", "0.5310568", "0.53105396", "0.53101444", "0.5307735", "0.52963364", "0.5295824", "0.528474", "0.5276116", "0.5274247", "0.52721673", "0.52719235", "0.52716106", "0.52664405", "0.5266254", "0.52609843", "0.52586496", "0.5255319", "0.52550554", "0.5254774", "0.52513593", "0.5241077", "0.52409804", "0.5234664", "0.52314997", "0.52252614", "0.5221183", "0.52191514", "0.52132744", "0.5202034", "0.5191866", "0.5187881", "0.51840764", "0.51773405", "0.51760864", "0.517214", "0.51716083", "0.51713276", "0.51708794", "0.51708794", "0.51692545", "0.5166241", "0.516507", "0.516164", "0.5161423", "0.51605487", "0.5158073", "0.515681", "0.5155019", "0.51527536", "0.5148819", "0.5130326", "0.51274127", "0.5126797" ]
0.7083467
0
Process a full set of images, with parallelization if multiple CPU threads are available on this machine
def _process_images( raw_image_paths: pd.Series, raw_images_dir: str, ROI_definitions: Dict[str, Tuple], flat_field_filepath_or_none: Union[str, None], save_ROIs: bool, save_dark_frame_corrected_images: bool, save_flat_field_corrected_images: bool, ) -> Tuple[pd.DataFrame, pd.DataFrame]: def _process_image_local(raw_image_path): """ Version of process_image with all of the local configuration variables packed in. Also encapsulates the opening of the image. """ return process_image( original_rgb_image=raw.open.as_rgb(raw_image_path), original_image_filepath=raw_image_path, raw_images_dir=raw_images_dir, ROI_definitions=ROI_definitions, flat_field_filepath_or_none=flat_field_filepath_or_none, save_ROIs=save_ROIs, save_dark_frame_corrected_image=save_dark_frame_corrected_images, save_flat_field_corrected_image=save_flat_field_corrected_images, ) with ThreadPoolExecutor() as executor: # We want identical warnings to be shown only for the first image they occur on (the default), # but we also want subsequent calls to process_experiment to start with a fresh warning store # so that warnings don't stop showing after the first run. # catch_warnings gives us this fresh warning store. with warnings.catch_warnings(): # process_image returns roi_summary_data df, image_diagnostics df -> this will be a list of 2-tuples roi_summary_data_and_image_diagnostics_dfs_for_files = list( tqdm( executor.map(_process_image_local, raw_image_paths), total=len(raw_image_paths), ) ) roi_summary_data_for_files, image_diagnostics_for_files = zip( *roi_summary_data_and_image_diagnostics_dfs_for_files ) roi_summary_data_for_all_files = _stack_dataframes(roi_summary_data_for_files) image_diagnostics_for_all_files = _stack_serieses(image_diagnostics_for_files) return roi_summary_data_for_all_files, image_diagnostics_for_all_files
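Aside (not part of the dataset row above): the document boils down to one fan-out pattern — build a per-image closure, then map it across all raw image paths on a concurrent.futures pool so extra CPU threads are used automatically. A minimal, self-contained sketch of that pattern is below; summarize_image, the checksum "summary", and the .jpeg glob are hypothetical stand-ins for the real ROI/diagnostics processing, not the library's actual API.

from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
import hashlib


def summarize_image(path: Path) -> dict:
    # Placeholder per-image work: read the file and report its size and a checksum.
    data = path.read_bytes()
    return {"path": str(path), "bytes": len(data), "sha1": hashlib.sha1(data).hexdigest()}


def process_all(image_dir: str) -> list:
    paths = sorted(Path(image_dir).glob("*.jpeg"))
    # Leaving max_workers unset lets the executor size the pool from the
    # machine's CPU count, mirroring the ThreadPoolExecutor() call above.
    with ThreadPoolExecutor() as executor:
        return list(executor.map(summarize_image, paths))

Usage would be a single call such as process_all("raw_images/"), which returns one summary dict per image in path order, analogous to the stacked per-file dataframes returned by the document's function.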
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scan_images_multiprocessed(images, clf, processes, vstep=15, hstep=15, dnum=5):\n pool = Pool(processes=processes) # start 4 worker processes\n results = []\n for i in range(0, processes):\n begin = i * int(len(images) / processes)\n if i == processes - 1:\n end = len(images)\n else:\n end = (i + 1) * int(len(images) / processes)\n results.append(pool.apply_async(scan_images, (images[begin:end], clf, begin, vstep, hstep, dnum)))\n detections = []\n for result in results:\n detections.append(result.get())\n return np.concatenate(detections)", "def process_images(pool, func, images, entries):\n start = time.perf_counter()\n images = pool.map(func, images)\n logger.info(\"Erased white background from %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def _process_image_files(name, cnts, roots, num_shards): \n \n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, sum(cnts), FLAGS.num_threads + 1).astype(np.int)\n ranges = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n threads = []\n for thread_index in range(len(ranges)):\n args = (thread_index, ranges, name, cnts, roots, num_shards)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' %\n (datetime.now(), sum(cnts)))\n sys.stdout.flush()", "def load_images_test():\n\n path = os.path.join('./test','*.jpg')\n files = glob.glob(path)\n\n x_test = []\n x_test_id = []\n x_test_shape = []\n pool = multiprocessing.Pool(processes=8)\n for fl in files:\n print(fl)\n flbase = os.path.basename(fl)\n img = cv2.imread(fl, cv2.IMREAD_COLOR)\n img = cv2.imread(fl, cv2.IMREAD_COLOR)\n result_list = pool.map(process_image, [fl])\n x_test.append(result_list[0])\n x_test_id.append(flbase)\n #cv2.imshow(\"dst\", dst2)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n pool.close()\n return x_test, x_test_id", "def load_images_train():\n\n global pool\n x_train = []\n x_train_id = []\n y_train = []\n x_shape = []\n start_time = time.time()\n\n print(\"Reading train images\")\n folders = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']\n #folders = ['new']\n for fld in folders:\n index = folders.index(fld)\n print('Loading folder {} (Index: {})'.format(fld, index))\n path = os.path.join('./train1', fld, '*.jpg')\n files = glob.glob(path)\n pool = multiprocessing.Pool(processes=8)\n for fl in files:\n flbase = os.path.basename(fl)\n img = cv2.imread(fl,cv2.IMREAD_COLOR)\n result_list = pool.map(process_image, [fl])\n x_train.append(result_list[0])\n x_train_id.append(flbase)\n y_train.append(index)\n #x_shape.append(shape)\n\n print('Read train data time: {} seconds'.format(round(time.time() - start_time, 2)))\n pool.close()\n return x_train, y_train, x_train_id", "def process_images(self, images, mode='train'):\n if self.dataset_name == 'dataset1':\n return images[:5000]\n elif self.dataset_name == 'dataset2':\n return np.add(images, 745)\n elif self.dataset_name == 'dataset3':\n # concatenate three images into three-digit image\n if mode == 
'train':\n return np.concatenate((images[:40000], images[10000:50000],\n images[20000:60000]), axis=1)\n elif mode == 'test':\n return np.concatenate((images[:8000], images[1000:9000],\n images[2000:10000]), axis=1)\n elif self.dataset_name == 'dataset4':\n # merge two images into one\n if mode == 'train':\n return images[:50000] + images[-50000:]\n elif mode == 'test':\n return images[:9000] + images[-9000:]\n else:\n return images", "def classify_all_images(cc):\n print 'Classify images'\n images = cc.d.images\n for img_idx in range(comm_rank, len(images), comm_size): # PARALLEL\n print 'classify image %d/%d at %d'%(img_idx/comm_size, len(images)/comm_size, comm_rank)\n img = images[img_idx]\n scores = classify_image(cc, img_idx)\n savefile = config.get_classifier_score_name(img, cc.L)\n cPickle.dump(scores, open(savefile,'w'))", "async def extractimages(self, ctx):\n if self.extract_images_running:\n await ctx.send(inline('Extract images already running'))\n return\n\n event_loop = asyncio.get_event_loop()\n running_load = event_loop.run_in_executor(self.executor, self.do_extract_images)\n\n self.extract_images_running = True\n await ctx.send(inline('Running image extract pipeline: this could take a while'))\n await running_load\n self.extract_images_running = False\n await ctx.send(inline('Image extract finished'))", "def process_bounded_image_files(name, filenames, labels, num_shards, num_threads, output_dir):\n assert len(filenames) == len(labels)\n\n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, len(filenames), num_threads + 1).astype(np.int)\n ranges = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' % (num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a generic TensorFlow-based utility for converting all image codings.\n coder = BoundingImageCoder()\n\n threads = []\n for thread_index in range(len(ranges)):\n args = (coder, thread_index, ranges, name, filenames,\n labels, num_shards, output_dir)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' 
%\n (datetime.now(), len(filenames)))\n sys.stdout.flush()", "def process():\n config = read_config()\n \n\n img_dir = config['DEFAULT']['images_directory']\n results_dict = {}\n images = list(get_image_files(img_dir))\n for image in tqdm.tqdm(images):\n info = hash_file(image)\n if info == 0:\n continue\n\n hash_value = info['hash']\n\n if hash_value not in results_dict:\n file_name = os.path.basename(info['_id'])\n results_dict[hash_value] = [file_name, 1]\n else:\n results_dict[hash_value][1] += 1\n\n count = list(results_dict.values())\n sorted_count = sorted(count, key=lambda x: x[1], reverse=True)\n \n with ImagesDB(IMG_INFO_DB_FILENAME) as imgDb: \n imgDb.insert_batch(sorted_count)", "def process(image):\n pass", "def run(self):\n self.run_tasks()\n self.images = np.array(self.images)\n self.shapes.extend(self.images.shape[-2:])\n\n self.images = np.reshape(self.images, self.shapes)", "def multi_run_wrapper(args):\n\treturn img_preprocessing(*args)", "def classify_pics(pic_q):\n while not pic_q.empty():\n try:\n print (\"Starting a batch of threads...\")\n threads = []\n for i in range(MAX_THREADS):\n if not pic_q.empty():\n picTuple = pic_q.get()\n t = Thread(target=classify_pic,\n args=(picTuple[0], picTuple[1], pic_q))\n threads.append(t)\n t.start()\n\n for t in threads:\n t.join()\n\n finally:\n # write to DATA_FILE even when process is interrupted\n with open(DATA_FILE, 'w') as data_file:\n print (\"Rewriting %s with %s entries\" %\n (DATA_FILE, len(classify_data)))\n json.dump(classify_data, data_file)", "def main(save_dir, img_dir, df, fname_col):\n\tpool = mp.Pool(mp.cpu_count())\n\tresult = pool.map(multi_run_wrapper,[(save_dir, img_dir, \n\t\t\t\t\t\tfname) for fname in df[fname_col].values[0:4]])", "def _compute_ij_images_numpy_parallel(\n src_x_image: np.ndarray,\n src_y_image: np.ndarray,\n src_i_min: int,\n src_j_min: int,\n dst_src_ij_images: np.ndarray,\n dst_x_offset: float,\n dst_y_offset: float,\n dst_x_scale: float,\n dst_y_scale: float,\n uv_delta: float\n):\n src_height = src_x_image.shape[-2]\n dst_src_ij_images[:, :, :] = np.nan\n for src_j0 in nb.prange(src_height - 1):\n _compute_ij_images_for_source_line(\n src_j0,\n src_x_image,\n src_y_image,\n src_i_min,\n src_j_min,\n dst_src_ij_images,\n dst_x_offset,\n dst_y_offset,\n dst_x_scale,\n dst_y_scale,\n uv_delta\n )", "def run_skim(self):\n # Split input into chunks for processin\n skim_files = glob.glob(self.args.input + \"*.root\")\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n pool.imap(_run_skim, skim_files)\n # Close and join pool\n pool.close()\n pool.join()", "def read_images(imagedir, size, ncores=mp.cpu_count()):\n _f = functools.partial(_image_worker, size=size)\n with mp.Pool(ncores) as pool:\n ret = pool.map(_f, get_files(imagedir))\n return {k: v for k,v in ret if v is not None}", "def train_batch_create_mp(imagedirs, classes, indices, image_key, offset_percent, output_size, nprocesses):\r\n batch_size = len(indices)\r\n n_classes = len(classes)\r\n # now create the output class and pixel arrays\r\n output_array = np.zeros((batch_size, output_size[0], output_size[1], output_size[2]), dtype=np.float32)\r\n class_array = np.zeros((batch_size, n_classes), dtype=np.int8)\r\n batch_data = [image_key[i] for i in indices]\r\n whole_minibatch_size = batch_size // nprocesses\r\n num_whole_minibatches = batch_size // whole_minibatch_size\r\n input_list = []\r\n for i in range(num_whole_minibatches):\r\n 
input_list.append(batch_data[whole_minibatch_size*i:whole_minibatch_size*(1+i)])\r\n if batch_size % nprocesses != 0:\r\n input_list.append(batch_data[whole_minibatch_size*num_whole_minibatches:])\r\n frozen_params = (imagedirs, classes, offset_percent, output_size)\r\n partial_worker = partial(batch_worker, frozen_params=frozen_params)\r\n # initializes the pool of processes\r\n print('building pool')\r\n pool = multiprocessing.Pool(nprocesses)\r\n # maps partial_worker and list of stars to the pool, stores used parameters in a list\r\n print('mapping pool')\r\n outputs = pool.map(partial_worker, input_list)\r\n # end the list of functions to go to pool\r\n pool.close()\r\n print('pool closed')\r\n # wait for all processes to return\r\n pool.join()\r\n print('pool joined')\r\n counter = 0\r\n for i in range(len(outputs)):\r\n current_output = outputs[i]\r\n pixel_data = current_output[0]\r\n class_data = current_output[1]\r\n num_fish = len(pixel_data)\r\n for lf in range(num_fish):\r\n output_array[counter, :, :, :] = np.reshape(pixel_data[lf], output_size)\r\n class_array[counter, :] = class_data[lf]\r\n counter += 1\r\n return output_array, class_array", "def _iter_images(self):\n raise NotImplementedError", "def process_images(image, label):\n # Normalize images to have a mean of 0 and standard deviation of 1\n # per_image_standardization is preferred, which normalize the entire image to mean zero and std 1.\n # It also make learning fast.\n image = tf.image.per_image_standardization(image)\n # Resize images from 32x32 to 277x277\n image = tf.image.resize(image, (227,227))\n return image, label", "def process(self, image):", "def run(self, images):\n\n # Apply filtering\n if len(self.preprocessing) > 0: \n print('Applying', len(self.preprocessing), 'filter(s) to input images')\n for filter in self.preprocessing:\n for i in range(len(images)):\n images[i] = filter(images[i])\n\n # Apply feature extraction\n if len(self.features) > 0:\n print('Extracting', len(self.features), 'feature(s) from input images')\n scaler = MinMaxScaler(feature_range=(0, 1))\n for i in range(len(images)):\n features = []\n for feature in self.features:\n features.append(feature(images[i]))\n images[i] = np.hstack(features)\n images = scaler.fit_transform(images)\n else:\n # Flatten images (not necessary when using feature extraction)\n train_data = np.array(train_data).reshape((len(train_data), -1))\n\n # Run predictions\n print('Predicting presence of parasites in', len(images), 'images\\n')\n return self.classifier.predict(images)", "def download_images(pages):\n try:\n pool = Pool(conf.MAX_PROCESS)\n pool.map_async(get_image_from_page, pages)\n pool.close()\n pool.join()\n except:\n pool.close()\n pool.join()", "def do_image_operations(self):\n def inner(future):\n self.done_callback()\n\n self.context.thread_pool.queue(\n operation=self.img_operation_worker,\n callback=inner\n )", "def batch_worker(minibatch_info, frozen_params):\r\n imagedirs = frozen_params[0]\r\n classes = frozen_params[1]\r\n offset_percent = frozen_params[2]\r\n output_size = frozen_params[3]\r\n nclass = len(classes)\r\n nfish = len(minibatch_info)\r\n class_onehot = np.zeros((nfish, nclass), dtype=np.int8)\r\n imdata = np.zeros((nfish, int(np.prod(output_size))))\r\n for i in range(nfish):\r\n current_fishtuple = minibatch_info[i]\r\n fish_type = current_fishtuple[-1]\r\n fish_type = fish_type.strip(\"'\")\r\n fish_directory = imagedirs[fish_type]\r\n imdata[i, :] = read_single_image(current_fishtuple, fish_directory, 
offset_percent, output_size)\r\n if fish_type != 'NoF':\r\n fish_type = 'FISH'\r\n fish_class = int(classes[fish_type])\r\n class_onehot[i, fish_class] = 1\r\n return imdata, class_onehot", "def extract(directory):\n global usersDict\n images = []\n\n for (dirpath, dirnames, filenames) in walk(directory):\n if not filenames:\n continue\n for file in filenames:\n img = Image(dirpath, file)\n images.append(img)\n # This will utilized all cores, good for single machine / VM, it is not a distributed solution\n pool = Pool(4, initializer, ())\n\n pool.map(model_processing, images)\n\n print('FINISHHH----', usersDict)\n for user in usersDict:\n print('DICTTT----', user.images)\n user.save()", "def process_images(images, cam, params):\n print cam, params\n groups = groupby(images, \"EXPTIME\")\n for time, ims in groups.items():\n func = {\"sbc\": make_sbc_flat_name, \"sky\": make_sky_flat_name}[cam]\n out = func(time, params)\n out = os.path.join(FLATPATH, out)\n print time, len(ims), out\n make_flat_avg(ims, out)", "def run_map(self):\n # Split input into chunks for processing\n files = self.split_list()\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n self.output = pool.map(_run, files)\n # Close and join pool\n pool.close()\n pool.join()", "def process_images(self):\n self.processed_content_image = tf.keras.applications.vgg19.preprocess_input(\n self.content_image)\n self.processed_style_image = tf.keras.applications.vgg19.preprocess_input(\n self.style_image)", "def scale_all_images(image_dir, ratio):\n pool = Pool(1)\n pool.starmap(scale_image, zip(\n image_dir, itertools.repeat(ratio)))\n pool.close()\n pool.join()", "def main():\n # CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer\n # compression time. If read raw images during training, use 0 for faster IO speed.\n\n # create output folders if they don't already exist\n for dir in [save_folder, save_mask_folder,save_hist_plot_folder]:\n if dir != None:\n if not os.path.exists(dir):\n os.makedirs(dir)\n print('mkdir [{:s}] ...'.format(dir))\n\n else:\n # print('Folder [{:s}] already exists. 
Exit...'.format(save_folder))\n # sys.exit(1)\n pass # uncomment above two lines for ease of working, if necessary\n\n img_list = []\n for root, dirsfoo, file_list in sorted(os.walk(input_folder)): # +'/*SR.tif'\n for x in file_list: # assume only images in the input_folder\n if x.endswith(\"SR.tif\"):\n path = os.path.join(root, x) \n img_list.append(path)\n break\n # img_list = ['/data_dir/Scenes/20190619_191648_25_106f_3B_AnalyticMS_SR.tif'] # for testing\n def update(arg):\n pbar.update(arg)\n # img_list=img_list[238:270] # for testing\n pbar = ProgressBar(len(img_list))\n pool = Pool(n_thread) # (n_thread)\n for path in img_list:\n if input_mask_folder==None:\n path_mask=None\n else:\n path_mask=name_lookup(path) # lookup mask path\n pool.apply_async(worker,\n args=(path, save_folder, crop_sz, step, thres_sz, compression_level, path_mask, save_mask_folder),\n callback=update)\n pool.close()\n pool.join()\n print('All subprocesses done.')", "def multiprocess_filtered_images_to_tiles(display=False, save_summary=True, save_data=True, save_top_tiles=True,\n html=False, image_list=None):\n timer = Time()\n print(\"Generating tile summaries (multiprocess)\\n\")\n\n if save_summary and not os.path.exists(slide.TILE_SUMMARY_DIR):\n os.makedirs(slide.TILE_SUMMARY_DIR)\n\n # how many processes to use\n num_processes = min(multiprocessing.cpu_count(),5)#multiprocessing.cpu_count()\n pool = multiprocessing.Pool(num_processes)\n\n if image_list is not None:\n num_train_images = len(image_list)\n\n if num_processes > num_train_images:\n num_processes = num_train_images\n images_per_process = num_train_images / num_processes\n\n print(\"Number of processes: \" + str(num_processes))\n print(\"Number of training images: \" + str(num_train_images))\n\n tasks = []\n for num_process in range(1, num_processes + 1):\n start_index = (num_process - 1) * images_per_process + 1\n end_index = num_process * images_per_process\n start_index = int(start_index)\n end_index = int(end_index)\n if image_list is not None:\n sublist = image_list[start_index - 1:end_index]\n tasks.append((sublist, display, save_summary, save_data, save_top_tiles))\n print(\"Task #\" + str(num_process) + \": Process slides \" + str(sublist))\n else:\n tasks.append((start_index, end_index, display, save_summary, save_data, save_top_tiles))\n if start_index == end_index:\n print(\"Task #\" + str(num_process) + \": Process slide \" + str(start_index))\n else:\n print(\"Task #\" + str(num_process) + \": Process slides \" + str(start_index) + \" to \" + str(end_index))\n\n # start tasks\n results = []\n for t in tasks:\n if image_list is not None:\n results.append(pool.apply_async(image_list_to_tiles, t))\n else:\n results.append(pool.apply_async(image_range_to_tiles, t))\n\n slide_names = list()\n tile_summaries_dict = dict()\n for result in results:\n image_nums, tile_summaries = result.get()\n slide_names.extend(image_nums)\n tile_summaries_dict.update(tile_summaries)\n print(\"Done tiling slides: %s\" % image_nums)\n\n if html:\n generate_tiled_html_result(slide_names, tile_summaries_dict, save_data)\n\n print(\"Time to generate tile previews (multiprocess): %s\\n\" % str(timer.elapsed()))", "def image_processor(self, img_arr):\n assert img_arr.dtype == np.uint8, \\\n f\"image_processor requires uint8 array but not {img_arr.dtype}\"\n img_arr = self.transformation.run(img_arr)\n if self.is_train:\n img_arr = self.augmentation.run(img_arr)\n img_arr = self.post_transformation.run(img_arr)\n\n return img_arr", "def run_calculation():\n\n 
print(\"Creating %d-process pool\" % mp.cpu_count())\n\n pool = mp.Pool(mp.cpu_count())\n\n f = h5py.File('/testdata/mandelbrot.hdf5', 'w')\n\n print(\"Creating output dataset with shape %s x %s\" % (NX, NY))\n\n dset = f.create_dataset('mandelbrot', (NX, NY), 'i')\n dset.attrs['XSTART'] = XSTART\n dset.attrs['YSTART'] = YSTART\n dset.attrs['XEXTENT'] = XEXTENT\n dset.attrs['YEXTENT'] = YEXTENT\n\n result = pool.imap(compute_row, (x * xincr for x in range(NX)))\n\n for idx, arr in enumerate(result):\n if idx % 25 == 0: print(\"Recording row %s\" % idx)\n dset[idx] = arr\n\n print(\"Closing HDF5 file\")\n\n f.close()\n\n print(\"Shutting down process pool\")\n\n pool.close()\n pool.join()", "def process_image(self, **kwargs):\n try:\n img = self.current_image\n\n if self.is_vis:\n result = self._process_job_vis(img, **kwargs)\n elif self.is_nir:\n result = self._process_job_nir(img, **kwargs)\n elif self.is_fluo:\n result = self._process_job_fluo(img, **kwargs)\n else:\n raise NotImplementedError\n\n except Exception as e:\n print(\n 'Failed to process image: \"{}\", because \"{}\"'.format(\n self.file_path, repr(e)\n )\n )\n self.print_images()\n return False\n\n self.print_images()\n\n return result", "def load_images(pool, entries):\n start = time.perf_counter()\n images = pool.map(ski.io.imread, [x.path for x in entries])\n logger.info(\"Loaded %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def flow(self, batch_size=32):\n nb_batches = int(len(self.image_ids_in_subset) / batch_size) + 1\n while True:\n # Before each epoch we shuffle the images' ids\n random.shuffle(self.image_ids_in_subset)\n\n for i in range(nb_batches):\n # We first get all the image ids for the next batch\n current_bach = self.image_ids_in_subset[i*batch_size:(i+1)*batch_size]\n X_batch = []\n Y_batch = []\n\n for image_id in current_bach:\n # Load the image and resize it. We get a PIL Image object\n img = image.load_img(self.get_img_path(int(image_id)), grayscale=False, target_size=(cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE))\n # Cast the Image object to a numpy array and put the channel has the last dimension\n img_arr = image.img_to_array(img, data_format='channels_last')\n X_batch.append(img_arr)\n # Y_batch.append(self.id_to_label[image_id])\n Y_batch.append(self.get_labels(image_id))\n\n # resize X_batch in (batch_size, IMG_HEIGHT, IMG_WIDTH, 3)\n X_batch = np.reshape(X_batch, (-1, cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE, 3))\n # resize Y_batch in (None, nb_classes)\n Y_batch = np.reshape(Y_batch, (-1, self.nb_classes))\n\n # substract mean values from imagenet\n X_batch = preprocess_input(X_batch, data_format='channels_last')\n yield(X_batch, Y_batch)", "def _process_image_files_batch(coder, thread_index, ranges, name, filenames,\n labels, num_shards, output_dir):\n # Each thread produces N shards where N = int(num_shards / num_threads).\n # For instance, if num_shards = 128, and the num_threads = 2, then the first\n # thread would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0],\n ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in range(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)\n output_file = os.path.join(output_dir, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in files_in_shard:\n filename = filenames[i]\n label = labels[i]\n\n try:\n image_buffer, height, width = _process_image(filename, coder)\n except Exception as e:\n print(e)\n print('SKIPPED: Unexpected error while decoding %s.' % filename)\n continue\n\n example = _convert_to_example(filename, image_buffer, label,\n height, width)\n writer.write(example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print('%s [thread %d]: Processed %d of %d images in thread batch.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print('%s [thread %d]: Wrote %d images to %s' %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print('%s [thread %d]: Wrote %d images to %d shards.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()", "def run(images):\n sc = SparkContext()\n rdd = sc.parallelize(images, 16) \\\n .map(truncate).repartition(16)\n rdd = generate_Y_cb_cr_matrices(rdd)\n rdd = generate_sub_blocks(rdd)\n rdd = apply_transformations(rdd)\n rdd = combine_sub_blocks(rdd)\n\n ### BEGIN SOLUTION HERE ###\n # Add any other necessary functions you would like to perform on the rdd here\n # Feel free to write as many helper functions as necessary\n return rdd", "def process(self):\n if self.images == None:\n return None\n\n result = self.tf_session.run(\n self.output_tensor,\n feed_dict={self.input_tensor: np.array(self.images)})\n self.images = None\n return result", "def read_processed_images(mode, image_type):\n raw_data = read_image_data(mode, image_type)\n labels = read_label_data(mode, image_type)\n features = np.apply_along_axis(extract_features, 1, raw_data)\n return ProcessedImageData(features, labels, np.arange(len(features)))", "def process_image(self):\n pass", "def split_preprocess_jobs(preprocess_images_job, input_images, prefix):\n resized_images = []\n\n for i in range(len(input_images)):\n curr = i % len(preprocess_images_job)\n preprocess_images_job[curr].add_inputs(input_images[i])\n out_file = File(prefix + str(input_images[i]))\n preprocess_images_job[curr].add_outputs(out_file)\n resized_images.append(out_file)\n \n return resized_images", "def process_image(self):\n\n detect.main(self.nn_args)", "def process(\n self,\n image: np.array\n ) -> np.array:\n pass", "def read_images(fs, img_path_batch, mode=\"rb\"):\n result = []\n logging.info(\"Start to read images at {}\".format(socket.gethostname()))\n for (label, img_path) in img_path_batch:\n img = read_image(fs, img_path, mode)\n result.append((label, img))\n logging.info(\"Finish the reading of {} images on {}\".format(\n len(result), socket.gethostname()))\n return result", "def batch(img_path, gt_path,img_list, batch, total_size, label_list):\r\n\r\n image_list = [os.path.join(img_path, i) for i in img_list]\r\n gt_list = [os.path.join(gt_path,i) for i in img_list]\r\n\r\n \r\n for i in range(0, total_size, batch):\r\n yield image_load_resize(image_list[i:i+batch]), make_label_map(gt_list[i:i+batch], label_list)", "def process_image():\n global last_frame, is_streaming\n i=0\n\n imgproc = 
ImgProc()\n while(True):\n if last_frame is not None and is_streaming:\n time.sleep(0.1)\n\n print(\"Processing frame \", i)\n imgproc.detect_object(last_frame, i)\n print(\"Processing complete \", i)\n i+=1", "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def run():\n\n today = datetime.now().strftime(\"%Y-%m-%d\")\n log_file = os.path.abspath(\"logs/{}.log\".format(today))\n logger = RsmasLogger(\"pipeline\", log_file)\n\n images = get_list_of_images()\n # LOG: list of images to process\n logger.log(loglevel.INFO, [img.key for img in images])\n\n for im in images:\n\n logger.log(loglevel.INFO, \"Processing image: {}\".format(im.key))\n\n file_path = \"{}/{}\".format(im.bucket_name, im.key)\n full_path = \"{}_full.jpg\"\n mod_path = \"{}_mod.jpg\"\n aws_path = \"{}/{}/{}/{}\"\n try:\n haz_id, haz_name, sat_name, sat_dir, img_type, img_date, center = summary.pull_summary_data(\n \"/vsis3/{}\".format(file_path))\n sat_id = Satellite.from_params(sat_name, bool(sat_dir))\n except:\n # LOG: error in image metadata format\n logger.log(loglevel.ERROR, '\\tThere was an error in the metadata format of the image. Skipping.')\n continue\n\n aws_path = aws_path.format(haz_id, sat_id, img_type, img_date)\n full_path = full_path.format(img_date)\n mod_path = mod_path.format(img_date)\n\n # 1. Read in image file\n with rasterio.open(\"s3://{}\".format(file_path)) as data:\n band = data.read(1)\n img = plot.show(band)\n img.get_figure().savefig(full_path, dpi=300)\n\n # 3. Compress image\n compressed = immanip.compress_image(full_path, compression_amount=0.3)\n\n # 4 - 5. Pad image and add date on image\n text_image = immanip.add_text_to_image(compressed, img_date)\n\n # 6. Save image locally\n text_image.save(mod_path.format(img_date))\n mod_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, mod_path))\n full_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, full_path))\n\n tif_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, im.key))\n\n # LOG: images successfully moved to S3 bucket\n # LOG: mod_path_aws, full_path_aws, tif_path_aws\n\n hazard = Hazard(haz_id, haz_name, HazardType.VOLCANO, Location(center[0], center[1]), Date(img_date), 0)\n satellite = Satellite.from_params(sat_name, bool(sat_dir))\n image = Image(str(randint(1, 10000000)),\n haz_id,\n satellite,\n ImageType.from_string(img_type),\n Date(img_date),\n ImageURL(full_path_aws),\n ImageURL(tif_path_aws),\n ImageURL(mod_path_aws))\n\n try:\n db = Database()\n except ConnectionError:\n logger.log(loglevel.ERROR, \"\\tThere was an error while connecting to the database. 
Skipping this image.\")\n continue\n\n db.create_new_hazard(hazard)\n db.create_new_satellite(satellite)\n db.create_new_image(image)\n\n db.close()\n\n # LOG: database successfully updated\n logger.log(loglevel.INFO, \"\\tDatabase succesfully updated.\")\n\n save.save_image_s3(mod_path, \"{}/{}\".format(aws_path, mod_path))\n save.save_image_s3(full_path, \"{}/{}\".format(aws_path, full_path))\n save.move_tif(im.key, \"{}/{}\".format(aws_path, im.key))\n\n logger.log(loglevel.INFO, \"\\tImages were successfully uploaded to the S3 bucket\")\n logger.log(loglevel.INFO, \"\\t\\tmod_path_aws: {}\".format(mod_path_aws))\n logger.log(loglevel.INFO, \"\\t\\tfull_path_aws: {}\".format(full_path_aws))\n logger.log(loglevel.INFO, \"\\t\\ttif_path_aws: {}\".format(tif_path_aws))\n\n # LOG: image completed\n logger.log(loglevel.INFO, \"\\tProcessing of {} completed.\".format(im.key))\n\n # LOG: finished processing images\n logger.log(loglevel.INFO, \"Processing complete.\")", "def _compute_ij_images_numpy_sequential(\n src_x_image: np.ndarray,\n src_y_image: np.ndarray,\n src_i_min: int,\n src_j_min: int,\n dst_src_ij_images: np.ndarray,\n dst_x_offset: float,\n dst_y_offset: float,\n dst_x_scale: float,\n dst_y_scale: float,\n uv_delta: float\n):\n src_height = src_x_image.shape[-2]\n dst_src_ij_images[:, :, :] = np.nan\n for src_j0 in range(src_height - 1):\n _compute_ij_images_for_source_line(\n src_j0,\n src_x_image,\n src_y_image,\n src_i_min,\n src_j_min,\n dst_src_ij_images,\n dst_x_offset,\n dst_y_offset,\n dst_x_scale,\n dst_y_scale,\n uv_delta\n )", "def compute(self,n_speed,s_speed,msize,thershold):\n X=self.image\n for layer in self.CNN:\n #layer dictonary {type,kernel,bias,hparams}\n #offloading decisions\n kernel=layer[\"kernel\"]\n hparam=layer[\"hparams\"]\n if layer[l_type]==\"conv\":\n off_dec=offload(n_speed,s_speed,msize,X,kernel,hparam)\n if(off_dec.checkOffload(thershold)):\n #get the result form the server\n conv_dict={ \"data\":X,\"l_type\":layer[l_type],\"hpara\":hparam,\"pos\":0}\n c=client(conv_dict,self.edge[\"ip\"],self.edge[\"port\"])\n c.send()\n X=c.receive_array()\n \n else:\n X=self.thread_Compute(X,layer)\n\n else:\n X=self.thread_Compute(X,layer)", "def ImagePipeline(self,cnn_pipe = False, batch_index = None):\n\t\t\n\t\tif self.verbose:\n\t\t\tprint \"...createFolderStructure\"\n\n\t\tself.createFolderStructure()\n\n\t\tif self.verbose:\n\t\t\tprint \"...downloadImages\"\n\n\t\tself.downloadImages()\n\n\t\tif self.verbose:\n\t\t\tprint \"...binarize_classes\"\n\n\t\tclasses, lb = self.binarize_classes()\n\n\t\tif self.verbose:\n\t\t\tprint \"...load_paths_and_labels\"\n\n\t\tim_paths, im_labels = self.load_paths_and_labels(classes)\n\n\t\tif self.verbose:\n\t\t\tprint \"...load_images\"\n\t\t\t\t\n\t\t# Uncomment this if you just want to use one cpu\t\t\n\t\t#imlist = self.load_images(im_paths,cnn_pipe)\n\t\t#self.load_images(im_paths,[],[])\n\t\t#imlist = self.imlist\n\n\t\timlist, self.im_index = self.load_images_parallel(im_paths)\n\t\t#print len(imlist)\n\n\t\t# Sort the list by index so we don't have to do as many iteration in finding similar\n\t\t#if not cnn_pipe:\n\t\tzipped = zip(self.im_index, imlist)\n\t\tzipped_sorted = sorted(zipped, key=lambda x: x[0])\n\t\tself.im_index , imlist = zip(*zipped_sorted)\n\n\t\taverage_image = None\n\t\tif cnn_pipe:\n\t\t\tif self.verbose:\n\t\t\t\tprint \"...calculate_average_image\"\n\t\t\taverage_image = self.calculate_average_image(imlist)\n\n\t\tif self.verbose:\n\t\t\tprint 
\"\\n...data_augmentation_and_vectorization\\n\"\n\t\t\n\t\t#print imlist\n\n\t\tX,Y = self.data_augmentation_and_vectorization(imlist,lb,im_labels,average_image)\n\n\t\toutput = open( self.data_path + 'im_index.pkl', 'wb')\n\t\tcPickle.dump(self.im_index, output,protocol=-1)\n\t\toutput.close()\n\n\t\tif self.verbose:\n\t\t\tprint \"...dimReductionSdA\"\n\t\t\n\t\tX = self.dimReductionSdA(X)\n\t\t# print X[0][0:3]\n\t\t# print X[1][0:3]\n\t\t#X = self.dimReduction(X)\n\n\t\toutput = open( self.data_path + 'X_compressed_'+str(batch_index)+'.pkl', 'wb')\n\t\tcPickle.dump(X, output,protocol=-1)\n\t\toutput.close()\n\n\t\toutput = open( self.data_path + 'im_index_' + str(batch_index) + '.pkl', 'wb')\n\t\tcPickle.dump(self.im_index, output,protocol=-1)\n\t\toutput.close()\n\n\t\tif cnn_pipe:\n\t\t\tif self.verbose:\n\t\t\t\tprint \"\\n...create_train_validate_test_sets\\n\"\n\t\t\ttrain_set,valid_set,test_set = self.create_train_validate_test_sets(X, Y)\n\t\t\treturn train_set,valid_set,test_set\n\t\telse:\n\n\t\t\tif self.verbose:\n\t\t\t\tprint \"\\n...similarImages\\n\"\n\n\t\t\tdf, duplicated_images = self.similarImages(X)\n\t\t\n\t\t\treturn df,duplicated_images", "def main(self, path_4a_cobertura, move_img_bool, process_with_thread):\n all_rapideye = open(FILE_ALL_RAPIDEYE, 'r')\n imgs = all_rapideye.readlines()\n\n if process_with_thread:\n\n PrepareThreads.perform(\n imgs, self.__prepare_process_many_imgs, path_4a_cobertura,\n move_img_bool\n )\n\n else:\n for img in imgs:\n self.__prepare_process_many_imgs(\n path_4a_cobertura, img, move_img_bool, 'foot_1'\n )", "def _process_image_paths(\n self, image_paths: Iterable[str], *, use_cache: bool = True\n ) -> Iterator[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]:\n assert self.produce_featurized_images, (\n \"For _process_image_paths() to work, we need either a feature cache, or an image loader, \"\n \"an image featurizer, and a region detector.\"\n )\n\n batch: List[Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]] = []\n unprocessed_paths: Set[str] = set()\n\n def yield_batch():\n # process the images\n paths = list(unprocessed_paths)\n images, sizes = self.image_loader(paths)\n with torch.no_grad():\n images = images.to(self.cuda_device)\n sizes = sizes.to(self.cuda_device)\n featurized_images = self.image_featurizer(images, sizes)\n detector_results = self.region_detector(images, sizes, featurized_images)\n features = detector_results.features\n coordinates = detector_results.boxes\n class_probs = detector_results.class_probs\n class_labels = detector_results.class_labels\n\n # store the processed results in memory, so we can complete the batch\n paths_to_tensors = {}\n for i, path in enumerate(paths):\n if class_probs:\n class_probs_tensor = class_probs[i]\n else:\n class_probs_tensor = None\n\n if class_labels:\n class_labels_tensor = class_labels[i]\n else:\n class_labels_tensor = None\n\n paths_to_tensors[path] = (\n features[i],\n coordinates[i],\n class_probs_tensor,\n class_labels_tensor,\n )\n\n # store the processed results in the cache\n if use_cache and self.write_to_cache:\n for path, (\n features,\n coordinates,\n class_probs,\n class_labels,\n ) in paths_to_tensors.items():\n basename = os.path.basename(path)\n self._feature_cache[basename] = features\n self._coordinates_cache[basename] = coordinates\n if class_probs is not None:\n self._class_probs_cache[basename] = class_probs\n if class_labels is not None:\n self._class_labels_cache[basename] = class_labels\n\n # yield the batch\n 
for b in batch:\n if isinstance(b, str):\n yield paths_to_tensors[b]\n else:\n yield b\n\n for image_path in image_paths:\n basename = os.path.basename(image_path)\n try:\n if use_cache:\n features: Tensor = self._feature_cache[basename]\n coordinates: Tensor = self._coordinates_cache[basename]\n class_probs: Optional[Tensor] = self._class_probs_cache.get(basename)\n class_labels: Optional[Tensor] = self._class_labels_cache.get(basename)\n if len(batch) <= 0:\n yield features, coordinates, class_probs, class_labels\n else:\n batch.append((features, coordinates, class_probs, class_labels))\n else:\n # If we're not using the cache, we pretend we had a cache miss here.\n raise KeyError\n except KeyError:\n if not (self.image_loader and self.region_detector and self.image_featurizer):\n if use_cache:\n raise KeyError(\n f\"Could not find {basename} in the feature cache, and \"\n \"image featurizers are not defined.\"\n )\n else:\n raise KeyError(\n \"Reading the feature cache is disabled, and image featurizers \"\n \"are not defined. I can't process anything.\"\n )\n batch.append(image_path)\n unprocessed_paths.add(image_path)\n if len(unprocessed_paths) >= self.image_processing_batch_size:\n yield from yield_batch()\n batch = []\n unprocessed_paths = set()\n\n if len(batch) > 0:\n yield from yield_batch()", "def processImage(fpaths_src, label_map, fnames_src, img_idx):\n global counter\n \n n_imgs = len(fpaths_src)\n print(\"Processing %s -- %s/%s (%s%%)\"%(fnames_src[img_idx],counter,n_imgs,round(100.*counter/n_imgs)))\n \n path = fpaths_src[img_idx]\n src_image_raw = Image.open(path, 'r')\n \n # size normalization of the image\n if not (args.resize == None):\n src_image_raw = src_image_raw.resize(size=(int(args.resize), int(args.resize)), resample=Image.BILINEAR)\n \n # convert to writable numpy array\n src_image = np.asarray(src_image_raw, dtype=np.uint8)\n src_image.setflags(write=True)\n \n # some dummy label\n label = -99.99\n # the labels\n if not (label_map == {}):\n # let the label start at 1, instead of 0\n label = int(label_map[fnames_src[img_idx]])+1\n else:\n # add a dummy label (between 0 and 1)\n label = np.random.rand()\n \n image_features = []\n \n # add the original\n image_features.append(generateFeatures(src_image,label,args.knn))\n \n if args.augment == 1:\n print \"Augmenting dataset...\"\n # data augmentation techniques\n rotation_angles = [i for i in xrange(36,360,36)] # samples are transformed by these rotation angles\n \n flip_x = True # data augmentation by flipping around x axis\n flip_y = True # data augmentation by flipping around y axis\n flip_xy= True # data augmentation by flipping around x AND y axis\n \n for angle in rotation_angles:\n rot_matrix = cv2.getRotationMatrix2D(\n (src_image.shape[1]/2.,src_image.shape[0]/2.),\n angle,\n 1.0)\n rot_sample_crop = np.array([])\n rot_sample_crop = cv2.warpAffine(src_image,\n rot_matrix,\n (src_image.shape[1],src_image.shape[0]),\n rot_sample_crop,\n cv2.INTER_LINEAR,\n cv2.BORDER_REFLECT_101)\n \n # add the sample to the dataset\n image_features.append(generateFeatures(rot_sample_crop,label,args.knn))\n \n # add 3 flipped copies\n if flip_x:\n rot_sample_crop_x = cv2.flip(rot_sample_crop,0)\n image_features.append(generateFeatures(rot_sample_crop_x,label,args.knn))\n if flip_y:\n rot_sample_crop_y = cv2.flip(rot_sample_crop,1)\n image_features.append(generateFeatures(rot_sample_crop_y,label,args.knn))\n if flip_xy:\n rot_sample_crop_xy = cv2.flip(rot_sample_crop,-1)\n 
image_features.append(generateFeatures(rot_sample_crop_xy,label,args.knn))\n \n counter+=1\n\n # return a nx128 or nxk matrix for the features of all modifications of this image\n feat_matrix = np.asarray(image_features)\n return feat_matrix", "def batch_process(minID, maxID, side='blue', **kwargs):\r\n\r\n if side == 'both':\r\n sides = ['blue','red']\r\n else:\r\n sides = [side]\r\n for side in sides:\r\n for i in range(minID, maxID+1, 1):\r\n filename = '%s%04d.fits' % (side, i)\r\n if os.path.exists(filename):\r\n try:\r\n extract1D(i, side=side, **kwargs)\r\n except iraf.IrafError:\r\n # some errors just require you to try again...\r\n print 'Hit error, retrying...'\r\n extract1D(i, side=side, **kwargs)", "def run_images_analysis(filepath, ID, method):\n for path in filepath:\n try:\n Image.open(path)\n except IOError:\n msg = 'Please import images files, or just a single zip archive'\n else:\n filename, extension = get_file_name(path)\n\n # Save raw image to database\n msg = client.upload_file(ID, filename, extension, path)\n\n err, msg = check_msg(msg)\n\n if err is False: # if no error in uploading image\n # Request to process image\n client.process_image(ID, filename, method)\n return msg", "def test_compare_serial_with_multiprocess(sidesweep_image_sequence):\n\n cc = Cwsim_container_from_ims(ims=sidesweep_image_sequence)\n\n serial_times = []\n for idx, im in enumerate(sidesweep_image_sequence):\n t1 = time()\n cc.query_image(im)\n t2 = time() - t1\n serial_times.append(t2)\n serial_mean = np.mean(serial_times)\n\n # prepare for multiprocess stuff\n cc.prepare_memory_bank_outside()\n test_im = sidesweep_image_sequence[1]\n cc.query_image_mp(test_im)\n multip_times = []\n for idx, im in enumerate(sidesweep_image_sequence):\n t1 = time()\n cc.query_image_mp(im)\n t2 = time() - t1\n multip_times.append(t2)\n multip_mean = np.mean(multip_times)\n print('Serial mean: {}, multip mean: {} - speedup = {}'.format(serial_mean, multip_mean,serial_mean / multip_mean))", "def main():\n nb_processed = 0\n for dataset_name in DATASETS:\n print(\"-----------------\")\n print(\"Dataset: '%s'\" % (dataset_name,))\n print(\"-----------------\")\n\n dataset_dir = os.path.join(WRITE_MAIN_DIR, dataset_name)\n if not os.path.isdir(dataset_dir):\n os.makedirs(dataset_dir)\n\n dataset = Dataset([os.path.join(READ_MAIN_DIR, dataset_name)])\n print(\"Found %d images total.\" % (len(dataset.fps),))\n\n errors = []\n\n scale_height, scale_width = SCALES[dataset_name]\n target_aspect_ratio = scale_width / scale_height\n\n # iterate over every image in the current dataset,\n # augment that image N times, add cols/rows until target aspect ratio\n # is reached, resize it (e.g. 
64x64), save it\n for img_idx, (image_filepath, image) in enumerate(zip(dataset.fps, dataset.get_images())):\n print(\"[%s] Image %d of %d (%.2f%%)...\" \\\n % (dataset_name, img_idx+1, len(dataset.fps),\n 100*(img_idx+1)/len(dataset.fps)))\n\n # IOErrors during loading of images result here in a None value\n if image is None:\n print(\"Error / None\")\n errors.append((\n image_filepath,\n \"Failed to load image '%s' (idx %d for dataset %s)\" \\\n % (image_filepath, img_idx, dataset_name)\n ))\n else:\n # resize too big images to smaller ones before any augmentation\n # (for performance reasons)\n height = image.shape[0]\n width = image.shape[1]\n aspect_ratio = width / height\n if width > 1000 or height > 1000:\n image = misc.imresize(image, (1000, int(1000 * aspect_ratio)))\n\n # augment image\n # converts augmented versions automatically to float32, 0-1\n augmentations = augment(image, **AUGMENTATIONS[dataset_name])\n\n # create list of original image + augmented versions\n images_aug = [image / 255.0]\n images_aug.extend(augmentations)\n\n # for each augmented version of the images:\n # resize it to target aspect ratio (e.g. same width and height),\n # save it\n for aug_idx, image_aug in enumerate(images_aug):\n image_aug = to_aspect_ratio_add(image_aug, target_aspect_ratio)\n filename = \"{:0>6}_{:0>3}.jpg\".format(img_idx, aug_idx)\n img_scaled = misc.imresize(image_aug, (scale_height, scale_width))\n misc.imsave(os.path.join(dataset_dir, filename), img_scaled)\n\n nb_processed += 1\n\n print(\"Processed %d images with %d errors.\" % (nb_processed, len(errors)))\n for (fp, err) in errors:\n print(\"File %s error:\" % (fp,))\n print(err)\n print(\"Finished.\")", "def preprocess_images(images_dir, image_dims, logger):\n find_str = images_dir + '/**/*.jpg'\n images = glob.glob(find_str, recursive=True)\n num_samples = get_num_samples(images_dir)\n\n # Load in the already processed file list\n proc_list_path = images_dir + '/processed_list.txt'\n if os.path.isfile(proc_list_path):\n with open(proc_list_path) as f:\n proc_list = f.read().split('\\n')\n else:\n proc_list = []\n \n i = 1\n for image in images:\n image_name = image.split('/')[-1]\n if image not in proc_list:\n logger.info(\"Processing %s\", \" {} - {}/{}\".format(\n image_name, i, num_samples))\n try:\n processed_image = ImageCheck.check_and_crop(image)\n except (ImageCheck.ObjectMissingError,\n ImageCheck.WormMissingError,\n ImageCheck.MultipleWormsError,\n ImageCheck.TooBlurryError) as e:\n logger.info(\"Processing Error: %s\",\n \"Image at: \\n{} \\n Produced error: {} \\n Removing\"\n \" image\".format(image, e))\n os.remove(image)\n i = i + 1\n continue\n cv2.imwrite(image, processed_image)\n with open(proc_list_path, 'a') as f:\n f.write(image + '\\n')\n else:\n logger.info(\"Skipping %s\", \" {} (already processed) - {}/{}\".format(\n image_name, i, num_samples))\n i = i + 1", "def _build_image_processing(self, shift_ratio=0):\n with tf.device(self.cpu_device):\n subset = 'train'\n image_producer_ops = []\n image_producer_stages = []\n images_splits, labels_splits = self.image_preprocessor.minibatch(\n self.dataset,\n subset=subset,\n use_datasets=self.params.use_datasets,\n cache_data=self.params.cache_data,\n shift_ratio=shift_ratio)\n images_shape = images_splits[0].get_shape()\n labels_shape = labels_splits[0].get_shape()\n for device_num in range(len(self.devices)):\n image_producer_stages.append(\n data_flow_ops.StagingArea(\n [images_splits[0].dtype, labels_splits[0].dtype],\n shapes=[images_shape, 
labels_shape]))\n return (image_producer_ops, image_producer_stages)", "def bulk_process_images(inputpath, outputpath, extension):\n\n for dirpath, dirnames, filenames in os.walk(inputpath):\n structure = os.path.join(outputpath, dirpath[len(inputpath) + 1:])\n for file in filenames:\n if file.endswith(extension):\n src = os.path.join(dirpath, file)\n dest = os.path.join(structure, file)\n img = load_and_preprocess_image(src)\n cv2.imwrite(dest, img)", "def haiku_multiprocessing(paths, num_processes=2):\n with Pool(num_processes) as pool:\n results = pool.map(single_process, paths)\n return results", "def calculatePixelMetricsMP(input_img, input_df, num_workers=8):\n\n manager = Manager()\n new_cir = manager.list()\n q = Queue()\n for index, row in input_df.iterrows():\n plot = row['plot']\n x = row['x']\n y = row['y']\n r = row['r']\n weight = row['weight']\n info = [plot, x, y, r, weight]\n q.put(info)\n workers = Pool(num_workers, calculatePixelMetricsQueue,(q, input_img, input_df, new_cir))\n workers.close()\n workers.join()\n \n header = ['plot', 'x', 'y', 'r', 'weight', 'core', 'inner', 'outer']\n print(len(new_cir))\n output_df = pd.DataFrame(list(new_cir), columns=header)\n return output_df", "def applyToBatch(batch, operation, parallel_safe):\n logging.info(\"Applying operation to batch...\"); t0 = time()\n if operation is None:\n logging.info(\"Nothing to do\")\n return batch\n \n if parallel_safe:\n ret = ProcessPool().map(operation, batch)\n else:\n ret = list(map(operation, batch))\n \n t = time()-t0\n logging.info(f\"Finished applying to batch in {t}s, average time per image was {t/len(batch)}s\")\n return ret", "def process_images(self):\n source_images = self.get_build_images()\n self.log.info(\"Got %s images for publishing. Processing..\", len(source_images))\n\n for image in source_images:\n self.fetch_image(image)\n\n for target in image.push_registries:\n for tag in image.release_tags:\n repository = \"%s/%s\" % (target, image.repository.name)\n self.tag_image(image, repository, tag)\n retry_count = 1\n while retry_count <= self.retry_limit:\n self.log.info(\"Pushing %s:%s to %s (%d/%d)\", repository, tag, target, retry_count, self.retry_limit)\n try:\n self.publish_image(target, repository, tag)\n break\n except ImagePushError as e:\n self.log.error(\"%s\", e.message)\n retry_count = retry_count + 1\n else:\n return False\n return True", "def parallel_run():\n from IPython.parallel import Client\n\n c = Client() # here is where the client establishes the connection\n lv = c.load_balanced_view() # this object represents the engines (workers)\n\n\n rays = []\n maxs=25\n bounding = AABA(xmin=0, ymin=0, zmin=0, xmax=maxs, ymax=maxs, zmax=maxs,)\n gridd = np.zeros((maxs,maxs,maxs))\n # spectrum for red to nir leaves\n red_nir_leaves = spectrum(np.array([0.5, 0.85]), np.array([0.1, 0.6]), np.array([0.5, 0.1]))\n # spectrum for soil\n red_nir_soil = spectrum(np.array([0.5, 0.85]), np.array([0.3, 0.4]), np.array([0.0, 0.0]))\n\n\n # scattering setup\n scatt = BRDSF(red_nir_leaves, 0.0)\n lf = leaf(55.0, 0.8) # leaf angle distribution and leaf area density\n\n\n tasks = []\n for x in xrange(maxs):\n for y in xrange(maxs):\n tasks.append(lv.apply(prun, x,y, maxs, gridd, scatt, red_nir_soil, bounding, lf))\n\n result = [task.get() for task in tasks] # blocks until all results are back\n\n return results", "def combine_images(args):\n\n # Read all images into a cube (TODO: think about the RAM)\n with fits.open(args.input[0]) as im0:\n lx, ly = im0[0].data.shape\n ref_hdr = 
im0[0].header\n\n headers = [fits.open(im_name)[0].header for im_name in args.input]\n cube = numpy.ma.zeros((len(args.input), lx, ly))\n cube.mask = numpy.zeros_like(cube.data)\n for ii, im_name in enumerate(args.input):\n with astroim.Astroim(im_name) as im:\n cube.data[ii, :,:] = im.chips[0].data\n if im.chips[0].mask is not None:\n cube.mask[ii,:,:] = im.chips[0].mask\n\n # Scale images\n scale_functions = {\"median\": numpy.ma.median,\n \"mean\": numpy.ma.mean,\n \"mode\": scipy.stats.mstats.mode,\n \"none\": lambda x: 1}\n for ii, im_name in enumerate(args.input):\n func = scale_functions[args.scale.lower()]\n cube[ii,:,:] /= func(cube[ii,:,:])\n\n\n # Reproject all images to the ref_hdr\n for ii, _ in enumerate(args.input):\n if ii == 0:\n continue\n cube.data[ii,:,:], footprint = reproject_interp((cube.data[ii,:,:], headers[ii]), ref_hdr)\n cube.mask[ii,:,:], footprint = reproject_interp((cube.mask[ii,:,:], headers[ii]), ref_hdr)\n #whr = numpy.isnan(cube.data[ii,:,:])\n #cube.mask[ii,:,:][whr] = True\n\n # Do average\n average_functions = {\"median\": numpy.ma.median, \"mean\": numpy.ma.mean, \"sum\": numpy.ma.sum}\n func = average_functions[args.average.lower()]\n final_image = func(cube, axis=0)\n ref_hdr[\"NCOMBINE\"] = len(args.input)\n\n mask_name = utilities.replace_extension(args.output, \".fits.msk\")\n mask_name_header = utilities.replace_extension(os.path.basename(args.output), \".fits.msk\")\n ref_hdr[\"MASK\"] = mask_name_header\n fits.writeto(args.output, final_image.data, ref_hdr, clobber=True )\n fits.writeto(mask_name, numpy.array(final_image.mask, dtype=int), clobber=True)\n\n return args.output", "def run(self, images):\n\n if not (isinstance(images, Images)):\n raise Exception('Input data must be Images or a subclass')\n\n if len(images.dims.count) not in set([2, 3]):\n raise Exception('Number of image dimensions %s must be 2 or 3' % (len(images.dims.count)))\n\n self.isPrepared(images)\n\n # broadcast the reference\n bcReg = images.rdd.context.broadcast(self)\n\n def fitandtransform(im, reg):\n t = reg.value.getTransform(im)\n return t.apply(im)\n\n newrdd = images.rdd.mapValues(lambda im: fitandtransform(im, bcReg))\n\n return Images(newrdd).__finalize__(images)", "def load_images(image_types=None,\n directory=None,\n images_per_type=None,\n image_size=224,\n process=False,\n model=mobilenet_v2):\n\n images_numpy = []\n images_class = []\n\n for image_type in image_types:\n images_path = os.path.join(directory, image_type, '*.jpg')\n for i, filename in enumerate(glob.glob(images_path)):\n try:\n if i == images_per_type:\n break\n loaded_image = load_img(filename, target_size=(image_size, image_size))\n images_numpy.append(img_to_array(loaded_image))\n images_class.append(image_type)\n except Exception as e:\n print('TypeError: {}'.format(e))\n\n if process:\n image_batch = np.expand_dims(images_numpy, axis=0)\n images_processed = model.preprocess_input(image_batch.copy())\n images_class_processed = process_images_class(images_class)\n\n return images_processed[0], images_class_processed\n\n else:\n return images_numpy, images_class", "def process_image(image):\n # Open the image using PIL\n pil_image = Image.open(image)\n \n # Resize the image to 256x256 while maintining aspect ratio\n if pil_image.width > pil_image.height:\n resize_dim = (int(pil_image.width*256 / pil_image.height), 256)\n else:\n resize_dim = (256, int(pil_image.height*256 / pil_image.width))\n \n pil_image = pil_image.resize(resize_dim)\n \n # Crop image to center 224 pixles\n crop_box_dim 
= 224\n left = (pil_image.width - crop_box_dim)/2\n top = (pil_image.height - crop_box_dim)/2\n right = pil_image.width - (pil_image.width - crop_box_dim)/2\n bottom = pil_image.height - (pil_image.height - crop_box_dim)/2\n pil_image = pil_image.crop((left, top, right, bottom))\n \n # Update color channels\n np_image = np.array(pil_image)\n np_image_means = np.array([0.485, 0.456, 0.406])\n np_image_stddev = np.array([0.229, 0.224, 0.225])\n np_image = (np_image/255 - np_image_means) / np_image_stddev\n \n # PIL images and numpy arrays have color channels in the 3rd dimension\n # Transpose them to first dimension to match what PyTorch expects\n np_image = np_image.transpose((2,0,1))\n\n return np_image", "def main(params):\n mpi_vs_multiprocess_logging(\"process\", params)\n\n ifg_paths = []\n for ifg_path in params[cf.INTERFEROGRAM_FILES]:\n ifg_paths.append(ifg_path.sampled_path)\n\n rows, cols = params[\"rows\"], params[\"cols\"]\n\n return process_ifgs(ifg_paths, params, rows, cols)", "def run(self):\n\n im = None\n while im == None:\n im = self.vid_mem_reader.get_latest_image()\n if im == None:\n print \"not receiving images yet...\"\n time.sleep(0.2)\n\n #Wait for video source to be ready:\n #TODO: Shoud use vidmemreader, but this one never seem to return a resolution (at time of writing):\n #res = self.vid_mem_reader.get_resolution()\n \n #TODO: This should work, but it doesn't because OpenCV keeps on complaining about that im is not a IPL image \n #(while if you print it, it seems to be a IPL image).\n #print im\n size = cv.GetSize(im[0])\n #print size\n self.res = ({'width':size[0], 'height':size[1]})\n res = self.res\n\n self.transformer = util.speed_angle.SpeedAngle(None, res['width'], res['height'])\n \n while True:\n self.__ticker.tick()\n start_time = time.time()\n img = self.get_new_image()\n ''' Parallel Process Inside this module\n \n im = np.asarray(img[:,:])\n time_spent = time.time() - start_time\n \n #Parallel process\n \n self.parallel_rotate_image(im)\n self.logger.debug(\"Set one finished\")\n \n print \"Image Length: \", self.rotatedImages\n for img in self.rotatedImages:\n self.get_faces(img[0])\n self.update()\n \n self.rotatedImages = []\n '''\n im = np.asarray(img[:,:])\n \n image = self.rotate_image( im, [self.rotation])\n self.get_faces(image)\n self.update()\n\n #TODO: To be removed and or configurable:\n directory = \"/tmp/emergency/\"\n if not os.path.exists(directory):\n os.makedirs(directory) \n try:\n cv.SaveImage(directory + \"image.png\", image)\n except:\n print \"ERROR: Could not write image to /tmp/emergency/\"", "def _process_image_files_batch(thread_index, ranges, name, cnts, roots, num_shards):\n # Each thread produces N shards where N = int(num_shards / num_threads).\n # For instance, if num_shards = 128, and the num_threads = 2, then the first\n # thread would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0],\n ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in range(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)\n output_file = os.path.join(FLAGS.output_directory, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in files_in_shard:\n if i < cnts[0]:\n subset_idx = 0\n filename = ('%06d' % i) + filename_suffix\n else:\n subset_idx = 1\n filename = ('%06d' % (i - cnts[0])) + filename_suffix\n\n try:\n if name == 'test':\n _left_image, _right_image = _process_image(filename, subset_idx, name=='test')\n else:\n _left_image, _right_image, _disparity, _mask = _process_image(filename, subset_idx, name=='test')\n except Exception as e:\n print(e)\n print('SKIPPED: Unexpected eror while decoding %s, %s, %s.' % (filename, subset_idx, name))\n print(_left_image.shape, _right_image.shape, _disparity.shape, _mask.shape)\n continue\n\n if name == 'test':\n example = _convert_to_example(filename, subset_idx, _left_image, _right_image)\n else:\n example = _convert_to_example(filename, subset_idx, _left_image, _right_image, _disparity, _mask)\n writer.write(example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print('%s [thread %d]: Processed %d of %d images in thread batch.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print('%s [thread %d]: Wrote %d images to %s' %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print('%s [thread %d]: Wrote %d images to %d shards.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()", "def resize_all_images(image_dir, width, height, resize_type):\n if width == \"\" and height == \"\":\n return 0\n print(\"Enter resizing image.\")\n print(\"Enter resizing.\", width)\n pool = Pool(1)\n pool.starmap(resize_image, zip(\n image_dir, itertools.repeat(width), itertools.repeat(height), itertools.repeat(resize_type)))\n pool.close()\n pool.join()", "def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y", "def mpirun_pipeline(image=\"uber/horovod:0.13.11-tf1.10.0-torch0.4.0-py3.5\",\n\t\t\t\t\t\t batch_size=\"64\",\n\t\t\t\t\t\t optimizer='momentum',\n sync_source='https://github.com/tensorflow/benchmarks.git',\n git_sync_branch='cnn_tf_v1.9_compatible',\n 
data='user-susan:/training',\n gpus=1,\n workers=1,\n cpu_limit='2',\n metric='images/sec',\n memory_limit='10Gi'):\n\n env = ['NCCL_DEBUG=INFO','GIT_SYNC_BRANCH={0}'.format(git_sync_branch)]\n\n train=arena.mpi_job_op(\n \tname=\"all-reduce\",\n \timage=image,\n \tenv=env,\n data=[data],\n workers=workers,\n sync_source=sync_source,\n gpus=gpus,\n cpu_limit=cpu_limit,\n memory_limit=memory_limit,\n metrics=[metric],\n \tcommand=\"\"\"\n \tmpirun python code/benchmarks/scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py --model resnet101 \\\n \t--batch_size {0} --variable_update horovod --optimizer {1}\\\n \t--summary_verbosity=3 --save_summaries_steps=10\n \t\"\"\".format(batch_size, optimizer)\n )", "def main() -> co.Parallel:\n actors = [\"Oprah Winfrey\", \"Kate Mara\", \"Don Cheadle\", \"Dwayne Johnson\"]\n root = co.Parallel(image=_get_image())\n for actor in actors:\n root[actor] = co.Lazy(\n f\"python pipeline.py all_by_actor '{actor}'\"\n )\n return root", "def multi(video, processes):\n if processes < 0:\n processes = cpu_count() + processes\n elif processes == 0:\n raise ValueError('Number of processes must not be zero.')\n\n points = video.points\n points_split = tools.split_points(points, processes=processes)\n \n idi_kwargs = {\n 'cih_file': video.cih_file,\n }\n \n method_kwargs = {\n 'roi_size': video.method.roi_size, \n 'pad': video.method.pad, \n 'max_nfev': video.method.max_nfev, \n 'tol': video.method.tol, \n 'verbose': video.method.verbose, \n 'show_pbar': video.method.show_pbar\n }\n \n pool = Pool(processes=processes)\n results = [pool.apply_async(worker, args=(p, idi_kwargs, method_kwargs)) for p in points_split]\n pool.close()\n pool.join()\n\n out = []\n for r in results:\n _r = r.get()\n for i in _r:\n out.append(i)\n \n return np.asarray(out)", "def compute_img(self):\r\n self.load_img()\r\n self.check_shape()\r\n self.convert_img()\r\n self.img_computed = True", "def compute_parallel(self, inputs, communicator):\n self.compute_sequential([inputs], [communicator])", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateText(file)", "def all_images():\n\n total = set()\n jobs = [nomad.parse(get_job(job.template)) for job in config.enabled_jobs]\n for spec in jobs:\n for image in nomad.get_images(spec):\n if image is not None and image != 'None':\n total |= set([image])\n return total", "def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass", "def run(self,image, label, featureClasses, settings, enabledImageTypes,csvFile):\n print('Processing started')\n import time\n startTime = 
time.time()\n # grayscaleImage = sitk.ReadImage(sitkUtils.GetSlicerITKReadWriteAddress(imageNode.GetName()))\n grayscaleImage = sitk.ReadImage(image)\n #sitkUtils.PushToSlicer(label, labelNode.GetName(), overwrite=True, compositeView=2)\n labelsDict = {}\n if label:\n print(\"label={}\".format(label))\n labelsDict = self.prepareLabelsFromLabelmap(label, grayscaleImage, labelsDict)\n # if segmentationNode:\n # labelsDict = self.prepareLabelsFromSegmentation(segmentationNode, grayscaleImage, labelsDict)\n\n #self.featureValues = extractor.execute(grayscaleImage, labelImage, images, **kwargs)\n featuresDict = {}\n for l in labelsDict.keys():\n print(\"Calculating features for \"+l)\n try:\n featuresDict[l] = self.calculateFeatures(grayscaleImage,\n labelsDict[l],\n featureClasses,\n settings,\n enabledImageTypes)\n except:\n print('calculateFeatures() failed')\n traceback.print_exc()\n self.saveFeatures2CSVFile(featuresDict,csvFile)\n print(\"Completed\")\n endtime = time.time()\n print(\"totalTime={}\".format(endtime-startTime))\n # return featuresDict", "def general_image_folder(opt):\n #### configurations\n read_all_imgs = False # whether real all images to memory with multiprocessing\n # Set False for use limited memory\n BATCH = 5000 # After BATCH images, lmdb commits, if read_all_imgs = False\n n_thread = 40\n ########################################################\n img_folder = opt['img_folder']\n lmdb_save_path = opt['lmdb_save_path']\n meta_info = {'name': opt['name']}\n if not lmdb_save_path.endswith('.lmdb'):\n raise ValueError(\"lmdb_save_path must end with \\'lmdb\\'.\")\n if osp.exists(lmdb_save_path):\n print('Folder [{:s}] already exists. Exit...'.format(lmdb_save_path))\n sys.exit(1)\n\n #### read all the image paths to a list\n print('Reading image path list ...')\n all_img_list = sorted(glob.glob(osp.join(img_folder, '*')))\n keys = []\n for img_path in all_img_list:\n keys.append(osp.splitext(osp.basename(img_path))[0])\n\n if read_all_imgs:\n #### read all images to memory (multiprocessing)\n dataset = {} # store all image data. 
list cannot keep the order, use dict\n print('Read images with multiprocessing, #thread: {} ...'.format(n_thread))\n pbar = util.ProgressBar(len(all_img_list))\n\n def mycallback(arg):\n '''get the image data and update pbar'''\n key = arg[0]\n dataset[key] = arg[1]\n pbar.update('Reading {}'.format(key))\n\n pool = Pool(n_thread)\n for path, key in zip(all_img_list, keys):\n pool.apply_async(read_image_worker, args=(path, key), callback=mycallback)\n pool.close()\n pool.join()\n print('Finish reading {} images.\\nWrite lmdb...'.format(len(all_img_list)))\n\n #### create lmdb environment\n data_size_per_img = cv2.imread(all_img_list[0], cv2.IMREAD_UNCHANGED).nbytes\n print('data size per image is: ', data_size_per_img)\n data_size = data_size_per_img * len(all_img_list)\n env = lmdb.open(lmdb_save_path, map_size=data_size * 10)\n\n #### write data to lmdb\n pbar = util.ProgressBar(len(all_img_list))\n txn = env.begin(write=True)\n resolutions = []\n for idx, (path, key) in enumerate(zip(all_img_list, keys)):\n pbar.update('Write {}'.format(key))\n key_byte = key.encode('ascii')\n data = dataset[key] if read_all_imgs else cv2.imread(path, cv2.IMREAD_UNCHANGED)\n if data.ndim == 2:\n H, W = data.shape\n C = 1\n else:\n H, W, C = data.shape\n txn.put(key_byte, data)\n resolutions.append('{:d}_{:d}_{:d}'.format(C, H, W))\n if not read_all_imgs and idx % BATCH == 0:\n txn.commit()\n txn = env.begin(write=True)\n txn.commit()\n env.close()\n print('Finish writing lmdb.')\n\n #### create meta information\n # check whether all the images are the same size\n assert len(keys) == len(resolutions)\n if len(set(resolutions)) <= 1:\n meta_info['resolution'] = [resolutions[0]]\n meta_info['keys'] = keys\n print('All images have the same resolution. Simplify the meta info.')\n else:\n meta_info['resolution'] = resolutions\n meta_info['keys'] = keys\n print('Not all images have the same resolution. 
Save meta info for each image.')\n\n pickle.dump(meta_info, open(osp.join(lmdb_save_path, 'meta_info.pkl'), \"wb\"))\n print('Finish creating lmdb meta info.')", "def process_images(path, dataset):\n \n print(f\"Processing images {os.path.join(path, dataset)}\", flush=True)\n label_file = os.path.join(path, dataset + '-labels-idx1-ubyte')\n with open(label_file, 'rb') as file:\n _, num = struct.unpack(\">II\", file.read(8))\n labels = numpy.fromfile(file, dtype=numpy.int8) #int8\n new_labels = numpy.zeros((num, 10))\n new_labels[numpy.arange(num), labels] = 1\n\n img_file = os.path.join(path, dataset + '-images-idx3-ubyte')\n with open(img_file, 'rb') as file:\n _, num, rows, cols = struct.unpack(\">IIII\", file.read(16))\n imgs = numpy.fromfile(file, dtype=numpy.uint8).reshape(num, rows, cols) #uint8\n imgs = imgs.astype(numpy.float32) / 255.0\n\n os.remove(label_file); os.remove(img_file)\n print(f\"Saving files under {os.path.join(path, dataset)} path\", flush=True)\n numpy.savez_compressed(os.path.join(path, dataset), imgs=imgs, labels=labels)", "def REDS(mode):\n #### configurations\n read_all_imgs = False # whether real all images to memory with multiprocessing\n # Set False for use limited memory\n BATCH = 5000 # After BATCH images, lmdb commits, if read_all_imgs = False\n if mode == 'train_sharp':\n img_folder = '../../datasets/REDS/train_sharp'\n lmdb_save_path = '../../datasets/REDS/train_sharp_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_sharp_bicubic':\n img_folder = '../../datasets/REDS/train_sharp_bicubic'\n lmdb_save_path = '../../datasets/REDS/train_sharp_bicubic_wval.lmdb'\n H_dst, W_dst = 180, 320\n elif mode == 'train_blur_bicubic':\n img_folder = '../../datasets/REDS/train_blur_bicubic'\n lmdb_save_path = '../../datasets/REDS/train_blur_bicubic_wval.lmdb'\n H_dst, W_dst = 180, 320\n elif mode == 'train_blur':\n img_folder = '../../datasets/REDS/train_blur'\n lmdb_save_path = '../../datasets/REDS/train_blur_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_blur_comp':\n img_folder = '../../datasets/REDS/train_blur_comp'\n lmdb_save_path = '../../datasets/REDS/train_blur_comp_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_sharp_flowx4':\n img_folder = '../../datasets/REDS/train_sharp_flowx4'\n lmdb_save_path = '../../datasets/REDS/train_sharp_flowx4.lmdb'\n H_dst, W_dst = 360, 320\n n_thread = 40\n ########################################################\n if not lmdb_save_path.endswith('.lmdb'):\n raise ValueError(\"lmdb_save_path must end with \\'lmdb\\'.\")\n if osp.exists(lmdb_save_path):\n print('Folder [{:s}] already exists. Exit...'.format(lmdb_save_path))\n sys.exit(1)\n\n #### read all the image paths to a list\n print('Reading image path list ...')\n all_img_list = data_util._get_paths_from_images(img_folder)\n keys = []\n for img_path in all_img_list:\n split_rlt = img_path.split('/')\n folder = split_rlt[-2]\n img_name = split_rlt[-1].split('.png')[0]\n keys.append(folder + '_' + img_name)\n\n if read_all_imgs:\n #### read all images to memory (multiprocessing)\n dataset = {} # store all image data. 
list cannot keep the order, use dict\n print('Read images with multiprocessing, #thread: {} ...'.format(n_thread))\n pbar = util.ProgressBar(len(all_img_list))\n\n def mycallback(arg):\n '''get the image data and update pbar'''\n key = arg[0]\n dataset[key] = arg[1]\n pbar.update('Reading {}'.format(key))\n\n pool = Pool(n_thread)\n for path, key in zip(all_img_list, keys):\n pool.apply_async(read_image_worker, args=(path, key), callback=mycallback)\n pool.close()\n pool.join()\n print('Finish reading {} images.\\nWrite lmdb...'.format(len(all_img_list)))\n\n #### create lmdb environment\n data_size_per_img = cv2.imread(all_img_list[0], cv2.IMREAD_UNCHANGED).nbytes\n print('data size per image is: ', data_size_per_img)\n data_size = data_size_per_img * len(all_img_list)\n env = lmdb.open(lmdb_save_path, map_size=data_size * 10)\n\n #### write data to lmdb\n pbar = util.ProgressBar(len(all_img_list))\n txn = env.begin(write=True)\n for idx, (path, key) in enumerate(zip(all_img_list, keys)):\n pbar.update('Write {}'.format(key))\n key_byte = key.encode('ascii')\n data = dataset[key] if read_all_imgs else cv2.imread(path, cv2.IMREAD_UNCHANGED)\n if 'flow' in mode:\n H, W = data.shape\n assert H == H_dst and W == W_dst, 'different shape.'\n else:\n H, W, C = data.shape\n assert H == H_dst and W == W_dst and C == 3, 'different shape.'\n txn.put(key_byte, data)\n if not read_all_imgs and idx % BATCH == 0:\n txn.commit()\n txn = env.begin(write=True)\n txn.commit()\n env.close()\n print('Finish writing lmdb.')\n\n #### create meta information\n meta_info = {}\n meta_info['name'] = 'REDS_{}_wval'.format(mode)\n channel = 1 if 'flow' in mode else 3\n meta_info['resolution'] = '{}_{}_{}'.format(channel, H_dst, W_dst)\n meta_info['keys'] = keys\n pickle.dump(meta_info, open(osp.join(lmdb_save_path, 'meta_info.pkl'), \"wb\"))\n print('Finish creating lmdb meta info.')", "def execute(self, images_and_density_maps):\n # these imports are used in eval(), don't remove them\n import CCAugmentation.outputs as cca_out\n import CCAugmentation.transformations as cca_trans\n _ = cca_out, cca_trans\n\n op_str = self._get_op_str()\n const_str = self._get_const_str()\n\n for image_and_density_map in images_and_density_maps:\n rand_str = self._get_rand_str()\n args_str = \",\".join([const_str, rand_str]) if const_str and rand_str else const_str + rand_str\n op = eval(f\"{op_str}({args_str})\")\n for result in op.execute([image_and_density_map]):\n yield result", "def query(self, images):\n if self.pool_size == 0: # if the buffer size is 0, do nothing\n return images\n return_images = []\n for image in images:\n image = torch.unsqueeze(image.data, 0)\n if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer\n self.num_imgs = self.num_imgs + 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = random.uniform(0, 1)\n if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer\n random_id = random.randint(0, self.pool_size - 1) # randint is inclusive\n tmp = self.images[random_id].clone()\n self.images[random_id] = image\n return_images.append(tmp)\n else: # by another 50% chance, the buffer will return the current image\n return_images.append(image)\n return_images = torch.cat(return_images, 0) # collect all the images and return\n return return_images", "def execute_augmentation(queue_images: Queue, progress: tqdm, output: str, factor: int) -> None:\n while not 
queue_images.empty():\n element = queue_images.get(block=False)\n augmentation(element, output, factor)\n progress.update(1)", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def process_batch(self, image_batch):\n images = []\n for image_data in image_batch:\n image_resize = cv2.resize(image_data, (0,0), fx=0.5, fy=0.5) #NOTE\n images.append(image_resize)\n\n return np.array(images)", "def load_preprocess_images(image_paths: List[str], image_size: tuple) -> List[np.ndarray]:\n image_size = image_size[1:] # we do not need the number of channels\n images = []\n for image_path in image_paths:\n images.append(load_preprocess_image(image_path, image_size))\n return images", "def _compute_var_image_numpy_parallel(\n src_var_image: np.ndarray,\n dst_src_ij_images: np.ndarray,\n dst_var_image: np.ndarray\n):\n dst_height = dst_var_image.shape[-2]\n for dst_j in nb.prange(dst_height):\n _compute_var_image_for_dest_line(\n dst_j, src_var_image, dst_src_ij_images, dst_var_image\n )", "def process_next_image(self):\n if self.queue:\n next_queue_item = self.queue.popleft()\n if type(next_queue_item) == str:\n if next_queue_item == 'clear':\n self.signal_status_message.emit('Clearing ROI data (from request in image queue)')\n self.clear()\n return\n [image,file_id,image_num] = next_queue_item\n # print('image_num',image_num)\n # print('next image',self.next_image)\n self.signal_status_message.emit('Started processing ID {} Im {}'.format(file_id,image_num))\n image = image - self.emccd_bias # don't edit in place because this seemed to cause an issue with images not showing in GUI. 
Maybe not thread safe?\n # print('image min',np.min(image))\n # print('image max',np.max(image))\n image_num_too_big = False\n for group in self.roi_groups:\n for roi in group.rois:\n try:\n roi.counts[image_num][file_id] = image[roi.x:roi.x+roi.w,roi.y:roi.y+roi.h].sum()\n except IndexError: # image_num was not valid for the number of images that MAIA is expecting\n image_num_too_big = True\n if image_num_too_big:\n self.signal_status_message.emit('Image number {} is greater than max expected images, so this image has been ignored (most likely cause is rearrangement toggle).')\n self.signal_status_message.emit('Finished processing ID {} Im {}'.format(file_id,image_num))\n self.calculate_thresholds()", "def pair_images():\n # TODO: maybe implement some way to skip frames if queue is too long\n queue_a = xy_imgs\n queue_b = z_imgs\n if len(queue_a) == 0 or len(queue_b) == 0:\n return\n a_prev = None\n b_prev = None\n a = queue_a[0]\n b = queue_b[0]\n if a.ts < b.ts:\n while a.ts < b.ts:\n a_prev = queue_a.popleft()\n if len(queue_a) == 0:\n if b.within_threshold(a_prev):\n yield process_images(a_prev, b)\n return\n a = queue_a[0]\n closest_a = b.closest_to(a, a_prev)\n if closest_a is not None:\n yield process_images(closest_a, b)\n else:\n while b.ts < a.ts:\n b_prev = queue_b.popleft()\n if len(queue_b) == 0:\n if a.within_threshold(b_prev):\n yield process_images(a, b_prev)\n return\n b = queue_b[0]\n closest_b = a.closest_to(b, b_prev)\n if closest_b is not None:\n yield process_images(a, closest_b)" ]
[ "0.7019778", "0.701473", "0.68229294", "0.6802597", "0.67542833", "0.6570712", "0.6545006", "0.65141946", "0.6469355", "0.6455748", "0.6437736", "0.6373732", "0.6337872", "0.6320698", "0.6302606", "0.6255835", "0.6229472", "0.62148833", "0.6202636", "0.61866295", "0.61764526", "0.6150914", "0.61422175", "0.6139899", "0.61092854", "0.6103789", "0.6096633", "0.6084338", "0.6081984", "0.6056349", "0.60544425", "0.60125476", "0.6011628", "0.6003218", "0.5985592", "0.5985277", "0.59809476", "0.5966458", "0.5962644", "0.5954697", "0.5952728", "0.5945884", "0.59340256", "0.5914991", "0.5910856", "0.5903178", "0.5888857", "0.58792806", "0.58637893", "0.5862652", "0.5846829", "0.5844264", "0.5836815", "0.58289874", "0.5828507", "0.5827354", "0.58261764", "0.5823627", "0.58176905", "0.5815725", "0.5812806", "0.58059716", "0.58056295", "0.58014864", "0.5799791", "0.57885695", "0.57799655", "0.5779944", "0.57715267", "0.5769696", "0.5757933", "0.5754358", "0.57370317", "0.5736786", "0.5733541", "0.5727277", "0.5724785", "0.57194847", "0.57165354", "0.57104695", "0.57073003", "0.5702725", "0.57012105", "0.569182", "0.56909305", "0.56787676", "0.56767887", "0.56750906", "0.56750816", "0.56702733", "0.5659374", "0.56571764", "0.5652742", "0.5648781", "0.5643737", "0.56334025", "0.5629055", "0.5619187", "0.56186384", "0.5611631" ]
0.56445336
94
Return the serializer instance that should be used for validating and deserializing input, and for serializing output.
def get_serializer_in(self, *args, **kwargs): serializer_class = self.get_serializer_class_in() kwargs['context'] = self.get_serializer_context() return serializer_class(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSerializer():", "def get_serializer(self, *args, **kwargs):\n serializer_class = self.get_serializer_class()\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)", "def serializer_for(self, obj):\n # 1-NULL serializer\n if obj is None:\n return self._null_serializer_adapter\n\n obj_type = type(obj)\n serializer = None\n\n # 2-Default serializers, Dataserializable, Portable, primitives, arrays, String and some helper types(BigInteger etc)\n serializer = self.lookup_default_serializer(obj_type)\n\n # 3-Custom registered types by user\n if serializer is None:\n serializer = self.lookup_custom_serializer(obj_type)\n\n # 4 Internal serializer\n if serializer is None and self._global_serializer_adaptor is None:\n serializer = self.lookup_python_serializer(obj_type)\n\n # 5-Global serializer if registered by user\n if serializer is None:\n serializer = self.lookup_global_serializer(obj_type)\n\n if serializer is not None:\n if self._active:\n raise HazelcastSerializationError(\"There is no suitable serializer for:\" + str(obj_type))\n else:\n raise HazelcastInstanceNotActiveError()\n return serializer", "def get_serializer_class(self):\n assert self.serializer_class is not None, (\n \"'%s' should either include a `serializer_class` attribute, \"\n \"or override the `get_serializer_class()` method.\"\n % self.__class__.__name__\n )\n\n return self.serializer_class", "def get_serializer_class(self):\n return self.serializer_class", "def get_serializer():\n if 'serializer' in _CACHE:\n serializer = _CACHE['serializer']\n else:\n name = getattr(settings, 'DJANGO_NUMERICS_SERIALIZER_BACKEND',\n _DEFAULT_SERIALIZER)\n serializer = import_string(name)()\n _CACHE['serializer'] = serializer\n return serializer", "def get_serializer_class(self):\n if self.request.method == 'POST':\n return my_serializers.UploadedDataPostSerializer\n return self.serializer_class", "def get_serializer_class(self):\n pk_lookup, dataid_lookup = self.lookup_fields\n form_pk = self.kwargs.get(pk_lookup)\n dataid = self.kwargs.get(dataid_lookup)\n fmt = self.kwargs.get(\"format\", self.request.GET.get(\"format\"))\n sort = self.request.GET.get(\"sort\")\n fields = self.request.GET.get(\"fields\")\n if fmt == Attachment.OSM:\n serializer_class = OSMSerializer\n elif fmt == \"geojson\":\n serializer_class = GeoJsonSerializer\n elif fmt == \"xml\":\n serializer_class = DataInstanceXMLSerializer\n elif (\n form_pk is not None\n and dataid is None\n and form_pk != self.public_data_endpoint\n ):\n if sort or fields:\n serializer_class = JsonDataSerializer\n else:\n serializer_class = DataInstanceSerializer\n else:\n serializer_class = super().get_serializer_class()\n\n return serializer_class", "def serializer(self) -> ArchiveSerializer:\n return serializer_from_dict(self.doc.get('serializer'))", "def get_serializer_class(self):\n if self.action == 'create':\n return self.serializer_classes.get('create')\n else:\n return self.serializer_classes.get('default')", "def get_serializer_class(self):\n if self.action == 'list' or self.action == 'retrieve':\n return SillaSerializer\n else:\n return SillaSerializer", "def get_serializer_class(self):\n serializer_map = {\n \"witness\": WitnessServiceSerializer,\n \"review\": ReviewServiceSerializer,\n \"certificate_provider\": LPACertificateServiceSerializer,\n }\n\n return serializer_map[self.kwargs.get(\"service_type\", \"witness\")]", "def force_serializer_instance(serializer):\n if inspect.isclass(serializer):\n assert 
issubclass(serializer, serializers.BaseSerializer), \"Serializer required, not %s\" % serializer.__name__\n return serializer()\n\n assert isinstance(serializer, serializers.BaseSerializer), \\\n \"Serializer class or instance required, not %s\" % type(serializer).__name__\n return serializer", "def get_query_serializer(self):\n query_serializer = self.overrides.get('query_serializer', None)\n if query_serializer is not None:\n query_serializer = force_serializer_instance(query_serializer)\n return query_serializer", "def get_serializer_class(self):\n if self.request.method in ['GET', ]:\n return QuestionSerializer\n return QuestionGroupQuestionSerializer", "def get_deserialization_instance(cls):\n if cls.__orig__ is None:\n return cls()\n else:\n return cls.__orig__()", "def get_serializer(self, *args, **kwargs):\n kwargs['context'] = self.get_serializer_context()\n realm = kwargs['context'].get('realm', None)\n if realm is not None:\n serializer_class = ItemSerializer\n else:\n serializer_class = self.get_serializer_class()\n return serializer_class(*args, **kwargs)", "def get_serializer(name):\n\n dumps = importlib.import_module(name).dumps\n\n # Serializers that handle unicode streams and a are safe against comments\n # can be used directly\n if name == 'json':\n import json\n return json.dumps\n\n def serializer(x):\n # Serialize\n try:\n data = dumps(x)\n except Exception as ex:\n raise SerializationError(ex)\n\n # Transmit with b85 encode: safe characters and no newlines\n return (b'+' + base64.b85encode(data)).decode('ascii')\n\n return serializer", "def get_serializer_class(self):\n if self.action == 'login':\n return UserLoginSerializer\n if self.action == 'signup':\n return UserSignUpSerializer\n if self.action == 'remember_code':\n return RememberCodeSerializer\n return UserModelSerializer", "def get_serializer_class(self):\n if self.action == 'list' or self.action == 'retrieve':\n return EventosSerializer\n else:\n return EventosRegisterSerializer", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.BookDetailSerializer\n elif self.action == 'upload_image':\n return serializers.BookImageSerializer\n\n return self.serializer_class", "def initialize_serializer(self):\n serializer = self.pyleus_config.get('serializer')\n if serializer in SERIALIZERS:\n self._serializer = SERIALIZERS[serializer](\n self._input_stream, self._output_stream)\n else:\n raise ValueError(\"Unknown serializer: {0}\", serializer)", "def get_request_serializer(self):\n body_override = self.overrides.get('request_body', None)\n\n if body_override is not None:\n if body_override is no_body:\n return None\n if self.method not in self.body_methods:\n raise SwaggerGenerationError(\"request_body can only be applied to PUT, PATCH or POST views; \"\n \"are you looking for query_serializer or manual_parameters?\")\n if isinstance(body_override, openapi.Schema.OR_REF):\n return body_override\n return force_serializer_instance(body_override)\n elif self.method in self.body_methods:\n return self.get_view_serializer()\n\n return None", "def get_serializer_class(self):\n return self.serializers.get(self.action,\n self.serializers['default'])", "def get_serializer_class(self):\n return {\"create\": ReportFileSerializer, }.get(self.action, ReportFileSerializer)", "def get_serializer_class(self):\n return self.serializer_classes.get(self.action,\n self.default_serializer_class)", "def get_serializer_class(self):\n\n if self.action in ['list', 'retrieve']:\n return OrderListSerializer\n 
else:\n return OrderSerializer", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return self.serializer_classes.get('retrieve')\n elif self.action == 'create':\n return self.serializer_classes.get('create')\n elif self.action == 'update':\n return self.serializer_classes.get('update')\n else:\n return self.serializer_classes.get('default')", "def get_serializer_class(self):\n if self.action in [\"list\", \"retrieve\"]:\n return OrderSerializer\n return OrderCreateSerializer", "def get_view_serializer(self):\n if not hasattr(self.view, 'get_serializer'):\n return None\n return self.view.get_serializer()", "def get_serializer_class(self):\n if (self.request.method == \"GET\" and\n self.request.query_params.get(\"nested\")):\n return serializers.ReviewNestedSerializer\n return serializers.ReviewSerializer", "def serializer_class(self):", "def get_serializer_class(self):\n if self.action == 'list' or self.action == 'retrieve':\n return UserReadSerializer\n else:\n return UserSerializer", "def get_serializer_class(self):\n if self.action == 'update':\n return UserChangePassword\n elif self.action == 'create':\n return UserVerifyTokenSerializer\n else:\n None", "def get_serializer(self, *args, **kwargs):\n if self.__class__.serializer_class is not None:\n cls = self.__class__.serializer_class\n else:\n if self.action == 'list' and hasattr(self.__class__,\n 'list_serializer_class'):\n cls = self.__class__.list_serializer_class\n elif hasattr(self.__class__, 'detail_serializer_class'):\n cls = self.__class__.detail_serializer_class\n else:\n # error handling\n return super().get_serializer(*args, **kwargs)\n\n # default the context\n kwargs['context'] = self.get_serializer_context()\n\n return cls(*args, **kwargs)", "def get_query_serializer(self):\n serializer = super().get_query_serializer()\n serializer_class = getattr(self.view, 'request_serializer_class', None)\n\n if not serializer and serializer_class:\n serializer = serializer_class()\n\n return serializer", "def serialize(serializer_class, instance, data=None, **kwargs):\n\n if data is None:\n serializer = serializer_class(instance, **kwargs)\n else:\n serializer = serializer_class(instance, data=data, **kwargs)\n serializer.is_valid(raise_exception=True)\n\n return serializer", "def get_serializer_class(self):\n #overide function this is a fun that called to retrive the serailizer class\n #for perticular request\n #this fun are used for wanted to chang the serailzer class for the different action\n #that are available on the recip0e viewset\n if self.action == 'retrieve':\n print('okkkkkkkkkkkkw')\n return serializers.RecipeDetailSerializer\n elif self.action == 'upload_image':\n print('okkkkkkkkkkkkkkkkk')\n return serializers.RecipeImageSerailzer\n\n return self.serializer_class", "def get_serializer_class(self):\n serializer_map = {\n \"RealEstate\": RealEstateSerializer,\n \"BankAccount\": BankAccountSerializer,\n \"Insurance\": InsuranceSerializer,\n \"Investment\": InvestmentSerializer,\n \"Company\": CompanySerializer,\n \"Residual\": ResidualSerializer,\n }\n\n return serializer_map[self.kwargs.get(\"asset_type\", \"RealEstate\")]", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.OperationDetailSerializer\n\n return self.serializer_class", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.AccountDetailSerializer\n\n return self.serializer_class", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return 
serializers.ProductDetailSerializer\n\n return self.serializer_class", "def get_serializer_class(self, *args, **kwargs):\n\n if self.request.method in ['GET', 'POST']:\n serializer_class = SearchSerialzer\n\n elif self.action == 'destroy':\n serializer_class = SearchNotRequiredSerializer\n\n elif self.action == 'destroy_all':\n serializer_class = SearchDeleteAllSerializer\n\n return serializer_class", "def get_serializer_class(self) -> serializers.ModelSerializer:\n if self.request.user.rank == 'Management':\n return employee_serializers.ManagerSerializer\n return employee_serializers.EmployeeSerializer", "def get_serializer_class(self):\n\n if self.action == 'create':\n return CreateRideSerializer\n\n if self.action == 'join':\n return JoinRideSerializer\n\n if self.action == 'finish':\n return EndRideSerializer\n\n if self.action == 'qualify':\n return QualifyRideSerializer\n\n return RideModelSerializer", "def default_serializer(_cls: Type[Any], obj: Any) -> Any:", "def get_user_serializer_class(self):\n return durin_settings.USER_SERIALIZER", "def lookup_serializer(encoding: str) -> Serializer:\n try:\n return _SERIALIZERS[encoding]\n except KeyError:\n raise ValueError(f\"Unregistered encoding {encoding!r}\")", "def get_serializer_class(self):\n group = self.request.query_params.get('type_group')\n return self.serializer_lookup.get(group, serializers.MeasurementTypeSerializer)", "def get_serializer(self, *args, **kwargs):\n serializer_class = self.get_serializer_class()\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(many=True, *args, **kwargs)", "def serializers(self, **kwargs):\n return serializers.serializers(self._host, self._session, **kwargs)", "def get_serializer_class(self):\n if self.action in ('retrieve', 'list', 'update', 'partial_update'):\n return ListaPedidoSerializer\n return PedidoSerializer", "def get_proto_serializer():\n def _serialize_proto(proto):\n return proto.SerializeToString()\n return _serialize_proto", "def get_serializer_class(self, *args, **kwargs):\n if self.action == 'list':\n return self.serializer_list_class\n else:\n return self.serializer_class", "def by_extension_and_format(cls, extension: str, ser_format: str):\n if cls._format_to_serializer is None:\n cls._register_subclasses()\n if ser_format == 'auto':\n serializer = cls._extension_to_serializer.get(extension.lstrip('.'))\n else:\n serializer = cls._format_to_serializer.get(ser_format)\n\n if serializer is None:\n raise InvalidExtensionOrFormat(\n 'Cannot find serializer for format: %s and extension %s' % (\n ser_format, extension))\n return serializer", "def serialize(self):\n serialization = self._weaver.Serialize()\n if not serialization:\n raise AssertionError(\n 'Weaver Serialization failed: %s' % self._weaver.error_string())\n return serialization", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return ContaRetrieveSerializer\n\n return ContaSerializer", "def get_serializer_class(self):\n if self.request is None or self.request.method == \"POST\":\n return serializers.ProfileItemDetailSerializer\n\n return serializers.ProfileItemListSerializer", "def get_serializer(self, *args, **kwargs):\n try:\n params = self.request.query_params\n\n for key in ['part_detail', 'location_detail', 'supplier_part_detail', 'tests']:\n kwargs[key] = str2bool(params.get(key, False))\n except AttributeError:\n pass\n\n kwargs['context'] = self.get_serializer_context()\n\n return self.serializer_class(*args, **kwargs)", "def get_serializer_class(self):\n profile 
= self.get_object()\n\n # Owner of the profile\n if self.request.user == profile.user:\n if profile.filled_out or self.request.data.get('filled_out'):\n return self.serializer_class_filled_out\n else:\n return self.serializer_class_owner\n # Staff or instructor is looking at profile\n elif not self.request.user.is_anonymous and self.request.user.role_set.filter(\n role__in=(Staff.ROLE_ID, Instructor.ROLE_ID),\n program__programenrollment__user__profile=profile,\n ).exists():\n return self.serializer_class_staff\n # Profile is public\n elif profile.account_privacy == Profile.PUBLIC:\n return self.serializer_class_limited\n # Profile is public to mm verified users only\n elif profile.account_privacy == Profile.PUBLIC_TO_MM:\n return self.serializer_class_limited\n # this should never happen, but just in case\n return self.serializer_class_limited", "def get_serializer_class(self):\n if self.request.method == \"POST\":\n return VideoUsersCreationSerializer\n else: \n return VideoUserSerializer", "def getDeserializer():", "def get_serializer(self, *args, **kwargs):\n kwargs['part_detail'] = True\n kwargs['location_detail'] = True\n kwargs['supplier_part_detail'] = True\n kwargs['context'] = self.get_serializer_context()\n\n return self.serializer_class(*args, **kwargs)", "def _get_serializer_for_value(value, serializing):\n _init_serialization()\n\n cls = type(value)\n is_class = inspect.isclass(value)\n\n serialization_cls = None\n\n if inspect.isclass(value):\n if cls in _serialization_map:\n serialization_cls = _serialization_map[cls]\n elif is_class:\n serialization_cls = ClassSerialization\n else:\n if cls in _deconstructed_serialization_map:\n serialization_cls = _deconstructed_serialization_map[cls]\n elif (Enum is not None and\n (serializing and issubclass(cls, Enum)) or\n (not serializing and\n cls is dict and\n value.get('_enum') is True)):\n serialization_cls = EnumSerialization\n elif serializing and hasattr(value, 'deconstruct'):\n serialization_cls = DeconstructedSerialization\n elif (not serializing and\n cls is dict and\n value.get('_deconstructed') is True):\n serialization_cls = DeconstructedSerialization\n elif isinstance(value, BasePlaceholder):\n serialization_cls = PlaceholderSerialization\n elif cls in _serialization_map:\n serialization_cls = _serialization_map[cls]\n\n return serialization_cls", "def get_serializer_class(self, *args, **kwargs):\n\n if self.request.method == 'GET':\n serializer_class = FavoriteModelSerializer\n\n elif self.request.method == 'POST':\n serializer_class = FavoriteCreateSerializer\n\n elif self.action == 'destroy':\n serializer_class = FavoriteDestorySerializer\n\n elif self.action == 'destroy_all':\n serializer_class = FavoriteDestroyAllSerializer\n\n return serializer_class", "def serializer_from_settings():\n if settings.PROFILE_SERIALIZER:\n return import_string(settings.PROFILE_SERIALIZER)\n\n return UserProfileSerializer", "def __init__(self, serializer=None):\r\n self.client = Client()\r\n self.serializer = serializer\r\n\r\n if not self.serializer:\r\n self.serializer = Serializer()", "def deserialize(serializer_class, data, **kwargs):\n\n serializer = serializer_class(data=data, **kwargs)\n serializer.is_valid(raise_exception=True)\n\n return serializer", "def get_serializer_class(self):\n if self.action in (\"list\",):\n return serializers.NotesGroupListSerializer\n\n return serializers.NotesGroupDetailSerializer", "def serialize(self) -> typing.Any:\n return self._serialize(self.__dict__)", "def get_serializer_class(self):\n if 
self.request is None or self.request.method == \"POST\":\n return serializers.ProfileTopicDetailSerializer\n\n return serializers.ProfileTopicListSerializer", "def get_serializer_class(self):\n if self.action == \"list_attendances\":\n return serializers.LiveAttendanceGraphSerializer\n return super().get_serializer_class()", "def serialize(self, value, **kwargs):\n kwargs.update({'include_class': kwargs.get('include_class', True)})\n if self.serializer is not None:\n return self.serializer(value, **kwargs)\n if value is None:\n return None\n if isinstance(value, HasProperties):\n return value.serialize(**kwargs)\n return self.to_json(value, **kwargs)", "def serialize(cls, *args, **kwargs):\n return serialize_cls(cls)(*args, **kwargs)", "def register_serializer(cls, *, serializer, deserializer):\n context = ray.worker.global_worker.get_serialization_context()\n context._register_cloudpickle_serializer(cls, serializer, deserializer)", "def _ReadSerializerStream(self):\n stream_name = 'serializer.txt'\n if not self._HasStream(stream_name):\n return\n\n serialization_format = self._ReadStream(stream_name)\n if serialization_format != definitions.SERIALIZER_FORMAT_JSON:\n raise ValueError(\n 'Unsupported stored serialization format: {0:s}'.format(\n serialization_format))\n\n return serialization_format", "def get_serializer(self, *args, **kwargs):\n try:\n kwargs['user_detail'] = str2bool(self.request.query_params.get('user_detail', False))\n except Exception:\n pass\n\n kwargs['context'] = self.get_serializer_context()\n\n return self.serializer_class(*args, **kwargs)", "def serialize(self, format, queryset, **options):\n s = get_serializer(format)() # noqa\n s.serialize(queryset, **options)\n return s.getvalue()", "def get_serializer_class(self):\n try:\n return self.serializer_action_classes[self.action]\n except (KeyError, AttributeError):\n return super(\n MultiSerializerViewSetMixin, self).get_serializer_class()", "def transparent_serialize(cls):\n return _create_wrapper_cls(cls, store_init_parameters=False)", "def _init_serialization():\n global _deconstructed_serialization_map, _serialization_map\n\n if _deconstructed_serialization_map or _serialization_map:\n return\n\n _deconstructed_serialization_map = {\n Q: QSerialization,\n }\n\n if CombinedExpression is not None:\n _deconstructed_serialization_map[CombinedExpression] = \\\n CombinedExpressionSerialization\n\n _serialization_map = {\n # String-based\n bytes: StringSerialization,\n six.text_type: StringSerialization,\n\n # Dictionary-based\n OrderedDict: DictSerialization,\n dict: DictSerialization,\n\n # Primitives\n bool: PrimitiveSerialization,\n float: PrimitiveSerialization,\n int: PrimitiveSerialization,\n type(None): PrimitiveSerialization,\n\n # Iterables\n list: ListSerialization,\n set: SetSerialization,\n tuple: TupleSerialization,\n\n # Class references\n type: ClassSerialization,\n }\n\n if six.PY2:\n _serialization_map.update({\n long: PrimitiveSerialization,\n })", "def _register_serializers(self):\n import ray.util.serialization_addons\n from ray.util.serialization import StandaloneSerializationContext\n\n ctx = StandaloneSerializationContext()\n ray.util.serialization_addons.apply(ctx)", "def serialize_cls(cls):\n return _create_wrapper_cls(cls)", "def get_deserializer(name):\n\n loads = importlib.import_module(name).loads\n\n # Serializers that handle unicode streams and a are safe against comments\n # can be used directly\n if name == 'json':\n import json\n return json.loads\n\n def deserializer(x):\n # Load 
base85 bytes data\n x = x[1:].encode('ascii')\n x = base64.b85decode(x)\n try:\n return loads(x)\n except Exception as ex:\n raise SerializationError(ex)\n\n return deserializer", "def decorator(cls):\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls", "def serialize(self, obj):\n return obj", "def encoder(self) -> json.JSONEncoder:\n return encoder_from_string(self.doc.get('encoder'))", "def get_serialization_instance(cls, value):\n\n # if the instance is a list, convert it to a cls instance.\n # this is only useful when deserializing method arguments for a client\n # request which is the only time when the member order is not arbitrary\n # (as the members are declared and passed around as sequences of\n # arguments, unlike dictionaries in a regular class definition).\n if isinstance(value, list) or isinstance(value, tuple):\n assert len(value) <= len(cls._type_info)\n\n cls_orig = cls\n if cls.__orig__ is not None:\n cls_orig = cls.__orig__\n inst = cls_orig()\n\n keys = cls._type_info.keys()\n for i in range(len(value)):\n setattr(inst, keys[i], value[i])\n\n elif isinstance(value, dict):\n inst = cls()\n\n for k in cls._type_info:\n setattr(inst, k, value.get(k, None))\n\n else:\n inst = value\n\n return inst", "def serialize(self, request, content_type, default_serializers=None):\n\n if self.serializer:\n serializer = self.serializer\n else:\n _mtype, _serializer = self.get_serializer(content_type,\n default_serializers)\n serializer = _serializer()\n\n response = webob.Response()\n response.status_int = self.code\n for hdr, value in self._headers.items():\n response.headers[hdr] = str(value)\n response.headers['Content-Type'] = content_type\n if self.obj is not None:\n response.body = serializer.serialize(self.obj)\n\n return response", "def get_serializer(self, content_type, default_serializers=None):\n\n default_serializers = default_serializers or {}\n\n try:\n mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)\n if mtype in self.serializers:\n return mtype, self.serializers[mtype]\n else:\n return mtype, default_serializers[mtype]\n except (KeyError, TypeError):\n raise exception.InvalidContentType(content_type=content_type)", "def serialize(self):\n raise NotImplementedError(\n \"Subclasses of Serializable must implement serialize\"\n )", "def test_get_serializer_class():\n view = views.PasswordResetRequestView()\n expected = serializers.PasswordResetRequestSerializer\n\n assert view.get_serializer_class() == expected", "def _serialize(\n obj: object,\n to_proto: bool = True,\n to_bytes: bool = False,\n) -> Union[str, bytes, Message]:\n\n is_serializable: Serializable\n if not isinstance(obj, Serializable):\n if hasattr(obj, \"serializable_wrapper_type\"):\n is_serializable = obj.serializable_wrapper_type(value=obj) # type: ignore\n else:\n traceback_and_raise(\n Exception(f\"Object {type(obj)} has no serializable_wrapper_type\")\n )\n else:\n is_serializable = obj\n\n serialize_method = getattr(is_serializable, \"sy_serialize\", None)\n if serialize_method is None:\n serialize_method = getattr(is_serializable, \"serialize\", None)\n if serialize_method is None:\n raise Exception(f\"Object {type(obj)} has no serialize method\")\n\n return serialize_method(to_proto=to_proto, to_bytes=to_bytes)", "def test_get_serializer_class():\n view = views.EmailVerificationView()\n expected = serializers.EmailVerificationSerializer\n\n assert view.get_serializer_class() == expected", "def set_serializer(self, serializer):\n 
self._serializer = serializer\n return self", "def test_get_serializer_class():\n view = views.UserCreateView()\n expected = serializers.UserCreationSerializer\n\n assert view.get_serializer_class() == expected", "def register_serializer(\n encoding: Union[str, Tuple[str, ...]], serializer: Serializer = None\n):\n\n def wrapper(serializer):\n if isinstance(encoding, tuple):\n for e in encoding:\n register_serializer(e, serializer)\n else:\n _SERIALIZERS[encoding] = serializer\n return serializer\n\n return wrapper(serializer) if serializer is not None else wrapper", "def get_serializer(self, *args, **kwargs):\n try:\n kwargs['item_detail'] = str2bool(self.request.query_params.get('item_detail', False))\n except Exception:\n pass\n\n try:\n kwargs['user_detail'] = str2bool(self.request.query_params.get('user_detail', False))\n except Exception:\n pass\n\n kwargs['context'] = self.get_serializer_context()\n\n return self.serializer_class(*args, **kwargs)", "def serialize(self, data, format='application/json'):\r\n return self.serializer.serialize(data, format=format)", "def serialize(self):\n return self.instantiate_queue()" ]
[ "0.72009987", "0.7115752", "0.69366866", "0.6924428", "0.68449354", "0.6820688", "0.67195165", "0.6692233", "0.6686074", "0.65120816", "0.6473344", "0.6473284", "0.6444412", "0.6443525", "0.6418904", "0.63524395", "0.6348317", "0.63381505", "0.6311651", "0.63109744", "0.62789714", "0.62647057", "0.6243169", "0.6240013", "0.623506", "0.62324035", "0.61878854", "0.6182841", "0.617552", "0.6173757", "0.61729735", "0.614281", "0.61395276", "0.6133147", "0.6128442", "0.61279124", "0.61093885", "0.6107114", "0.6105589", "0.6101638", "0.60992885", "0.6092012", "0.6079949", "0.6061221", "0.6045427", "0.6022098", "0.5991614", "0.5989082", "0.595159", "0.5938331", "0.5910282", "0.58833855", "0.5866978", "0.57846475", "0.57676476", "0.5748564", "0.5727291", "0.5686052", "0.5667387", "0.56470567", "0.5640231", "0.5635826", "0.5543641", "0.54997194", "0.5493936", "0.5413152", "0.5407627", "0.5401495", "0.53956974", "0.5393255", "0.53637904", "0.5354623", "0.5354557", "0.5345471", "0.5341087", "0.53392893", "0.5339143", "0.5332332", "0.533047", "0.53259933", "0.5322456", "0.53021425", "0.52999425", "0.5275463", "0.52374655", "0.52361417", "0.522915", "0.5228819", "0.52153224", "0.5211377", "0.51908773", "0.5172133", "0.5170512", "0.5152825", "0.5142171", "0.5128", "0.51180595", "0.5101454", "0.5080314", "0.50775915" ]
0.67095476
7
Description When is given a directory name that exist Expected Result Shows log that directory was found
def test_has_directory_log(self, check_fn_true, caplog): #setup records = caplog.records has_directory = extractor.make_has_directory(os.path.isdir) directory_path = "./data/observed" #when test1 = has_directory(directory_path) #result assert len(records) == 1 assert records[0].message == f"It was found directory {directory_path}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It wasn't found directory {directory_path}\"", "def checkDirectory(path,logger):\n newPath = completePath(path)\n if not os.path.exists(newPath):\n os.makedirs(newPath)\n if (logger):\n print(\"Did not found required directories. Creating them...\")\n else:\n if (logger):\n print(\"Found the required directories!\")", "def test_ensure_dir_exists(self):\n pass", "def checkDir(directory):\n ## test if directory is there\n if not os.path.exists(directory):\n os.mkdir(directory)\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Making new directory: \" + directory + \"\\n\")\n else:\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Found directory: \" + directory + \"\\n\")", "def check_if_dir_exists():\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\")\n logger.debug(\"Dir for logs has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed\")\n\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\")\n logger.debug(\"Dir for DB has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'db'} failed\")", "def check_dir(directory: str, err_string: str) -> None:\n if not pathlib.Path(directory).is_dir():\n print('\\n' + err_string + '\\n')\n raise NotADirectoryError", "def test_log_dir(self):\n false_dir = '/tmp/any'\n self.test_config['LOG_DIR'] = false_dir\n self.write_config_to_file()\n self.log = nginx_log_generator()\n self.generate_report()\n # Check our log\n path_to_log = '{}/assets/{}'.format(self.test_dir, 'analyzer.log')\n with open(path_to_log) as f:\n log_content = f.read()\n self.assertTrue(\"Sorry, directory {} wasn't found\".format(false_dir) in log_content)", "def print_is_directory(dir_name):\n print('pwgrep: {}: is a directory'.format(dir_name))", "def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)", "def check_dir(dirname):\n print('Checking directory...{}'.format(dirname))\n if dirname is not None and not is_dir(dirname):\n raise FileNotFoundError('{} is not a valid directory'.format(dirname))", "def check_dir(dname):\n direc = os.path.dirname(dname)\n try:\n os.stat(direc)\n except:\n os.mkdir(direc)\n print \"Made directory %s....\" % dname\n return dname", "def Directory(self) -> str:", "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def test_has_directory(self, check_fn_true):\n\n #setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n \n #when\n test1 = has_directory(\"./data/observed\")\n\n #result\n 
assert test1 is True", "def test_is_summary_directory(self):\n summary_base_dir = tempfile.mkdtemp()\n file_count = 1\n directory_count = 1\n gen_directories_and_files(summary_base_dir, file_count, directory_count)\n\n summary_watcher = SummaryWatcher()\n flag = summary_watcher.is_summary_directory(summary_base_dir, './')\n assert flag\n flag = summary_watcher.is_summary_directory(summary_base_dir, './\\x00')\n assert not flag\n shutil.rmtree(summary_base_dir)", "def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()", "def test_doesnt_have_directory(self, check_fn_false):\n\n # setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n\n # when\n test2 = has_directory(\"./data/tests\")\n\n # result\n assert test2 is False", "def dir_filter(x):\n return os.path.isdir('logs/{}'.format(x))", "def dir_exists(self, path):\n if not path:\n return True\n return False", "def test_test_directory_identifer_exists(self):\n self.logger.info(\"STEP: Initialize the workspace.\")\n with Workspace(Mock()) as workspace:\n self.workspace = workspace\n\n self.logger.info(\n \"STEP: Enter a test directory in a context manager with identifier \"\n \"'dir1'.\"\n )\n with workspace.test_directory(\"dir1\") as directory:\n self.logger.info(\n \"STEP: Check that test directory was created and exit the \"\n \"context.\"\n )\n if not directory.exists() and directory.is_dir():\n raise Exception(\"Test directory was not properly created.\")\n first_stat = directory.stat()\n\n with workspace.test_directory(\"dir1\") as directory:\n self.logger.info(\n \"STEP: Enter a test directory in a context manager with the \"\n \"same identifer.\"\n )\n if not directory.exists() and directory.is_dir():\n raise Exception(\"Test directory was not properly created.\")\n\n self.logger.info(\"STEP: Verify that the folder was re-used.\")\n self.assertEqual(\n first_stat,\n directory.stat(),\n \"Second directory is not the same as the first directory.\",\n )", "def check_dir_exist(scheme):\n if os.path.exists(scheme.prefix) is False:\n print(\"ERROR: Required directory '{}' is missing! Exiting!\").format(scheme.prefix)\n sys.exit(1)", "def testNoSuchDirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"no_such_directory\")", "def check_dir(filedir, olddir): # Yasemin's code\r\n\tgoodname = False\r\n\twhile goodname == False:\r\n\t\tif exists(filedir + olddir):\r\n\t\t\tprint(\"Directory already exists! 
Please pick a knew directory name for old lab files:\")\r\n\t\t\tolddir = input(\"> \")\r\n\t\t\tolddir = name_check(olddir)\r\n\t\telse:\r\n\t\t\tgoodname = True\r\n\treturn olddir", "def is_dir(self, path):", "def is_valid_directory(parser, arg):", "def scan_sample_directory(sample_dir: Path) -> None:\n if not (sample_dir / 'README.md').is_file():\n print(f\"WARNING ({sample_dir}): No README.md file\")\n if not (sample_dir / 'main.py').is_file():\n print(f\"ERROR ({sample_dir}): No main.py file\")", "def _check_directories(self, dist, component):\n path = join(self.repository, 'dists', dist, component, 'source')\n\n if not isdir(path):\n makedirs(path)", "def test_error_is_thrown_if_directory_does_not_exist(fs):\n\n output_dir = 'user_specified_directory'\n assert not os.path.exists(output_dir)\n\n with pytest.raises(NotADirectoryError) as exception_info:\n verify_output_dir(output_dir)\n\n assert exception_info.value.args[0] == 'The \"user_specified_directory\" directory, which was specified by ' \\\n 'the --output-dir command-line argument, is not an existing directory. ' \\\n 'Please either create that directory or specify a different one.'", "def check_dir(dir):\n if not os.path.exists(dir):\n print(\"[+] Creating directory for target..\")\n os.makedirs(dir)", "def _existDir(d):\n\treturn os.path.exists(d)", "def checkDir(dirName=None):\r\n if not os.path.exists(dirName):\r\n os.makedirs(dirName)\r\n return 0", "def check_dir(path):\n \n if not os.path.exists(path):\n os.makedirs(path)\n print path", "def valid_directory(self, directory):\n\n if os.path.isdir(directory):\n return directory\n else:\n msg = f\"The write directory provided by the user does not exist: {directory}\"\n logging.exception(msg)\n self.close_logger()\n raise NotADirectoryError(msg)", "def scan_directory(self, dirname):\n if not dirname:\n dirname = os.getcwd()\n\n if os.path.exists(dirname):\n for item in os.listdir(dirname):\n item_path = os.path.join(dirname, item)\n if os.path.isfile(item_path):\n self.file_confidence.append(self.confidence(item_path))\n else:\n raise FileNotFoundError('Directory does not exist. Change your path and try again')", "def test_get_result_directories(self):\n pass", "def assert_is_dir_and_exists(self):\n if not self.is_dir():\n msg = \"'%s' is not a file or doesn't exists!\" % self\n raise EnvironmentError(msg)", "def test_isdir(self, remote_mock_dir):\n\n with HdfsHook() as hook:\n assert hook.isdir(posixpath.join(remote_mock_dir, \"subdir\"))\n assert not hook.isdir(posixpath.join(remote_mock_dir, \"test.txt\"))", "def directory_exists(self, directory: str = None) -> bool:\n return os.access(directory if directory else self.get_directory(), os.R_OK)", "def handle_directory_pre(self, dir):\n if self.ppath_prefix_len:\n prefix = self.ppath_prefix[self.ppath_prefix_idx%self.ppath_prefix_len]\n dir = os.path.join(prefix, dir)\n try:\n dir_lstats = os.lstat(dir)\n except WindowsError as e:\n if e.winerror == 3 and len(dir) > hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH:\n self.log.error('Unable to stat dir due to path length > %d characters. 
Try setting HKLM\\System\\CurrentControlSet\\Control\\FileSystem\\LongPathsEnabled to 1'%hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH)\n else:\n if hydra.is_invalid_windows_filename(dir):\n self.log.error('Directory contains invalid characters or invalid names for Windows: %s'%dir)\n else:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n return True\n except Exception as e:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n if stat.S_ISLNK(dir_lstats.st_mode):\n # We do not want to process a symlink so account for it here as a symlink\n self.stats['symlink_dirs'] += 1\n return True\n return False", "def logs_directory(self):", "def list_dir(self):\n x = [x for x in os.listdir(self.spath) if os.path.isdir(os.path.join(self.spath, x))]\n if x != [] :\n print (f\"choose one of these : {x}\")", "def test_exists(self, remote_mock_dir):\n\n with HdfsHook() as hook:\n assert hook.exists(posixpath.join(remote_mock_dir, \"subdir\"))\n assert hook.exists(posixpath.join(remote_mock_dir, \"test.txt\"))\n assert not hook.exists(posixpath.join(remote_mock_dir, \"non-existing.txt\"))", "def isdir (self, path):\r\n pass", "def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)", "def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)", "def find_logs():\n dirname = os.path.normpath('./logs')\n d = 1\n\n while d < 5:\n if os.path.exists(dirname):\n return os.path.normpath(dirname)\n d += 1\n dirname = os.path.join('../', dirname)\n\n return dirname", "def validate(self):\n if not self.path.is_dir() or not self.path.exists():\n raise NotADirectoryError", "def test_get_result_directory(self):\n pass", "def check_directory(self, directory: str) -> bool:\n return self.run(\"/\", \"root\", [\"test\", \"-d\", directory], check=False).returncode == 0", "def checkDirExists(dirPath):\n if not MyFile.checkFileExists(dirPath):\n MyFile.makeDir(dirPath)", "def dir_exists(self):\r\n if os.path.exists(self.csvdir):\r\n return True\r\n else:\r\n return False", "def test_nonExistentDir(self):\n e = self.assertRaises(\n IOError, logfile.LogFile, self.name, \"this_dir_does_not_exist\"\n )\n self.assertEqual(e.errno, errno.ENOENT)", "def test_does_static_directory_exist(self):\n does_static_dir_exist = os.path.isdir(self.static_dir)\n does_css_static_dir_exist = os.path.isdir(os.path.join(self.static_dir, 'css'))\n does_js_static_dir_exist = os.path.isdir(os.path.join(self.static_dir, 'js'))\n \n self.assertTrue(does_static_dir_exist, f\"{FAILURE_HEADER}The static directory was not found in the expected location. 
Check and try again.{FAILURE_FOOTER}\")\n self.assertTrue(does_css_static_dir_exist, f\"{FAILURE_HEADER}The css subdirectory was not found in your static directory.{FAILURE_FOOTER}\")\n self.assertTrue(does_js_static_dir_exist, f\"{FAILURE_HEADER}The js subdirectory was not found in your static directory.{FAILURE_FOOTER}\")", "def testDirectoryReturn(self):\n self.assertEqual(\n self.directory,\n self.mr.directory\n )\n\n self.mr._dir = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.directory\n )", "def test_supply_directory(self):\n supplied_value = '/tmp'\n returned_value = generic.check_path(supplied_value)\n\n self.assertEqual(supplied_value, returned_value)", "def path_exists(dir):\n if os.path.exists(dir): return 1\n else: return 0", "def folderExistsWithTimeOut(dirToCheck, waitIntervalSec, sleepInterSec, outStream):\n\twtime=0\n\tfound=0\n\t#wait for the directory to appear\n\twhile ((wtime<=waitIntervalSec) and (found==0)):\n\t\tif not (os.access (dirToCheck, os.F_OK)):\n\t\t\twtime+=sleepInterSec\n\t\t\ttime.sleep(sleepInterSec)\n\t\t\toutStream.write (\".\")\n\t\telse:\n\t\t\tfound=1\n\t#exit if it times out\n\treturn os.access (dirToCheck, os.F_OK)", "def testIsDir(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingDirPath=P(self.existingDirPathStr)\r\n nonExistingDirPath=P(self.nonExistingDirPathStr)\r\n existingDirNoTrailingSlashPath=P(self.existingDirPathStr[:-1])\r\n existingValidDirSymlinkPath=P(self.existingValidSymlinkDirPathStr)\r\n existingInvalidDirSymlinkPath=P(self.existingInvalidSymlinkDirPathStr)\r\n existingFilePath=P(self.existingFilePathStr)\r\n\r\n # 1\r\n self.assertEquals(existingDirPath.isDir(),True,\r\n '%r is a dir'%str(existingDirPath))\r\n\r\n # 2\r\n self.assertEquals(nonExistingDirPath.isDir(),False,\r\n '%r does not exist'%str(nonExistingDirPath))\r\n\r\n # 3\r\n self.assertEquals(existingDirNoTrailingSlashPath.isDir(),True,\r\n '%r is a dir'%str(existingDirNoTrailingSlashPath))\r\n\r\n # 4\r\n self.assertEquals(existingValidDirSymlinkPath.isDir(),True,\r\n '%r is a dir'%str(existingValidDirSymlinkPath))\r\n\r\n # 5\r\n self.assertEquals(existingInvalidDirSymlinkPath.isDir(),False,\r\n '%r is an invalid symlink'\r\n %str(existingInvalidDirSymlinkPath))\r\n\r\n # 6\r\n self.assertEquals(existingFilePath.isDir(),False,\r\n '%r is a file'%str(existingFilePath))", "def test_scan_dir_not_found(self, dir_path):\n with self.assertRaises(FileNotFoundError):\n self.file_scanner.scan(dir_path)", "def test_verify_path_4(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\", \"dir\")\n self.assertFalse(result)", "def dir_exception_handler(dpath: str,\n dryrun: bool,\n dirs_created: list = [],\n overwrite: bool = False) -> bool:\n # If this dir was created during this session, do not create it again\n if dpath in dirs_created:\n return False\n elif os.path.exists(dpath):\n if dryrun == False:\n # Get user input\n while overwrite not in ['Y', 'y', 'N', 'n', True]:\n overwrite = input(f\"\\n*** WARNING: Your directory {dpath} already exists. Overwrite? 
Y/N: \")\n if overwrite == True or overwrite.lower() == 'y':\n print(f\"Your directory {dpath} will be overwritten\")\n shutil.rmtree(dpath)\n return True\n else:\n return False\n else: # If dry run:\n print(f\"\\n*** WARNING: This is a dry run but if you run cp_packager in normal mode,\")\n print(f\"*** your directory {dpath} may be overwritten\")\n else:\n return True", "def check_charm_dir_exists(charm_dir: Path) -> None:\n assert charm_dir.is_dir()", "def test_nodelog_missing_files(self):\n build_dir = self.BUILD_DIR + 'nodelog?pod=abc'\n response = app.get('/build' + build_dir, status=404)\n self.assertIn('Unable to find', response)", "def check_dir(path, create = True):\n if os.path.exists(path):\n if os.path.isdir(path):\n return path\n else:\n return False\n if create:\n msg = \"Creating directory: '%s'\" % (path)\n print msg\n log.info(msg)\n os.mkdir(path)\n else:\n return False", "def test_add1_dir(self):\n try:\n TempfileManager.add_tempfile(tempdir + 'add1')\n self.fail(\n \"Expected IOError because directory 'add1' does not exist\")\n except IOError:\n pass", "def displayPathInfo():\n # TODO: Remove unwanted / unused functions\n\n dirpath = os.getcwd()\n logging.info(\"Current Directory is : \" + dirpath)\n foldername = os.path.basename(dirpath)\n logging.info(\"Directory name is : \" + foldername)", "def test_log_filenames_file_not_found(self):\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n self.app.log_filenames([filename])\n self.assertEqual(self.get_track_count(), 0)", "def isPath(self,pin,head=\"check path exist\",exit_on_error=False,logmsg=False):\n p = os.path.abspath(self.expandvars(pin))\n if os.path.isdir(p):\n if logmsg:\n logger.info(head + \"\\n --> dir exist: {}\\n -> abs dir{:>18} {}\".format(pin,':',p))\n return p\n #--- error no such file\n logger.error(head + \"\\n --> no such directory: {}\\n -> abs dir{:>18} {}\".format(pin,':',p))\n if exit_on_error:\n raise SystemError(self.__MSG_CODE_PATH_NOT_EXIST)\n return False", "def report_dir (self, dir_path):\n print len(os.listdir(dir_path)), 'in archive directory'\n dupset = self.find_dups_for_directory (dir_path)\n keys = dupset.keys()\n keys.sort()\n print '- ', len(keys), 'dups found'\n for key in keys:\n # print '\\n', key.replace(archives_base_path, '')\n dedup_key_path = self.make_deduped_path(key)\n # print '\\n', '{}{}'.format(dedup_key_path, os.path.exists(dedup_key_path) and ' *' or '')\n print '\\n', '{}{}'.format(self.get_dup_display_path(dedup_key_path), os.path.exists(dedup_key_path) and ' *' or '')\n dups = dupset[key]\n for dup in dups:\n dedup_path = self.make_deduped_path(dup)\n # print ' - {}{}'.format(dedup_path, os.path.exists(dedup_path) and ' *' or '')\n print ' - {}{}'.format(self.get_dup_display_path(dedup_path), os.path.exists(dedup_path) and ' *' or '')", "def __is_directory_name(filename):\n return filename[-1] == '/'", "def testListDirectory(self):\n test_file_path = self._GetTestFilePath(['unified_logging'])\n self._SkipIfPathNotExists(test_file_path)\n\n test_helper = dfvfs_helpers.DFVFSFileSystemHelper(None)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)\n test_helper.OpenFileSystem(path_spec)\n\n expected_directory_entries = [\n '0000000000000030.tracev3',\n '0000000000000f85.tracev3',\n 'timesync',\n 'uuidtext']\n\n directory_entries = sorted(test_helper.ListDirectory(test_file_path))\n 
self.assertEqual(directory_entries, expected_directory_entries)", "def dirCheck(dirPath):\n if not os.path.exists(dirPath):\n os.mkdir(dirPath)\n return dirPath", "def dir_is_empty(dir):\n if os.path.exists(dir) and os.path.isdir(dir):\n if not os.listdir(dir):\n return True\n else:\n return False\n else:\n print(\"Given Directory don't exists\")", "def test_verify_path_3(self):\n result = basic.verify_path(str(self.test_directory1), \"dir\")\n self.assertTrue(result)", "def test_02_not_exist(self):\n with self.assertLogs(\"borg\", \"WARNING\") as logger:\n self.api.extract(self.archive, self.file_3)\n message = logger.records[0].getMessage()\n self.assertRegex(\n message,\n r\".*?file_3.*never\",\n \"Warning not logged for bad path\",\n )", "def testDirExists(self, mock_dir, mock_exists, mock_listdir):\n mock_dir.return_value = True\n mock_exists.return_value = True\n mock_listdir.return_value = self.files\n\n self.assertEqual(\n self.is_seq,\n self.mr.is_seq\n )\n\n if len(self.seqs) > 0:\n self.assertEqual(\n self.seqs[0],\n self.mr.seq\n )\n else:\n self.assertEqual(\n None,\n self.mr.seq\n )\n\n self.assertEqual(\n self.seqs,\n self.mr.seqs\n )\n\n mock_listdir.assert_called_once_with(self.mr.path)", "def checkExistenceDir(path):\n path = os.path.abspath(path)\n if not os.path.isdir(path):\n logger.warning(\n \"Directory {} does not seem to exist, creating one.\".format(path)\n )\n os.mkdir(path)", "def santityCheckInitialization(self):\r\n\r\n for obj in self.config[\"repos\"]:\r\n if not isdir(obj[\"path\"]):\r\n print(\"ERROR : Initialization Failed missing {} at path {}\".format(obj[\"name\"], obj[\"path\"]))", "def _assert_dir_already_exists(dirname):\n\n if not dirname:\n return\n\n assert os.path.isdir(dirname), dirname\n assert os.access(dirname, os.R_OK), dirname\n assert os.access(dirname, os.W_OK), dirname", "def directory_exists(destination):\n\n if not os.path.isdir(destination):\n raise RuntimeError('Directory %s does not exists' % (destination))\n\n return True", "def _Run(self, dir_exists):\n with patch(os.path, 'isdir', return_value=dir_exists):\n self.RunStage()", "def is_valid_directory(args):\n if args.directory is not None:\n return True\n return False", "def test_missing_dir_in_custom_log_path(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n custom_log_path = Path(temp_dir) / \"another_dir\" / \"main.log\"\n log_path_matcher = LogPathCorrectnessMatcher(custom_log_path)\n os.environ[LOG_ENV_VARIABLE_NAME] = str(custom_log_path)\n tasks = run_n_simple_tasks(1)\n\n log_path = UsedLogPath(tasks[0])\n assert log_path == log_path_matcher", "def NotADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.ENOTDIR", "def _look_for_stat(self, dir):\n\n if os.path.exists('stat.json'):\n self.stat_files.append(os.path.join(dir,'stat.json'))\n return True\n else:\n return False", "def check_directory_valid(self):\n Util.print_standout(\"check is there haven`t empty directory.\")\n for p, dirs, filename_list in os.walk(self.data_dir):\n for dir_name in dirs:\n if not os.listdir(os.path.join(p, dir_name)):\n Util.print_error(\"There shouldn't be a empty directory in [%s] of [%s]\" % (dir_name, self.data_dir))\n return False\n return True", "def _dodir ( self, dirpath, mkdir_p ):\n return", "def test_empty_directory(self):\n\n # create indexer object\n indexer = indexing_module.IndexModule()\n\n # index the location (storage/data/test/empty_directory)\n indexer.index(_path_prefix + 'empty_directory')\n\n # search for few words and check that 
the result is empty\n result = indexer.search(\"\")\n print(result)\n self.assertTrue(result == {})\n\n result = indexer.search(\"hello\")\n self.assertTrue(result == {})\n\n result = indexer.search(\"world\")\n self.assertTrue(result == {})", "def dirChecking(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)", "def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)", "def _directory(self):\n dir = self.target\n\n if not os.path.exists(dir):\n return os.makedirs(dir)\n return True", "def pytest_logger_logsdir(self, config):", "def is_dir(self, path: PathLike):", "def test_infodir(self):\n self.chck_triple('infodir')", "def on_dir_changed(self, event):\r\n\r\n if not self.searchin_update:\r\n pth = event.directory\r\n if pth is not None and exists(pth):\r\n self.searchin_update = True\r\n self.m_searchin_text.safe_set_value(pth)\r\n self.searchin_update = False\r\n event.Skip()", "def _check_is_dir(self, path):\n if os.path.isdir(path) and os.path.exists(path):\n self.__save_dir = path\n else:\n print(f'No existing directory found. Creating new directory at {path}')\n os.mkdir(path)\n self.__save_dir = path", "def _assert_dir_exists(dirname):\n\n if not dirname:\n return\n\n if not os.path.exists(dirname):\n text = \"directory %s doesn't exist, so creating\"\n print(\"\\033[93m\" + text % dirname + \"\\033[0m\")\n\n os.makedirs(dirname)\n\n assert os.path.isdir(dirname), dirname\n assert os.access(dirname, os.R_OK), dirname\n assert os.access(dirname, os.W_OK), dirname", "def __check_exist_path(self):\n if 'path_out' not in self.params:\n raise ValueError('missing \"path_out\" among parameters')\n self.params['path_out'] = update_path(self.params.get('path_out'))\n list_names = [n for n in self.params if any(m in n.lower() for m in ['path', 'dir', 'file'])]\n for n in list_names:\n p = os.path.abspath(os.path.expanduser(self.params[n]))\n if not os.path.exists(p):\n raise FileNotFoundError('given path/file/dir \"%s\" does not exist!' % p)\n self.params[n] = p\n for n in [n for n in self.params if 'exec' in n]:\n # in case you define executable in your home\n if os.path.expanduser(self.params[n]) != self.params[n]:\n self.params[n] = os.path.expanduser(self.params[n])", "def test_base_dir(self):\n old_base_dir = self.path_translator.BASE_REAL_DIR\n self.path_translator.BASE_REAL_DIR = \"/tmp/study\"\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1\".format(self.search.instance))]\n self.assertEqual(result, expected)\n self.path_translator.BASE_REAL_DIR = old_base_dir", "def info_directory(self) -> Optional[str]:\n raise NotImplementedError()" ]
[ "0.74612963", "0.66516936", "0.6648649", "0.6637668", "0.6592905", "0.65451527", "0.6507091", "0.6476768", "0.6445445", "0.64397144", "0.641731", "0.6323137", "0.6317305", "0.6291415", "0.6287829", "0.6264551", "0.62638617", "0.62398607", "0.6232754", "0.6224753", "0.62079227", "0.62067014", "0.62065524", "0.61675984", "0.6151805", "0.61463827", "0.6136915", "0.6132248", "0.61214143", "0.6103149", "0.6100163", "0.6097386", "0.60957223", "0.608083", "0.6065482", "0.60518456", "0.6046728", "0.6035486", "0.60215497", "0.6014043", "0.60073876", "0.600359", "0.59953797", "0.59937644", "0.59937644", "0.59833133", "0.59664774", "0.59642905", "0.59635735", "0.59602094", "0.5958913", "0.5956306", "0.59530866", "0.59317225", "0.5926092", "0.592229", "0.5909389", "0.5897723", "0.58751345", "0.58719367", "0.5862506", "0.5856225", "0.5846284", "0.58240646", "0.58218735", "0.58081585", "0.5803799", "0.58012646", "0.5791056", "0.5790843", "0.57822883", "0.57788306", "0.577749", "0.5776033", "0.57600355", "0.57569546", "0.57556874", "0.5751005", "0.5747697", "0.5743646", "0.57378113", "0.5737012", "0.57324004", "0.57300735", "0.5725536", "0.57232285", "0.571928", "0.57178175", "0.57173747", "0.57130146", "0.5711905", "0.5706118", "0.570385", "0.56910104", "0.568738", "0.5681685", "0.5678763", "0.5678495", "0.56776744", "0.56748974" ]
0.7359447
1
Create a new websocket and connect its input and output to the subprocess with the specified PID.
async def websocket_handler(self, request, ws): if self.repl_mgr is None: return sanic.response.HTTPResponse(status=404) log.info('initiating websocket') await self.repl_mgr.process_websocket(ws) log.info('terminating websocket')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch_web_socket(vnc_port, web_socket_port, server):\n\n path = os.path.abspath(os.path.dirname(__file__))\n ws = os.path.join(path, \"../../webConsole/bin/websockify.py\")\n\n web_socket_path = os.path.abspath(ws)\n\n cmd = \"%s %s:%s %s:%s --idle-timeout=120 &\" % (web_socket_path, server, vnc_port, server, web_socket_port)\n\n logger.debug(cmd)\n\n proc = subprocess.Popen(cmd, shell=True, close_fds=True)\n time.sleep(1)\n return proc.pid", "def setup_websocket(ws_url, service_account_file, audience, router_password, source_port, dest_ip, dest_port):\n def on_message(ws, message):\n \"\"\"Handle a message\"\"\"\n handle_message(ws, message, router_password, source_port, dest_ip, dest_port)\n\n def on_error(ws, error):\n \"\"\"Handle an error by exiting or closing if it is a KeyboardInterrupt (Ctrl+C)\"\"\"\n if type(error) is KeyboardInterrupt:\n logger.info('Cancel requested (Ctrl+C), closing connection.')\n ws.close()\n else:\n logger.error(\"The following error occurred:\\n{error}\".format(error=error))\n sys.exit(1)\n\n def on_close(ws):\n \"\"\"Handle the WebSocket close\"\"\"\n logger.info('WebSocket closed.')\n\n def on_open(ws):\n \"\"\"Handle the WebSocket opening\"\"\"\n logger.info('WebSocket open, sending authentication.')\n authenticate(ws, service_account_file, audience)\n ws.send(STATUS_COMMAND_FORMAT.format(status_payload=json.dumps(get_status(router_password, source_port, dest_ip, dest_port))))\n\n return websocket.WebSocketApp(ws_url,\n on_open=on_open,\n on_message=on_message,\n on_error=on_error,\n on_close=on_close)", "def main():\n global APP\n APP = make_app()\n APP.clients = [] # global list of all connected websocket clients\n APP.printer = Serial('/dev/ttyUSB0', baudrate=19200)\n APP.listen('1337', '0.0.0.0')\n log('Listening on http://0.0.0.0:1337')\n tornado.ioloop.IOLoop.current().start()", "async def main():\n await serve_websocket(handle_server, SERVER, PORT, ssl_context=None)", "def add_ws(self):\n def on_message(ws, message):\n print(message)\n\n def on_error(ws, error):\n pass\n\n def on_close(ws):\n pass\n\n def on_open(ws):\n thread.start_new_thread(self.run, ())\n\n ws = websocket.WebSocketApp(self.url + '/',\n on_message = on_message,\n on_error = on_error,\n on_close = on_close)\n\n ws.on_open = on_open\n self.ws = ws", "async def create_websocket_server(sock, filter=None): # pylint: disable=W0622\n ws = Websocket()\n await ws.start_server(sock, filter=filter)\n return ws", "def start_server():\n server = WebsocketServer(9001, host='0.0.0.0')\n server.set_fn_message_received(message_received)\n server.set_fn_client_left(client_left)\n print(\"Started\")\n server.run_forever()", "def __init__(self, websocket_ip, port=9090):\n print(\"Connecting to websocket: {}:{}\".format(websocket_ip, port))\n self.ws = websocket.create_connection(\n 'ws://' + websocket_ip + ':' + str(port))\n self._advertise_dict = {}", "def initSocket(self):\n \n # Check WebSocket support\n if self.nodejs:\n try:\n WebSocket = require('ws')\n except Exception:\n # Better error message\n raise \"FAIL: you need to 'npm install -g ws' (or 'websocket').\"\n else:\n WebSocket = window.WebSocket\n if (WebSocket is undefined):\n window.document.body.innerHTML = 'Browser does not support WebSockets'\n raise \"FAIL: need websocket\"\n # Open web socket in binary mode\n self.ws = ws = WebSocket(window.flexx.ws_url)\n #ws.binaryType = \"arraybuffer\" # would need utf-decoding -> slow\n \n def on_ws_open(evt):\n window.console.info('Socket connected')\n ws.send('hiflexx ' + 
flexx_session_id)\n def on_ws_message(evt):\n window.flexx.last_msg = msg = evt.data or evt\n #msg = window.flexx.decodeUtf8(msg)\n window.flexx.command(msg)\n def on_ws_close(evt):\n self.ws = None\n msg = 'Lost connection with server'\n if evt and evt.reason: # nodejs-ws does not have it?\n msg += ': %s (%i)' % (evt.reason, evt.code)\n if (not window.flexx.is_notebook) and (not self.nodejs):\n window.document.body.innerHTML = msg\n else:\n window.console.info(msg)\n def on_ws_error(self, evt):\n self.ws = None\n window.console.error('Socket error')\n \n # Connect\n if self.nodejs:\n ws.on('open', on_ws_open)\n ws.on('message', on_ws_message)\n ws.on('close', on_ws_close)\n ws.on('error', on_ws_error)\n else:\n ws.onopen = on_ws_open\n ws.onmessage = on_ws_message\n ws.onclose = on_ws_close\n ws.onerror = on_ws_error", "def start(turn_handler):\n\n if os.environ.get('BOTBOX_SECRET'):\n print('Using env secret:', os.environ['BOTBOX_SECRET'])\n headers = {'Authorization': os.environ['BOTBOX_SECRET']}\n elif len(sys.argv) > 1:\n print('Using cli secret:', sys.argv[1])\n headers = {'Authorization': sys.argv[1]}\n else:\n print('Using no authentication')\n headers = []\n\n # get the URL for the server from an environment variable if it is set,\n # otherwise use the default localhost\n if os.environ.get('BOTBOX_SERVER'):\n url = (WS_SERVER_SCHEME + '://'\n + os.environ['BOTBOX_SERVER'] + ':' + WS_SERVER_PORT)\n else:\n url = WS_SERVER_SCHEME + '://' + WS_SERVER_URL + ':' + WS_SERVER_PORT\n\n print(\"Connecting to:\", url)\n\n ws = websocket.WebSocketApp(\n url,\n on_open = _on_open,\n on_message = lambda ws, msg: _on_message(ws, msg, turn_handler),\n on_error = _on_error,\n on_close = _on_close,\n header = headers\n )\n\n ws.run_forever()", "def _launch_process_watcher(self, parent_pid, child_pid, child_host, child_port, minecraft_dir):\n\n multiprocessing.freeze_support()\n parent_conn, child_conn = multiprocessing.Pipe()\n self._logger.info(\"Starting process watcher for process {} @ {}:{}\".format(child_pid, child_host, child_port))\n p = multiprocessing.Process(\n target=InstanceManager._process_watcher, args=(\n parent_pid, child_pid, \n child_host, child_port, \n minecraft_dir, child_conn))\n \n def update_port(port):\n parent_conn.send([port])\n # p.daemon = True\n\n p.start()\n return p, update_port", "def connect_subproc(args, service=VoidService, config={}):\n from subprocess import Popen, PIPE\n proc = Popen(args, stdin=PIPE, stdout=PIPE)\n conn = connect_pipes(proc.stdout, proc.stdin, service=service, config=config)\n conn.proc = proc # just so you can have control over the process\n return conn", "def websocket(self) -> Websocket:\n self.__http_client.data_snapshot()\n host_uri = f'ws://{self.__http_client.host_ip}/api/v1/data/stream'\n subprotocols = [f'SessionToken_{self.__http_client.session_token}', \"object\"]\n return Websocket(host_uri, subprotocols, timeout=self.__http_client.request_timeout)", "def initSocket(self):\n \n # Check WebSocket support\n if self.nodejs:\n try:\n WebSocket = require('ws') # does not work on Windows?\n #WebSocket = require('websocket').client\n except Exception:\n # Better error message\n raise \"FAIL: you need to 'npm install -g ws'.\"\n else:\n WebSocket = window.WebSocket\n if (window.WebSocket is undefined):\n document.body.innerHTML = 'This browser does not support WebSockets'\n raise \"FAIL: need websocket\"\n # Open web socket in binary mode\n self.ws = ws = WebSocket(flexx.ws_url)\n ws.binaryType = \"arraybuffer\"\n \n def 
on_ws_open(evt):\n console.info('Socket connected')\n def on_ws_message(evt):\n flexx.last_msg = evt.data or evt\n msg = flexx.decodeUtf8(flexx.last_msg)\n flexx.command(msg)\n def on_ws_close(evt):\n self.ws = None\n msg = 'Lost connection with server'\n if evt and evt.reason: # nodejs-ws does not have it?\n msg += ': %s (%i)' % (evt.reason, evt.code)\n if (not flexx.is_notebook) and (not self.nodejs):\n document.body.innerHTML = msg\n else:\n console.info(msg)\n def on_ws_error(self, evt):\n self.ws = None\n if flexx.is_notebook:\n console.error('Socket error: re-run flexx.app.init_socket() to connect.')\n else:\n console.error('Socket error')\n \n # Connect\n if self.nodejs:\n ws.on('open', on_ws_open)\n ws.on('message', on_ws_message)\n ws.on('close', on_ws_close)\n ws.on('error', on_ws_error)\n else:\n ws.onopen = on_ws_open\n ws.onmessage = on_ws_message\n ws.onclose = on_ws_close\n ws.onerror = on_ws_error", "def start_server(self):\n self.logger.info(\"Starting WebSocket server on port %d\" % self.port)\n http_server = Thread(target=tornado.ioloop.IOLoop.instance().start)\n http_server.start()", "def _spawn_stream_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n\n listen_name = get_safe(config, \"process.listen_name\") or name\n log.debug(\"Stream Process (%s) listen_name: %s\", name, listen_name)\n process_instance._proc_listen_name = listen_name\n\n process_instance.stream_subscriber = StreamSubscriber(process=process_instance, exchange_name=listen_name, callback=process_instance.call_process)\n\n # Add publishers if any...\n publish_streams = get_safe(config, \"process.publish_streams\")\n pub_names = self._set_publisher_endpoints(process_instance, publish_streams)\n\n rsvc = self._create_listening_endpoint(node=self.container.node,\n from_name=process_instance.id,\n process=process_instance)\n\n # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)\n def cleanup(*args):\n self._cleanup_method(process_instance.id, rsvc)\n for name in pub_names:\n p = getattr(process_instance, name)\n p.close()\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[rsvc, process_instance.stream_subscriber],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_stream_process for %s\" % process_instance._proc_name)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n try:\n proc.start_listeners()\n except IonProcessError:\n self._process_quit(process_instance)\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n raise\n\n return process_instance", "def start(self) -> None:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n self.wserver = websockets.serve(self.__producer_handler, port=self.port, loop=loop)\n try:\n # run server forever\n self.server = asyncio.get_event_loop()\n self.server.run_until_complete(self.wserver)\n self.server.run_forever()\n except Exception:\n self.close()\n\n loop.run_forever()", "def start(self, websocketport: Optional[int] = None) -> None:\n if self.config:\n websocketport = self.config.websocketport\n\n if not 
websocketport:\n LOGGER.error(\"No websocket port specified\")\n return\n\n self.websocket = WSClient(\n self.session, self.host, websocketport, self.session_handler\n )\n self.websocket.start()", "def run(on_create):\n from twisted.internet import reactor\n\n # multiple, configurable transports, either via dict-like config, or\n # from native Twisted endpoints\n transports = [\n {\n \"type\": \"websocket\",\n \"url\": \"ws://127.0.0.1:8080/ws\"\n }\n ]\n\n # a connection connects and automatically reconnects WAMP client\n # transports to a WAMP router. A connection has a listener system\n # where user code can hook into different events : on_join\n connection = Connection(on_create, realm='public',\n transports=transports, reactor=reactor)\n\n # the following returns a deferred that fires when the connection is\n # finally done: either by explicit close by user code, or by error or\n # when stop reconnecting\n done = connection.connect()\n\n def finish(res):\n print(res)\n reactor.stop()\n\n done.addBoth(finish)\n\n reactor.run()", "def open(self, pysession_id):\n self.id = id(self)\n self.funcserver = self.application.funcserver\n self.pysession_id = pysession_id\n\n # register this connection with node\n self.state = self.funcserver.websocks[self.id] = {\"id\": self.id, \"sock\": self}", "def createConnectionToCli(self):\n connected = False\n # loop until connected\n while not connected:\n try:\n self.dataClient = Client(\n ('localhost', 5000), authkey=b'secret password')\n connected = True\n except ConnectionRefusedError:\n pass\n\n self.logger.debug('Connected to Process!')", "def start(self) -> None:\n if self.config:\n self.websocket = self.ws_client(\n self.loop, self.session, self.host,\n self.config.websocketport, self.async_session_handler)\n self.websocket.start()\n else:\n _LOGGER.error('No deCONZ config available')", "def open(self):\n self._lock.acquire()\n try:\n self._relaypid, self._portoffset = self._check_tcprelay()\n logger.debug(\n \"PIGGYBACK TCPRELAY\"\n \"PID: {0} PORT: {1}\".format(self._relaypid,\n self._portoffset))\n except AttributeError:\n # TODO: tcprelays might want to close when test is over???\n self._portoffset = get_available_portoffset()\n command = \"/usr/local/bin/tcprelay --portoffset {0} \" \\\n \"--locationid {1} rsync telnet \" \\\n \"ssh > /tmp/tcprelay.{1}.log 2>&1\" \\\n \" &\".format(self._portoffset, self.locationid_param)\n logger.debug(\"SPAWNING TCPRELAY - {0}\".format(command))\n child = subprocess.Popen([\"bash\", \"-c\", command], close_fds=True)\n time.sleep(0.5)\n try:\n self._relaypid, self._portoffset = self._check_tcprelay()\n except AttributeError:\n logger.error(\n \"FAILED to SPAWN TCPRELAY - CMD {0} \"\n \"OUTPUT: {1} ERROR: {2} RC: {3}\".format(command,\n child.stdout,\n child.stderr,\n child.returncode))\n finally:\n self._lock.release()", "def connect_to_worker():\n socket = context.socket(zmq.REQ)\n socket.connect(\"tcp://localhost:5555\")\n return socket", "async def _outgoing_ws(self, pid, websocket):\n character = self.players[pid]\n\n while not websocket.closed:\n msg = await character.msgs.get()\n\n # TODO: try to get more messages and buffer writes?\n try:\n await websocket.send(msg + \"\\n\\r\")\n except websockets.exceptions.ConnectionClosed:\n break\n\n logging.debug(\"_outgoing_ws closed for %s\", pid)", "def startNode(klass):\n try:\n ws = klass('ws://localhost:8080/ws')\n ws.daemon = False\n ws.connect()\n except:\n ws.close()", "def __init__(self, port: int, max_worker_threads: int = 4):\n 
super().__init__(max_worker_threads=max_worker_threads)\n self.do_stop = False\n self.port: int = port\n self._is_running: bool = False\n self._server: WebSocketServer = websockets.serve(\n ws_handler=self._handler,\n host='127.0.0.1',\n port=self.port,\n loop=self._loop)", "async def gw_start(test_cli):\n gw_url = await get_gw(test_cli)\n return await websockets.connect(gw_url)", "def _ws_connect(self):\n\n return websocket.websocket_connect(\n 'ws://localhost:{}{}'.format(self.get_http_port(), self.request)\n )", "def open(self):\n broker = os.path.join(getsitepackages()[0], 'pynq_networking', 'rsmb',\n 'rsmb', 'src', 'broker_mqtts')\n\n self.close()\n os.system(f\"nohup {broker} > {self.log} &\")\n\n for t in MQTT_PACKET_TYPES:\n bind_layers(MQTT, t, {'type': t.type})\n\n bind_layers(TCP, MQTT_Stream, {'dport': self.mqtt_port})\n bind_layers(TCP, MQTT_Stream, {'sport': self.mqtt_port})\n\n for t in MQTTSN_PACKET_TYPES:\n bind_layers(MQTTSN, t, {'type': t.type})\n\n bind_layers(UDP, MQTTSN, {'dport': self.mqttsn_port})\n bind_layers(UDP, MQTTSN, {'sport': self.mqttsn_port})", "def websocket_init(self, payload, *args, **kwargs):\n data = json.loads(str(payload, \"utf-8\"))\n self.is_connecting = False\n if url := data.get(\"url\"):\n self.gateway = f\"{url}/?v={DISCORD_API_VERSION}&encoding=json\".encode(\"utf-8\")\n useragent = kwargs.pop(\"useragent\", DISCORD_USER_AGENT)\n headers = kwargs.pop(\n \"headers\",\n {\n \"Authorization\": [f\"Bot {DISCORD_BOT_TOKEN}\"],\n \"Content-Type\": [\"application/json\"],\n },\n )\n\n logger.log_info(\"Connecting to Discord Gateway...\")\n WebSocketClientFactory.__init__(\n self, url, *args, headers=headers, useragent=useragent, **kwargs\n )\n self.start()\n else:\n logger.log_err(\"Discord did not return a websocket URL; connection cancelled.\")", "async def test_websocket_application():\n application = URLRouter([path(\"testws/<str:message>/\", KwargsWebSocketApp())])\n communicator = WebsocketCommunicator(application, \"/testws/test/\")\n connected, subprotocol = await communicator.connect()\n # Test connection\n assert connected\n assert subprotocol is None\n message = await communicator.receive_from()\n assert message == \"test\"\n await communicator.disconnect()", "def on_websocket_open(self) -> None:\n raise NotImplementedError() # pragma: no cover", "def main():\n\n symbols = constants.SYMBOL_LIST\n if not symbols:\n logging.info(\"No symbol entered to continue.\")\n sys.exit()\n\n try:\n input_price = float(constants.PRICE_INPUT)\n except ValueError:\n logging.error(\"That wasn't a number!\")\n\n data_feeder = DataFeeder(symbols, input_price)\n\n def on_message(ws, message, data_feeder):\n \"\"\"\n This function is called when a message is received by the web-socket.\n \"\"\"\n data_feeder.store_trading_data_into_db(data=data_feeder.process_received_message(message))\n\n def on_error(ws, error):\n \"\"\"\n This function is called when there is an error in the web-socket connection.\n \"\"\"\n logging.error(\"Error:-\", error)\n\n def on_close(ws):\n \"\"\"\n This function is called when the web-socket connection is closed.\n \"\"\"\n logging.info(\"### Web-socket Connection Closed ###\")\n\n try:\n websocket.enableTrace(True)\n ws = websocket.WebSocketApp(data_feeder.url,\n on_message = lambda ws, message: on_message(ws, message, data_feeder),\n on_error = on_error,\n on_close = on_close)\n ws.run_forever()\n except Exception as e:\n logging.error('Could not connect to web-socket',str(e))", "async def ws_cmd(args):\n url = 
\"ws://{}:{}/ws/device/\".format(\n args.server, args.port)\n headers = {'devicetoken': args.token}\n while True:\n try:\n async with websockets.connect(\n url, extra_headers=headers) as websocket:\n logger.info(\"ws server connected...\")\n try:\n while True:\n data = await websocket.recv()\n data = json.loads(data)\n\n if data['type'] == 'cmd':\n status, msg = await run_cmd(data['cmd'])\n logging.info(\"result: {}\".format(msg))\n await websocket.send(json.dumps({\n \"type\": \"cmd\",\n \"msg\": msg,\n }))\n except Exception:\n logger.exception(\"{} error\".format(data))\n except Exception:\n await asyncio.sleep(args.retval)\n logger.info(\"retry connected...\")", "def start_websocket_server(self, addr, port):\n app = SLSApplication(self, default_host=addr)\n app.listen(port)\n log.info(f\"Serving SLS/Websocket on ({addr}, {port})\")\n tornado.ioloop.IOLoop.current().start()", "async def test_websocket_communicator():\n communicator = WebsocketCommunicator(SimpleWebsocketApp(), \"/testws/\")\n # Test connection\n connected, subprotocol = await communicator.connect()\n assert connected\n assert subprotocol is None\n # Test sending text\n await communicator.send_to(text_data=\"hello\")\n response = await communicator.receive_from()\n assert response == \"hello\"\n # Test sending bytes\n await communicator.send_to(bytes_data=b\"w\\0\\0\\0\")\n response = await communicator.receive_from()\n assert response == b\"w\\0\\0\\0\"\n # Test sending JSON\n await communicator.send_json_to({\"hello\": \"world\"})\n response = await communicator.receive_json_from()\n assert response == {\"hello\": \"world\"}\n # Close out\n await communicator.disconnect()", "def connect_subproc():\n return factory.connect_subproc([sys.executable, \"-u\", SERVER_FILE, \"-q\", \"-m\", \"stdio\"], \n SlaveService)", "async def on_connect(websocket, path):\n charge_point_id = path.strip('/')\n cp = ChargePoint(charge_point_id, websocket)\n\n await cp.start()", "def websocket_servient():\n\n ws_port = find_free_port()\n ws_server = WebsocketServer(port=ws_port)\n\n servient = Servient(catalogue_port=None)\n servient.add_server(ws_server)\n\n @tornado.gen.coroutine\n def start():\n raise tornado.gen.Return((yield servient.start()))\n\n wot = tornado.ioloop.IOLoop.current().run_sync(start)\n\n property_name_01 = uuid.uuid4().hex\n property_name_02 = uuid.uuid4().hex\n action_name_01 = uuid.uuid4().hex\n event_name_01 = uuid.uuid4().hex\n\n td_dict = {\n \"id\": uuid.uuid4().urn,\n \"name\": uuid.uuid4().hex,\n \"properties\": {\n property_name_01: {\n \"observable\": True,\n \"type\": \"string\"\n },\n property_name_02: {\n \"observable\": True,\n \"type\": \"string\"\n }\n },\n \"actions\": {\n action_name_01: {\n \"input\": {\n \"type\": \"object\"\n },\n \"output\": {\n \"type\": \"string\"\n },\n }\n },\n \"events\": {\n event_name_01: {\n \"type\": \"string\"\n }\n },\n }\n\n td = ThingDescription(td_dict)\n\n exposed_thing = wot.produce(td.to_str())\n exposed_thing.expose()\n\n @tornado.gen.coroutine\n def action_handler(parameters):\n input_value = parameters.get(\"input\")\n arg_b = input_value.get(\"arg_b\") or uuid.uuid4().hex\n raise tornado.gen.Return(input_value.get(\"arg_a\") + arg_b)\n\n exposed_thing.set_action_handler(action_name_01, action_handler)\n\n yield servient\n\n @tornado.gen.coroutine\n def shutdown():\n yield servient.shutdown()\n\n tornado.ioloop.IOLoop.current().run_sync(shutdown)", "def start(self):\n if config['port'] or config['host']:\n port = config['port'] or 5222\n host = config['host'] or 
sleekxmpp.JID(config['jid']).host\n addr = (host, port)\n else:\n addr = tuple()\n self.connect(addr)\n self.process(threaded=True)", "async def register(websocket):\n app['websockets'].add(websocket)\n await notify_users()", "def get_websocket(host, port, route='/', ssl=False):\n client = MessageBusClient(host, port, route, ssl)\n client.run_in_thread()\n return client", "def push_message(target_binary, target_platform, target_type, target_port, message):\n stderr = \"\"\n stdout = \"\"\n if target_type == APP_TYPE_SERVER:\n try:\n # start the server\n log(\"starting the server\")\n if target_platform == PLATFORM_WINDOWS:\n log(\"using wine\")\n server_instance = subprocess.Popen([\"wine\", target_binary], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n log(\"running binary\")\n server_instance = subprocess.Popen([target_binary], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n \n # give it time to start up\n log(\"allowing time to start\")\n time.sleep(LOAD_TIME)\n \n # warn the user of potential error message\n log(\"expect some kind of error message, just close it if it pops up\")\n\n # encode message\n encoded_message = str.encode(message) \n\n # send message\n send_message_tcp(\"localhost\", target_port, encoded_message)\n\n # record error message\n stderr = server_instance.stderr.read().decode()\n stdout = server_instance.stdout.read().decode()\n except:\n pass\n finally:\n server_instance.kill()\n else:\n try:\n if target_platform == PLATFORM_WINDOWS:\n log(\"using wine\")\n process_instance = subprocess.Popen([\"wine\", target_binary], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n log(\"running binary\")\n process_instance = subprocess.Popen([target_binary], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n \n # push map message to stdin\n process_instance.stdin.write(message)\n\n # record error message\n stderr = process_instance.stderr.read().decode()\n stdout = process_instance.stdout.read().decode()\n except:\n pass \n finally:\n process_instance.kill()\n return stdout, stderr", "def test_ducts_with_subprocess(self):\n assert_that(SUBPROCESS_TEST_SCRIPT).exists()\n proc = None\n parent = None\n try:\n parent = MessageDuctParent.psuedo_anonymous_parent_duct()\n parent.bind()\n proc = subprocess.Popen(\n [sys.executable, SUBPROCESS_TEST_SCRIPT, parent.listener_address], env={'PYTHONPATH': ROOT_DIR}\n )\n assert_that(parent.listen()).is_true()\n for _ in range(100):\n parent.send(\"pingpong\")\n parent.poll(1)\n assert_that(parent.recv()).is_equal_to(\"pingpong\")\n parent.send(None)\n time.sleep(1)\n finally:\n if parent:\n parent.close()\n if proc:\n proc.terminate()", "def start(self):\n #url = '{}://{}:{}/'.format('http',\n # self.ip,\n # self.port)\n #self.service_info = ServiceInfo(\n # '_webthing._sub._http._tcp.local.',\n # '{}._http._tcp.local.'.format(self.name),\n # address=socket.inet_aton(self.ip),\n # port=self.port,\n # properties={\n # 'url': url,\n # },\n # server='{}.local.'.format(socket.gethostname()))\n #self.zeroconf = Zeroconf()\n #self.zeroconf.register_service(self.service_info)\n\n # If WebSocketS used and NOT running in thread, and WebServer IS\n # running in thread make shure WebServer has enough stack size to\n # handle also the WebSocket requests.\n log.info('Starting Web Server')\n self.server.Start(threaded=srv_run_in_thread, stackSize=8192)", "def start_mqtt_auth_watcher(run_event):\n print('START MQTT WATCHER')\n cmd = ['/app/src/mosquitto_watcher.sh']\n # , cwd=os.path.join(os.path.dirname(__file__))\n process = 
subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\n while run_event.is_set():\n time.sleep(1)\n process.terminate()\n process.wait()", "def __init__(self, parent=None):\n super(robotTwoTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "def _process_watcher(parent_pid, child_pid, child_host, child_port, minecraft_dir, conn):\n port = child_port\n def port_watcher():\n nonlocal port \n \n port = conn.recv()[0]\n\n port_thread = threading.Thread(target=port_watcher)\n port_thread.start()\n\n # Wait for processes to be launched\n time.sleep(1)\n try:\n child = psutil.Process(child_pid)\n except psutil.NoSuchProcess:\n child = None\n try:\n parent = psutil.Process(parent_pid)\n except psutil.NoSuchProcess:\n parent = None\n \n while True:\n try:\n time.sleep(0.1) # Sleep for a short time, and check if subprocesses needed to be killed.\n\n if not parent.is_running() or parent is None:\n if not (child is None):\n try:\n Instance._kill_minecraft_via_malmoenv(child_host,port)\n time.sleep(2)\n except:\n pass\n \n InstanceManager._reap_process_and_children(child)\n try:\n shutil.rmtree(minecraft_dir)\n except:\n logger.warning(\"Failed to delete temporary minecraft directory. It may have already been removed.\")\n pass\n return\n # Kill the watcher if the child is no longer running.\n # If you want to attempt to restart the child on failure, this\n # would be the location to do so.\n if not child.is_running():\n return\n except KeyboardInterrupt:\n pass", "def main():\n my_painting_mqtt_client = MyPaintingMQTTClient()\n my_painting_mqtt_client.run_app()", "def open(self):\n APP.clients.append(self)\n # self.send_status()\n log(\"WebSocket opened. 
{0} child(s) connected\".\n format(len(APP.clients)))", "async def _incoming_ws(self, pid, websocket):\n # websockets have a convenient __aiter__ interface, allowing\n # us to just iterate over the messages forever.\n # Under the hood, if there are no messages available from the\n # WebSocket, this code will yield and until another message is\n # received.\n\n # If the WebSocket is disconnected unexpectedly, the for loop\n # will produce an exception.\n try:\n async for msg in websocket:\n # Trim whitespace\n msg = msg.strip()\n # Make sure the message isn't an empty string\n if msg:\n # Pass the message onto the server's handler.\n self.on_player_msg(pid, msg)\n # If we get this error, then player probably just logged off.\n except websockets.exceptions.ConnectionClosed:\n pass\n finally:\n logging.debug(\"_incoming_ws closed for %s\", pid)", "def __init__(self, parent=None):\n super(embeddedTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "def open_rtcp_port(self):\n self.rtcp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)", "def connectionMade(self):\n self._pid = self.transport.pid\n if self._pid:\n self.logger(\"Process has pid %d\" % self._pid)\n self.transport.closeStdin() # close stdin", "def start_process():\n global command, process\n\n def on_data(data):\n data = data.decode().strip()\n print('{}'.format(data))\n\n cmd = command.split(' ')\n\n if process:\n process.terminate()\n\n process = MySubprocess(cmd, -1, functools.partial(on_data), None, None)", "def run():\n\n assert SSH_HOST is not None, 'SSH_HOST not set. Please configure.'\n\n\n def connect():\n port = find_open_port(SSH_HOST)\n if init_tunnel(SSH_HOST, port):\n print 'Tunnel initialized, pid:', PID\n return {'ssh tunnel entry': 'ssh://{}:{}'.format(SSH_HOST, port)}\n return {}\n\n def is_pid_alive(pid):\n processes = subprocess.check_output(['ps', '-fx'])\n for line in processes.splitlines():\n lpid = line.split()[0]\n if lpid == pid:\n return True\n return False\n\n def find_open_port(host, start_port=22222):\n i = 0\n while i < 1000:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((host, start_port + i))\n if result == 0:\n print \"Port is already used: \", start_port + i\n i += 1\n else:\n return start_port + i\n \n\n \n\n if PID is None:\n return connect()\n else:\n # check if process is still alive\n if is_pid_alive(PID):\n print 'Tunnel still active. Not doing anything.'\n else:\n return connect()", "def wpListenerStart(outboundMessageQueue):\n print \"Starting http POST server for wp updates\"\n wpPush.writeOut = outboundMessageQueue", "def __init__(self, parent=None):\n super(robotFourTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "def open(self):\n try:\n self.error_count = 0\n self.conn_thread = Thread(target=self.connect, name='Websocket Connection')\n self.conn_thread.start()\n except Exception as e:\n self.conn_thread.join()\n self.on_error(self.ws, \"Error from openning connection. 
Error -> {}\".format(e))", "def __init__(self, parent=None):\n super(robotThreeTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "def __init__(self, process_name=sys.argv[0], transport_factory=transport.TransportUnixFactory()):\n self.factory = transport_factory\n self.server = self.factory.serve()\n self.server.addEndpoint(general.EndpointIntrospect())\n processinfo = general.EndpointProcessInfo()\n processinfo.setProcessName(process_name)\n self.server.addEndpoint(processinfo)\n self.server.addEndpoint(tracing.EndpointTraceMapping())\n self.server.addEndpoint(tracing.EndpointNativeTraceSender())", "def __init__(self, parent=None):\n super(robotOneTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "async def test_run_without_launching(self):\n\n port = get_first_available_port(7860, 7870)\n\n io = gr.Interface(lambda s: s, gr.Textbox(), gr.Textbox()).queue()\n\n config = uvicorn.Config(app=io.app, port=port, log_level=\"warning\")\n\n server = Server(config=config)\n server.run_in_thread()\n\n try:\n async with websockets.connect(f\"ws://localhost:{port}/queue/join\") as ws:\n completed = False\n while not completed:\n msg = json.loads(await ws.recv())\n if msg[\"msg\"] == \"send_data\":\n await ws.send(json.dumps({\"data\": [\"Victor\"], \"fn_index\": 0}))\n if msg[\"msg\"] == \"send_hash\":\n await ws.send(\n json.dumps({\"fn_index\": 0, \"session_hash\": \"shdce\"})\n )\n if msg[\"msg\"] == \"process_completed\":\n completed = True\n assert msg[\"output\"][\"data\"][0] == \"Victor\"\n finally:\n server.close()", "def ws_request(self, ws_url):\n url = \"wss://stream.binance.com:9443/ws/%s\" % (ws_url)\n\n websocket.enableTrace(True)\n ws = websocket.WebSocketApp(url,\n on_error=self.ws_on_error,\n on_close=self.ws_on_close)\n\n return ws", "def _spawn(self, protocol, args, env=None):\n return reactor.spawnProcess(protocol, self.cmd, args, env=env)", "def spawn_new_player(self):\n tmux_invocation = [\n \"tmux\",\n \"new-session\",\n \"-s\" + TMUX_SESSION,\n \"-d\",\n \"mpv\",\n \"--no-video\",\n \"--term-playing-msg='${media-title}'\",\n \"--idle\",\n \"--input-ipc-server=\" + SOCKET_NAME,\n ]\n # start tmux with mpv and wait until ready\n os.spawnvp(os.P_WAIT, tmux_invocation[0], tmux_invocation)\n while not self.connect_player():\n sleep(0.1)", "def spawn(self, pcls, args):\n\n childp, ownp = multiprocessing.Pipe()\n p = pcls(self._id, childp)\n p._loglevel = self._loglevel\n p.start()\n\n childp.close()\n cid = ownp.recv()\n ownp.send((\"setup\", args))\n ownp.send(\"start\")\n\n self._child_procs.append((p.pid, cid))\n\n return cid", "async def _connect(self, subsystem=None, exec_command=None):\n ip, port, user, passwd = await self.dest_info()\n self._extra_info[\"peer\"] = PeerInfo(ip, port)\n\n if self._devinfo.proxy_required(ip):\n host = self.service.get_http_proxy_url(ip)\n elif self._devinfo.should_nat(ip):\n host = await self._devinfo.translate_address(ip)\n else:\n host = ip\n\n self.logger.info(\"Connecting to: %s: %d\", 
host, port)\n\n # known_hosts is set to None to disable the host verifications. Without\n # this the connection setup fails for some devices\n conn, _ = await asyncssh.create_connection(\n self._client_factory,\n host=host,\n port=port,\n username=user,\n password=passwd,\n client_keys=None,\n known_hosts=None,\n )\n\n chan, cmd_stream = await self._conn.create_session(\n lambda: CommandStream(self, self._loop),\n encoding=None,\n term_type=self.TERM_TYPE,\n subsystem=subsystem,\n command=exec_command,\n )\n self._chan = chan\n return cmd_stream", "async def start(self):\n # Avoid being rate limited by Twitter when restarting the stream with the same follow list.\n if self.sub_process and not set(self.sub_process.follows) != set(self.get_follows()):\n return\n\n # Kill the current stream before starting a new one\n self.stop()\n\n # No need to start a stream if we're not following anyone\n if not self.conf.follows:\n return\n\n # Create a new multi-processes queue, a new stream object and a new Process\n log.info('Creating new sub-process.')\n self.mp_queue = multiprocessing.Queue()\n self.mp_queue.cancel_join_thread()\n self.sub_process = SubProcessStream(self.mp_queue, self.conf.credentials, self.get_follows())\n log.info('Created new sub-process.')\n\n # Schedule the polling daemon (it will take care of starting the child process)\n self.daemon = asyncio.ensure_future(self._run())", "def test_create_websocket_url(self):\n\n self.assertEqual(\n 'ws://host:8888/',\n typhoonae.websocket.create_websocket_url())\n\n self.assertEqual(\n 'ws://host:8888/foo',\n typhoonae.websocket.create_websocket_url('/foo'))", "async def handler(websocket, path):\n\n print(\"Connected\")\n # print(vars(websocket))\n \n # global connected\n # # Register.\n # connected.add(websocket)\n # try:\n # # Implement logic here.\n # await asyncio.wait([ws.send(\"Hello!\") for ws in connected])\n # await asyncio.sleep(10)\n # finally:\n # # Unregister.\n # connected.remove(websocket)\n\n while True:\n listener_task = asyncio.ensure_future(websocket.recv())\n producer_task = asyncio.ensure_future(producer())\n done, pending = await asyncio.wait(\n [listener_task, producer_task],\n return_when=asyncio.FIRST_COMPLETED)\n\n if listener_task in done:\n message = listener_task.result()\n await consumer(message)\n else:\n listener_task.cancel()\n\n if producer_task in done:\n message = producer_task.result()\n await websocket.send(message)\n else:\n producer_task.cancel()", "def launch():\n\n core.openflow.addListenerByName(\"ConnectionUp\", _handle_ConnectionUp)\n log.info(\"Hub running\")", "async def start_sockets(data: dict) -> tuple:\n\n # create Websockets connections\n bnc_websocket = await websockets.connect(data['binance']['url'], max_queue=None, ping_interval=None)\n ftx_websocket = await websockets.connect(data['ftx']['url'], max_queue=None, ping_interval=None)\n\n # subscribing to updates\n await bnc_websocket.send(json.dumps(data['binance']['subscribe_request']))\n await ftx_websocket.send(json.dumps(data['ftx']['subscribe_request']))\n\n return bnc_websocket, ftx_websocket", "def main():\n\tports = glob.glob(\"/dev/tty.wchusbserial*\") + glob.glob(\"/dev/tty.usbserial*\") + glob.glob(\"COM3\") + glob.glob(\"COM4\")\n\tBAUDRATE = 9600\n\tchoice = int(input((str(ports) + \" enter numerical index for port: \")))\n\tportname = ports[choice]\n\tport = None\n\tsending_queue = None\n\treceiving_process_on = None\n\treceiving_process = None\n\ttry:\n\t\tsending_queue = multiprocessing.Queue()\n\t\treceiving_process_on = 
multiprocessing.Value(c_bool,False)\n\t\treceiving_process = multiprocessing.Process(target = communication, args = (portname,BAUDRATE,sending_queue,receiving_process_on))\n\t\treceiving_process.start()\n\t\twhile True:\n\t\t\tword = input(\"Enter a message: \")\n\t\t\tsending_queue.put(create_chunk(word)) #sending 32 bytes to the process queue\n\t\t\t\n\texcept Exception as e:\n\t\tprint(\"ERROR:\", e)\n\tfinally:\n\t\treceiving_process_on.value = False\n\t\tfor i in range(10): #wait for the process to stop\n\t\t\tpass\n\t\tif receiving_process != None:\n\t\t\treceiving_process.join()\n\t\t\n\t\tif sending_queue != None:\n\t\t\tsending_queue.close()", "def open(self, *args, **kwargs):\n self._open = True\n self._stat.websocket_stream_open += 1\n # Create subscription for the stream\n url = self.request.uri\n self._logger.info(\"Websocket connection %s %s\", url, self)\n\n async_future = asyncio.async(\n self.netconf_subscribe(\n self.request.uri,\n self.request.headers.get(\"Authorization\")), \n loop=self._asyncio_loop)\n yield tornado.platform.asyncio.to_tornado_future(async_future)", "def websocket_server():\n\n servient = Servient()\n\n thing_01_id = uuid.uuid4().urn\n thing_02_id = uuid.uuid4().urn\n\n exposed_thing_01 = ExposedThing(servient=servient, thing=Thing(id=thing_01_id))\n exposed_thing_02 = ExposedThing(servient=servient, thing=Thing(id=thing_02_id))\n\n prop_name_01 = uuid.uuid4().hex\n prop_name_02 = uuid.uuid4().hex\n prop_name_03 = uuid.uuid4().hex\n event_name_01 = uuid.uuid4().hex\n action_name_01 = uuid.uuid4().hex\n\n prop_value_01 = Faker().sentence()\n prop_value_02 = Faker().sentence()\n prop_value_03 = Faker().sentence()\n\n prop_init_01 = PropertyFragmentDict({\n \"type\": \"string\",\n \"observable\": True\n })\n\n prop_init_02 = PropertyFragmentDict({\n \"type\": \"string\",\n \"observable\": True\n })\n\n prop_init_03 = PropertyFragmentDict({\n \"type\": \"string\",\n \"observable\": True\n })\n\n event_init_01 = EventFragmentDict({\n \"type\": \"object\"\n })\n\n action_init_01 = ActionFragmentDict({\n \"input\": {\"type\": \"string\"},\n \"output\": {\"type\": \"string\"}\n })\n\n def async_lower(parameters):\n loop = tornado.ioloop.IOLoop.current()\n input_value = parameters.get(\"input\")\n return loop.run_in_executor(None, lambda x: time.sleep(0.1) or x.lower(), input_value)\n\n exposed_thing_01.add_property(prop_name_01, prop_init_01, value=prop_value_01)\n exposed_thing_01.add_property(prop_name_02, prop_init_02, value=prop_value_02)\n exposed_thing_01.add_event(event_name_01, event_init_01)\n exposed_thing_01.add_action(action_name_01, action_init_01, async_lower)\n\n exposed_thing_02.add_property(prop_name_03, prop_init_03, value=prop_value_03)\n\n ws_port = find_free_port()\n\n ws_server = WebsocketServer(port=ws_port)\n ws_server.add_exposed_thing(exposed_thing_01)\n ws_server.add_exposed_thing(exposed_thing_02)\n\n @tornado.gen.coroutine\n def start():\n yield ws_server.start()\n\n tornado.ioloop.IOLoop.current().run_sync(start)\n\n url_thing_01 = build_websocket_url(exposed_thing_01, ws_server, ws_port)\n url_thing_02 = build_websocket_url(exposed_thing_02, ws_server, ws_port)\n\n yield {\n \"exposed_thing_01\": exposed_thing_01,\n \"exposed_thing_02\": exposed_thing_02,\n \"prop_name_01\": prop_name_01,\n \"prop_init_01\": prop_init_01,\n \"prop_value_01\": prop_value_01,\n \"prop_name_02\": prop_name_02,\n \"prop_init_02\": prop_init_02,\n \"prop_value_02\": prop_value_02,\n \"prop_name_03\": prop_name_03,\n \"prop_init_03\": prop_init_03,\n 
\"prop_value_03\": prop_value_03,\n \"event_name_01\": event_name_01,\n \"event_init_01\": event_init_01,\n \"action_name_01\": action_name_01,\n \"action_init_01\": action_init_01,\n \"ws_server\": ws_server,\n \"url_thing_01\": url_thing_01,\n \"url_thing_02\": url_thing_02,\n \"ws_port\": ws_port\n }\n\n @tornado.gen.coroutine\n def stop():\n yield ws_server.stop()\n\n tornado.ioloop.IOLoop.current().run_sync(stop)", "def add(self, websocket):\n if websocket in self:\n return\n\n logger.info(\"Managing websocket %s\" % format_addresses(websocket))\n websocket.opened()\n with self.lock:\n fd = websocket.sock.fileno()\n self.websockets[fd] = websocket\n self.poller.register(fd)", "async def relay(websocket, path):\n # register(websocket) sends user_event() to websocket\n await register(websocket)\n try:\n while True:\n try:\n message = await websocket.recv()\n except ConnectionClosed:\n break\n else:\n await relay_message(message, current_user=websocket)\n finally:\n await unregister(websocket)", "async def open_websocket_server(sock, filter=None): # pylint: disable=W0622\n ws = await create_websocket_server(sock, filter=filter)\n try:\n yield ws\n finally:\n await ws.close()", "def openRtpPort(self):\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tself.rtpSocket_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\t\t\t\tself.rtpSocket_client.bind(('', self.rtpPort))\r\n\t\t\t\tself.rtpSocket_client.settimeout(0.5)\r\n\t\t\t\tself.listenRtp()\r\n\t\t\texcept Exception as err:\r\n\t\t\t\tif (str(err) == \"[Errno 9] Bad file descriptor\"):\r\n\t\t\t\t\tbreak", "def launch_proxy(local_port, remote_port, remote_ip):\n\n path = os.path.abspath(os.path.dirname(__file__))\n ws = os.path.join(path, \"../../proxy/bin/wistar_proxy.py\")\n\n wistar_proxy_path = os.path.abspath(ws)\n\n cmd = \"/usr/bin/env python %s --local-port=%s --remote-ip=%s --remote-port=%s &\" % (wistar_proxy_path,\n local_port,\n remote_ip,\n remote_port)\n\n logger.debug(cmd)\n\n proc = subprocess.Popen(cmd, shell=True, close_fds=True)\n time.sleep(1)\n return proc.pid", "async def websocket_client(self):\n return await websocket(CLIENT, \"/websocket\")", "def spawnChild(self, protocolName):\n from twisted.internet import reactor\n\n inheritedSocket = self.dispatcher.addSocket()\n inheritedFD = inheritedSocket.childSocket().fileno()\n\n processProtocol = ChildProcessProtocol(self, inheritedSocket)\n\n # Annoyingly, twistd *has* to make a pid file.\n pidFileFD, pidFileName = mkstemp()\n close(pidFileFD)\n unlink(pidFileName)\n\n arguments = (\n sys.executable, b\"-c\",\n b\"from twisted.scripts.twistd import run; run()\",\n b\"--pidfile\", pidFileName,\n b\"--nodaemon\", b\"--logfile\", b\"-\",\n self.pluginName,\n b\"--inherited-fd=3\",\n b\"--protocol\", protocolName,\n )\n\n self.log.debug(\n u\"Spawning child process for protocol {protocol!r} \"\n u\"with arguments: {arguments}\",\n protocol=protocolName, arguments=arguments,\n )\n\n transport = reactor.spawnProcess(\n processProtocol,\n sys.executable, arguments, env={\n b\"PYTHONPATH\": b\":\".join(sys.path),\n },\n childFDs={0: b\"w\", 1: b\"r\", 2: b\"r\", 3: inheritedFD}\n )\n\n child = ChildProcess(transport, processProtocol)\n\n self.log.info(\n u\"Spawned child process #{child.transport.pid} \"\n u\"for protocol {protocol!r}\",\n child=child, protocol=protocolName, arguments=arguments,\n )\n\n self.children.add(child)", "def main():\n if len(sys.argv) != 3:\n sys.exit(\"Usage: python receiver.py [Receiver Port] [Window Size]\")\n receiver_port = 
int(sys.argv[1])\n window_size = int(sys.argv[2])\n receiver(receiver_port, window_size)", "def connect(self):\n assert self.listening\n assert not self.connected\n ctx = zmq.Context.instance()\n port = NODE_INFOS[self.ID].port\n self._send_socket = ctx.socket(zmq.PUB)\n self._send_socket.bind(f\"tcp://*:{port}\")\n self.connected = True", "def start(self):\n zmq_uri = (\n \"{protocol}://{address}:{port}\".format(\n protocol=self.protocol, address=self.address, port=self.port\n )\n if self.port\n else \"{protocol}://{address}\".format( # noqa\n protocol=self.protocol, address=self.address\n )\n )\n log.debug(\"ZMQ URI: %s\", zmq_uri)\n self.ctx = zmq.Context()\n if hasattr(zmq, self.type):\n skt_type = getattr(zmq, self.type)\n else:\n skt_type = zmq.PULL\n self.sub = self.ctx.socket(skt_type)\n self.sub.connect(zmq_uri)\n if self.hwm is not None:\n self.sub.setsockopt(zmq.RCVHWM, self.hwm)\n if self.recvtimeout is not None:\n log.debug(\"Setting RCVTIMEO to %d\", self.recvtimeout)\n self.sub.setsockopt(zmq.RCVTIMEO, self.recvtimeout)\n if self.keepalive is not None:\n log.debug(\"Setting TCP_KEEPALIVE to %d\", self.keepalive)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE, self.keepalive)\n if self.keepalive_idle is not None:\n log.debug(\"Setting TCP_KEEPALIVE_IDLE to %d\", self.keepalive_idle)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_IDLE, self.keepalive_idle)\n if self.keepalive_interval is not None:\n log.debug(\"Setting TCP_KEEPALIVE_INTVL to %d\", self.keepalive_interval)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_INTVL, self.keepalive_interval)", "def subscribe(self):\n fd = libplasma.subscribe(self.conn)\n self.notification_sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n # Make the socket non-blocking.\n self.notification_sock.setblocking(0)", "def create_connection(url, timeout=None, **options):\r\n websock = WebSocket()\r\n websock.settimeout(timeout != None and timeout or default_timeout)\r\n websock.connect(url, **options)\r\n return websock", "def make_communicator(token):\n\n return WebsocketCommunicator(TokenAuthMiddlewareStack(\n URLRouter(\n websocket_urlpatterns\n )\n ), '/ws/chat/?token=' + token)", "def __init__(self,\n *,\n qrcode: bool = False,\n host: str = \"0.0.0.0\",\n port: int = 8000,\n logger: logging.Logger = logging.getLogger(\n 'mvt.phone_sensor'),\n log_level: int = logging.WARN,\n proxy_client_from: Optional[str] = None):\n\n self._ws: Optional[websockets.WebSocketServerProtocol] = None\n self._out: Queue[Union[websockets.Data, ClientDisconnect]] = Queue()\n self._waiting = False\n self._qrcode = qrcode\n self._proxy_client_from = proxy_client_from\n self.logger = logger\n self.logger.setLevel(log_level)\n self.client_connected = False\n self.loop = asyncio.new_event_loop()\n self._in: asyncio.Queue[str] = asyncio.Queue(loop=self.loop)\n self.stop_flag = self.loop.create_future()\n\n self.server_thread = Thread(target=self._start_server,\n kwargs={'host': host, 'port': port},\n daemon=True)\n self.server_thread.start()\n assert self._out.get() == 'ready', \"server failed to start\"", "async def _listen(self,sub_params): \n async with websockets.connect(self.url) as websocket:\n await websocket.send(json.dumps(sub_params))\n # self.keepalive.start()\n start_time = time.time()\n while not self.shutdown_event.is_set():\n try:\n now = time.time()\n if((now - start_time) > 0.5):\n self.calculate_order_depth()\n start_time = now \n data = await websocket.recv()\n msg = json.loads(data)\n except ValueError as e:\n self.on_error(e)\n except Exception as e:\n 
self.on_error(e)\n else:\n self.on_message(msg)", "def __init__(self, callback, parent):\n IPC.__init__(self, UPSTREAM_CHANNEL, DOWNSTREAM_CHANNEL, callback)\n self.parent = parent\n self.controller = parent.controller\n self.logger = get_logger(\"ipc_server\")", "def __init__(self, target_addr: str, target_port: int, max_worker_threads: int = 2):\n super().__init__(max_worker_threads=max_worker_threads)\n self._addr: str = target_addr\n self._port: int = target_port\n self._websocket: WebSocketClientProtocol = None\n self._is_running: bool = False", "def make_server(connect_handler=None, message_handler=None, disconnect_handler=None):\n class Server(tornado.websocket.WebSocketHandler):\n def open(self):\n print('new connection')\n if connect_handler:\n return connect_handler(self)\n\n def on_message(self, message):\n if message_handler:\n return message_handler(json.loads(message), self)\n\n def on_close(self):\n print('connection closed')\n if disconnect_handler:\n return disconnect_handler(self)\n\n def check_origin(self, origin):\n return True\n return Server", "def _sendMessageToWeb(self, msg):\n if self.ioLoopInst is not None:\n msg = npToPy(msg)\n json_msg = json.dumps(msg)\n self.ioLoopInst.add_callback(sendWebSocketMessage, wsName='wsUser', msg=json_msg)\n else:\n print(f'WebDisplayMsg {msg}')", "def websocket_proxy_server(url, key=\"\"):\n def create_on_message(conn):\n def _fsend(data):\n data = bytes(data)\n conn.write_message(data, binary=True)\n return len(data)\n on_message = rpc._CreateEventDrivenServer(_fsend, \"WebSocketProxyServer\")\n return on_message\n\n @gen.coroutine\n def _connect(key):\n conn = yield websocket.websocket_connect(url)\n on_message = create_on_message(conn)\n temp = _server_env()\n # Start connecton\n conn.write_message(struct.pack('@i', RPC_MAGIC), binary=True)\n key = \"server:\" + key\n conn.write_message(struct.pack('@i', len(key)), binary=True)\n conn.write_message(key.encode(\"utf-8\"), binary=True)\n msg = yield conn.read_message()\n assert len(msg) >= 4\n magic = struct.unpack('@i', msg[:4])[0]\n if magic == RPC_MAGIC + 1:\n raise RuntimeError(\"key: %s has already been used in proxy\" % key)\n elif magic == RPC_MAGIC + 2:\n logging.info(\"RPCProxy do not have matching client key %s\", key)\n elif magic != RPC_MAGIC:\n raise RuntimeError(\"%s is not RPC Proxy\" % url)\n logging.info(\"Connection established\")\n msg = msg[4:]\n if msg:\n on_message(bytearray(msg), 3)\n\n while True:\n try:\n msg = yield conn.read_message()\n if msg is None:\n break\n on_message(bytearray(msg), 3)\n except websocket.WebSocketClosedError as err:\n break\n logging.info(\"WebSocketProxyServer closed...\")\n temp.remove()\n ioloop.IOLoop.current().stop()\n ioloop.IOLoop.current().spawn_callback(_connect, key)\n ioloop.IOLoop.current().start()", "def create_process(self, args=[], *popenargs, **kwargs):\n try:\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n kwargs.setdefault('startupinfo', startupinfo)\n except:\n pass\n kwargs.setdefault('universal_newlines', True)\n kwargs.setdefault('stdin', sys.stdin)\n return subprocess.Popen(self.build_args(args), *popenargs, **kwargs)", "def connect_to_websocket(self):\n conn = yield websocket_connect(\"wss://api.bitfinex.com/ws\")\n\n req = {\n \"event\": \"subscribe\",\n \"channel\": \"book\",\n \"pair\": \"BTCUSD\",\n \"freq\": \"F0\",\n }\n conn.write_message(json.dumps(req))\n while True:\n msg = yield conn.read_message()\n response = json.loads(msg)\n if response:\n if 
self.snapshot_received:\n # Perform update in database\n # and emit update to client\n self.perform_update(response)\n\n if isinstance(response, list) and not self.snapshot_received: # If true, store snapshot in database\n\n for data in response[1]: # here data is of form [price, count, amount]\n item_type = \"bid\" if data[2] > 0 else \"ask\" # bid if amt > 0, else ask\n item = self.add_new_bitfinex_item(item_type, data[0], data[1])\n self.session.add(item)\n self.session.commit()\n print(\"Bitfinex Snapshot Received\")\n self.snapshot_received = True # Set flag\n else:\n break", "def connect(self):\n if not self.jid or self.jid.node or self.jid.resource:\n raise ValueError,\"Cannot connect: no or bad JID given\"\n if not self.secret:\n raise ValueError,\"Cannot connect: no secret given\"\n if not self.server:\n raise ValueError,\"Cannot connect: no server given\"\n if not self.port:\n raise ValueError,\"Cannot connect: no port given\"\n\n self.lock.acquire()\n try:\n stream=self.stream\n self.stream=None\n if stream:\n stream.close()\n\n self.__logger.debug(\"Creating component stream: %r\" % (self.stream_class,))\n stream=self.stream_class(jid = self.jid,\n secret = self.secret,\n server = self.server,\n port = self.port,\n keepalive = self.keepalive,\n owner = self)\n stream.process_stream_error=self.stream_error\n self.stream_created(stream)\n stream.state_change=self.__stream_state_change\n stream.connect()\n self.stream=stream\n self.state_changed.notify()\n self.state_changed.release()\n except:\n self.stream=None\n self.state_changed.release()\n raise", "def main():\n global discuss_bot_id, discussion_chat_id\n r = requests.get('https://slack.com/api/rtm.connect', {'token': bot_token})\n discuss_bot_id = r.json()['self']['id']\n url = r.json()['url']\n r = requests.get('https://slack.com/api/conversations.list',\n {'token': bot_token})\n for channel in r.json()['channels']:\n if channel['name'] == 'discussion':\n discussion_chat_id = channel['id']\n print(discussion_chat_id)\n ws = websocket.WebSocketApp(\n url=url, on_message=on_message, on_error=on_error, on_close=on_close)\n ws.on_open = on_open\n ws.run_forever()" ]
[ "0.61489266", "0.6004585", "0.58952093", "0.5711386", "0.56981546", "0.5681631", "0.5521888", "0.55072397", "0.5495588", "0.54655325", "0.5456767", "0.5442702", "0.539571", "0.53936225", "0.5373037", "0.5360796", "0.53407484", "0.53166866", "0.52801496", "0.5262081", "0.52579105", "0.5254402", "0.52409124", "0.52344596", "0.5223999", "0.5215537", "0.51427954", "0.5129504", "0.5125885", "0.5121839", "0.5110913", "0.5101862", "0.50816673", "0.50723195", "0.50673586", "0.50544673", "0.50504315", "0.5034717", "0.50247204", "0.5011962", "0.4986483", "0.49851272", "0.4978769", "0.49636152", "0.49586925", "0.49535283", "0.4952887", "0.49380895", "0.49203682", "0.4912716", "0.49126905", "0.4911071", "0.49037394", "0.48782855", "0.48770666", "0.48755172", "0.48737508", "0.4859915", "0.48508734", "0.48465252", "0.4846103", "0.48368907", "0.48361948", "0.4834171", "0.48221582", "0.48208755", "0.4820002", "0.4815585", "0.4811578", "0.4810217", "0.48060796", "0.4803102", "0.48006904", "0.47973946", "0.47897273", "0.4778226", "0.47742003", "0.47741905", "0.47518063", "0.4738176", "0.47373068", "0.47342604", "0.47334433", "0.47236767", "0.4719852", "0.4718756", "0.4712782", "0.47094053", "0.47079933", "0.47060767", "0.46925116", "0.46925095", "0.46892974", "0.46827915", "0.46697548", "0.46693593", "0.46633467", "0.46618983", "0.46604016", "0.4656903", "0.46534008" ]
0.0
-1
Creates a group for a given node list. So far this is only an AiiDA verdi command.
def create_group(name, nodes, description=None): group, created = Group.get_or_create(name=name) if created: print('Group created with PK={} and name {}'.format(group.pk, group.name)) else: print('Group with name {} and pk {} already exists. Do you want to add nodes?[y/n]'.format(group.name, group.pk)) answer = raw_input() if answer.strip().lower() == 'y': pass else: return nodes2 = [] nodes2_pks = [] for node in nodes: try: node = int(node) except ValueError: pass nodes2_pks.append(node) try: nodes2.append(load_node(node)) except:# NotExistentError: pass group.add_nodes(nodes2) print('added nodes: {} to group {} {}'.format(nodes2_pks, group.name, group.pk)) if description: group.description = description return group
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def create(self, context=None):\n values = self.obj_get_changes()\n db_nodegroup = self.dbapi.create_nodegroup(values)\n self._from_db_object(self, db_nodegroup)", "def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))", "def createGroup(self):\n return _libsbml.ListOfGroups_createGroup(self)", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def create_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n required_properties = {\n 'displayName': str(args.get('display_name')),\n 'mailNickname': str(args.get('mail_nickname')),\n 'mailEnabled': args.get('mail_enabled') == 'true',\n 'securityEnabled': args.get('security_enabled')\n }\n\n # create the group\n group = client.create_group(required_properties)\n\n # display the new group and it's properties\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=f\"{required_properties['displayName']} was created successfully:\",\n t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Mail Enabled'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}\n return human_readable, entry_context, group", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def crea_grupo(self):\r\n \r\n self.comprueba_casos_seleccionados()", "def createGroup(root, group, fileList):\n topGroupElem = ElementTree.SubElement(root, ELEM_GROUP, {ATTR_NAME: group})\n headerGroupElem = None\n sourceGroupElem = None\n pathElem = None\n for fl in fileList:\n if fl.endswith(\".h\"):\n if headerGroupElem == None:\n headerGroupElem = ElementTree.SubElement(topGroupElem, ELEM_GROUP, {ATTR_NAME: GRP_HEADER})\n pathElem = ElementTree.SubElement(headerGroupElem, ELEM_PATH)\n else:\n if sourceGroupElem == None:\n sourceGroupElem = ElementTree.SubElement(topGroupElem, ELEM_GROUP, {ATTR_NAME: GRP_SRC})\n pathElem = ElementTree.SubElement(sourceGroupElem, ELEM_PATH)\n pathElem.text = fl", "def create_groups():\n groups = [\"iDRAC-Administrators\", \"iDRAC-Operators\", \"iDRAC-Readonly\"]\n group_priviledges = [\"0x000001ff\", \"0x000000f9\", \"0x00000001\"]\n for host in online_hosts:\n for index in [1,2,3]:\n print index,\" \", groups[index-1]\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupName \"+groups[index-1])\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g 
cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupDomain corp.inmobi.com\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupDomain failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupPrivilege \"+ group_priviledges[index-1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupPriviledge failed \")", "def createNewGroup():\n if request.method == 'POST':\n groupname = request.form['groupname1']\n internal = request.form['internal1']\n external = request.form['external1']\n userNo = request.form['usersNo1']\n if 'node1' in request.form:\n node = int(request.form['node1'])\n else:\n node = -1\n\n if int(userNo) == 0:\n if hl.createGroup(groupname, internal, external, node):\n return True\n elif int(userNo) > 0:\n if hl.createGroup(groupname, internal, external, node, genUsers=True, numUsers=int(userNo)):\n return True\n\n return False", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)", "def createGroup(self, group, members):\n connection = self.sock\n\n connection.send(\"create_group\".encode())\n\n status_code = connection.recv(2)\n\n if status_code != SUCCESS:\n print(\"Error\")\n return -1\n message = []\n message.append(\"gname:\")\n message.append(group)\n message.append(\";\")\n message.append(\"members:\")\n for i in members:\n message.append(i)\n message.append(\",\")\n if members:\n message.pop()\n message = ''.join(message)\n message = message.encode()\n connection.send(message)\n result = connection.recv(2)\n if result != SUCCESS:\n return -1\n\n packed_gid = connection.recv(4)\n gid = struct.unpack(\"<L\", packed_gid)\n repoids.append(gid)\n return 1", "def __make_group_by_res(self, group_name, name_list):\r\n if group_name not in self.groups:\r\n res_group = self.group['Residue'].getChildGrps()\r\n groups = [ res for res in res_groups if res.name in name_list ]\r\n new_group = Group(parent=[], id=-1, type=group_name, childs=groups)\r\n self.groups[group_name] = new_group", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def create_volume_group() :\n volume_group_present = False\n for node in nodes:\n volume_group_status = \\\n send_cli_via_ssh(node, 'volume_group_status').strip()\n if volume_group_status:\n print( 'Volume group on {} already exist: {}'.format(\n node,\n ' '.join([line.split(';')[0] for line in\n volume_group_status.split()])\n ))\n volume_group_present = True\n else:\n print_with_timestamp( 'Creating vg00 on: {}'.format(node))\n send_cli_via_ssh(node, 'unit_manager create S001 vg00')\n volume_group_status = \\\n send_cli_via_ssh(node, 'volume_group_status').strip()\n if volume_group_status:\n print_with_timestamp( 'Created vg00 on: {}'.format(node))\n volume_group_present = True\n else :\n print_with_timestamp( 'Cannot create vg00 on: [}'.format(node))\n volume_group_present = False\n return volume_group_present", "def __make_group_by_atom(self, group_name, name_list):\r\n pass", "def create_mailing_list_group(sender, instance, **kwargs):\n\tname = instance.name\n\treturn requests.post(\"https://api.mailgun.net/v3/lists\",\n auth=('api', settings.MAILGUN_API_KEY),\n data={'address': '{}@arenbergorkest.be'.format(name),\n 'name': name})", "def _make_node_list(child_node, list_count):\n parent = 
GroupNode(child_node.node.parentnode) \n parent.add_child(child_node)\n if list_count == 0:\n return parent\n else:\n list_count -= 1\n return _make_node_list(parent, list_count)", "def group_assignmentgroups(assignment_group_list):\n return group_nodes(assignment_group_list, 2)", "async def create_group(ctx, name: str, role: str, group_type: str=None, comp: str=None, rating: int=None, time: str=None):\n\n owner = ctx.message.author.name\n \n if comp:\n comp = [int(i) for i in comp.split()] # convert string input to array\n\n new_group = Group(owner, name, role, group_type, rating, time, comp)\n bg_bot.manager.add_group(owner, new_group)\n \n await ctx.send(f'Created new {group_type} group for leader {owner}!')", "def __call__(self, *args: FParams.args, **kwargs: FParams.kwargs) -> DAGNode:\n return self._create_task_group(TaskGroup, *args, **kwargs)", "def create_nodes(self, nodes: List[Node]):\n nodes_str = \",\\n\".join([str(n) for n in nodes])\n query = \"\"\"CREATE %s\"\"\" % nodes_str\n return self.create_tx(query)", "async def async_create_group(\n hass: HomeAssistant,\n name: str,\n entity_ids: Collection[str] | None = None,\n user_defined: bool = True,\n icon: str | None = None,\n object_id: str | None = None,\n mode: bool | None = None,\n order: int | None = None,\n ) -> Group:\n group = Group.async_create_group_entity(\n hass, name, entity_ids, user_defined, icon, object_id, mode, order\n )\n\n # If called before the platform async_setup is called (test cases)\n await _async_get_component(hass).async_add_entities([group])\n return group", "def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200", "def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))", "def _make_group(self, _rk, _group_hint):\n\n if isinstance(_group_hint, dict):\n # _group_hint is a single key/value pair\n g = _group_hint[list(_group_hint)[0]]\n\n r_type = g.get(\"type\", \"none\")\n if r_type != \"OS::Nova::ServerGroup\":\n return \"support only ServerGroup resource\"\n\n properties = g.get(\"properties\", {})\n if len(properties) == 0:\n return \"no properties\"\n\n group_name = properties.get(\"name\", None)\n if group_name is None:\n return \"no group name\"\n group_name = group_name.strip()\n\n policies = properties.get(\"policies\", [])\n if len(policies) == 0:\n return \"no policy of the group\"\n\n if len(policies) > 1:\n return \"multiple policies\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if group_name in self.groups.keys():\n group = self.groups[group_name]\n else:\n group = Group(group_name)\n\n policy = policies[0].strip()\n if policy == 
\"anti-affinity\":\n group_type = \"diversity\"\n else:\n group_type = policy\n\n group.group_type = group_type\n group.factory = \"server-group\"\n group.level = \"host\"\n\n self.groups[group_name] = group\n else:\n # group hint is uuid string.\n rg = self.resource.get_group_by_uuid(_group_hint)\n if rg is None:\n return \"unknown group found while making group\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if rg.name in self.groups.keys():\n group = self.groups[rg.name]\n else:\n group = Group(rg.name)\n\n group.group_type = rg.group_type\n group.factory = rg.factory\n group.level = \"host\"\n\n self.groups[rg.name] = group\n\n if group is not None:\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def CreateGroupPostData(input, collection, grouping, item, groupname):\n root = etree.Element(collection)\n name = etree.SubElement(root, 'name')\n name.text = groupname\n is_smart = etree.SubElement(root, 'is_smart')\n is_smart.text = 'false'\n itemlist = etree.SubElement(root, grouping)\n \n for i in input:\n add_element = etree.SubElement(itemlist, item)\n add_element_id = etree.SubElement(add_element, 'id')\n add_element_id.text = i\n \n return etree.tostring(root)", "def get_groups():\n\n groups = [\"shelter\", \"sharing\", \"unsheltered\", \"motel\"]\n\n for item in groups:\n group = Group(group_name=item)\n\n db.session.add(group)\n\n db.session.commit()", "def addGroup(self, *args):\n return _libsbml.ListOfGroups_addGroup(self, *args)", "def make_grp(self):\n try:\n self.base['grp']\n except:\n self.base['grp'] = np.zeros(len(self.base),dtype='i')\n\n for halo in self._halos.values():\n halo[name][:] = halo._halo_id\n\n if config['verbose']: print \"writing %s\"%(self._base().filename+'.grp')\n self._base().write_array('grp',overwrite=True,binary=False)", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories 
of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n 
api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n parent = self\n sdef, name, path = self.file.get_custom_node_info(qid, gslash, name, path, parent) \n grp = Group(self.file, sdef, name, path, attrs, parent)\n return grp", "def test_add_group(self):\n pass", "def group_assignments(assignment_list):\n return group_nodes(assignment_list, 1)", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n sdef, name, path = self.get_custom_node_info(qid, gslash, name, path) \n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent)\n return grp", "def make_grp(self, name='grp', v=False):\n self.base[name] = self.get_group_array(v=v) #np.zeros(len(self.base), dtype=int)#self.get_group_array()", "def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()", "def createGroup(listOfPerson):\n atk=Department()\n atk.members=listOfPerson\n return atk", "def make_group(self, qid, name='', path='', attrs={}, link='', abort=True):\n gqid = qid + \"/\"\n sdef = self.get_sdef(gqid, self.default_ns, \"referenced in make_group\")\n id = sdef['id']\n ns = sdef['ns']\n path = self.deduce_path(id, ns, path)\n if not abort:\n id_noslash = id.rstrip('/') # could be different from gqid if namespace present\n grp = self.get_existing_group(path, id_noslash, name)\n if grp:\n # found already existing group\n return grp \n link_info = self.extract_link_info(name, link, Group)\n # create the group\n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent, link_info)\n return grp", "def create_group(self, group_name, user_ids=[], role_ids=[]):\n payload = {}\n payload['name'] = group_name\n payload['user_ids'] = user_ids\n payload['role_ids'] = role_ids\n return Client._post(self, payload)", "def add_group(self):\n items = self.group_list.selectedItems()\n for item in items:\n self.parent.add_group_data(item.text())", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def make_group(self, id, name='', attrs={}, link='', abort=True ): \n gid = id + \"/\"\n sgd = self.get_sgd(gid, name)\n path = self.full_path\n link_info = self.file.extract_link_info(name, link, Group)\n if not abort:\n # id = sgd['id'].rstrip('/') # not sure if need this\n grp = self.file.get_existing_group(path, id, name)\n if grp:\n return grp\n grp = Group(self.file, sgd, name, path, attrs, self, link_info)\n # self.mstats[gid]['created'].append(grp)\n return grp", "def test_create_device_group(self):\n pass", "def create_tree(outFile, tree, path='/'):\n for key, foo in tree.list():\n if outFile.has_node(path, key):\n logging.debug('Path already found:', path, key)\n continue\n logging.debug('Creating group:', path, key)\n outFile.create_group(path, key, key)\n dest = path + key + '/'\n if outFile.has_node(dest):\n continue\n create_tree(outFile, tree.child(key), dest)", "def 
test_ipam_vlan_groups_create(self):\n pass", "def groups_create(self, mar, request):\n if not permissions.CanCreateGroup(mar.perms):\n raise permissions.PermissionException(\n 'The user is not allowed to create groups.')\n\n user_dict = self._services.user.LookupExistingUserIDs(\n mar.cnxn, [request.groupName])\n if request.groupName.lower() in user_dict:\n raise exceptions.GroupExistsException(\n 'group %s already exists' % request.groupName)\n\n if request.ext_group_type:\n ext_group_type = str(request.ext_group_type).lower()\n else:\n ext_group_type = None\n group_id = self._services.usergroup.CreateGroup(\n mar.cnxn, self._services, request.groupName,\n str(request.who_can_view_members).lower(),\n ext_group_type)\n\n return api_pb2_v1.GroupsCreateResponse(\n groupID=group_id)", "def post_groups(\n data: PostGroupIn, tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.create_group\")\n grp = Group(\n description=data.description,\n members=[tkn.owner],\n group_name=data.group_name,\n owner=tkn.owner,\n ).save()\n logging.debug(\n \"Created group %s (%s) owned by %s\",\n data.group_name,\n str(grp.pk),\n tkn.owner.character_name,\n )\n return GetGroupOut.from_record(grp)", "def creategroup(body):\n group = body.get(\"groupname\", None)\n pps = body.get(\"pilotpoints\", None)\n print('lol',group, pps)\n print(type(pps))\n\n # Does the person exist already?\n if group not in group_dict and group is not None:\n group_dict[group] = {\n \"groupname\": group,\n \"pilotpoints\": pps,\n }\n return group_dict[group], 201\n\n # Otherwise, they exist, that's an error\n else:\n abort(\n 406,\n \"Person with last name {group} already exists\".format(group=group),\n )", "def append_node(ifaces_well, wellid, node_number, k, i, j):\n group_region.append([k, i, j, k, i, j])\n if default_ifaces is None:\n ifaces.append(ifaces_well)\n face_ct.append(len(ifaces_well))\n else:\n ifaces.append(default_ifaces)\n face_ct.append(len(default_ifaces))\n group_name.append('{}{}'.format(wellid, node_number))\n group_placement.append([Grid, GridCellRegionOption,\n PlacementOption,\n ReleaseStartTime,\n ReleaseOption,\n CHeadOption])", "def handle(self, *args, **options):\n new_group, created = Group.objects.get_or_create(name=options.get('group_name')) \n self.stdout.write(f\"Group {options.get('group_name')} created\")", "def create_group(user):\n if connexion.request.is_json:\n users_group = [User.from_dict(d) for d in connexion.request.get_json()]\n response = (\"success\", 201)\n if len(users_group) > 4:\n response = (\"Max number of player is 4\", 400)\n else:\n groupId = GroupStorageController.add_new_group(users_group)\n return response", "def create_nodes(self):", "def groups(self, create, extracted, **kwargs):\n if not create:\n # Simple build, do nothing.\n return\n\n if extracted:\n # A list of groups were passed in, use them\n for group in extracted:\n # pylint: disable=no-member\n self.groups.add(group)", "def create_TestGroup(test_case, # type: AnyMagpieTestCaseType\n override_group_name=null, # type: Optional[Str]\n override_discoverable=null, # type: Optional[bool]\n override_data=null, # type: Optional[JSON]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) 
-> JSON\n app_or_url = get_app_or_url(test_case)\n data = override_data\n if override_data is null:\n data = {\"group_name\": override_group_name if override_group_name is not null else test_case.test_group_name}\n # only add 'discoverable' if explicitly provided here to preserve original behaviour of 'no value provided'\n if override_discoverable is not null:\n data[\"discoverable\"] = override_discoverable\n grp_name = (data or {}).get(\"group_name\")\n if grp_name:\n test_case.extra_group_names.add(grp_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/groups\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n return check_response_basic_info(resp, 201, expected_method=\"POST\")", "def createVolumeGroup(self, pvs, name):\n vg = {}\n vg['command'] = 'create:volgroup'\n vg['extentSize'] = EXTENT_SIZE\n vg['pvs'] = pvs\n vg['name'] = name\n\n return vg", "def create_group():\n incoming = request.get_json()\n chatroom = Chatroom(\n name = incoming['name'],\n tag = incoming['tag'],\n )\n db.session.add(chatroom)\n db.session.commit()\n participant = Participant(\n user_id = session['user_id'],\n room_id = chatroom.room_id,\n )\n db.session.add(participant)\n db.session.commit()\n return jsonify(results = chatroom.room_id)", "def execute(self, context):\n global array_nodes\n sub_tree = bpy.data.node_groups.new('Armory group', 'ArmGroupTree') # creating subtree\n sub_tree.use_fake_user = True\n group_node = array_nodes[self.node_index]\n group_node.group_tree = sub_tree # link subtree to group node\n sub_tree.nodes.new('LNGroupInputsNode').location = (-250, 0) # create node for putting data into subtree\n sub_tree.nodes.new('LNGroupOutputsNode').location = (250, 0) # create node for getting data from subtree\n context.space_data.path.append(sub_tree, node=group_node)\n sub_tree.group_node_name = group_node.name\n return {'FINISHED'}", "def group_nodes(node_list, tree_height):\n dict = OrderedDict()\n for node in node_list:\n nodelist = _make_node_list(GroupNode(node), tree_height)\n if nodelist.get_name() not in dict:\n dict[nodelist.get_name()] = nodelist\n else:\n dict[nodelist.get_name()].merge(nodelist)\n return list(dict.values()) # we usually need to know the length, so values() instead of itervalues()", "def allocate_group(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_AllocateNewGroupID(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_AllocateNewGroupID(key1, result_val)\n return result_val.i", "def test_groups():\n graph = Graph()\n for one, two in [(1, 2), (2, 3), (1, 4), (4, 3), (3, 1)]:\n graph.add_edge(one, two)\n groups = graph.group()\n eq_(len(groups), 3)", "async def groupadd(bot: fido, channel: str, sender: str, args: List[str]):\n\n if len(args) == 0:\n return \"Usage: \" + IRC.commandPrefix + \"groupadd <groupname> <nickname> <phonenumber>\"\n\n lines = []\n print(f\"Args: {args}\")\n number = ''\n nickname = ''\n group = ''\n for arg in args:\n if arg == \"\":\n continue # Ignore blank args.\n print(f\"Arg: [{arg.strip()}]\")\n if arg.startswith('+'):\n number = arg\n elif arg in bot.users:\n nickname = arg\n else:\n group = arg\n if not group or not nickname or not number:\n await bot.message(channel, \"Incorrect command usage. 
Ensure user is in channel, and that number has +<country code>.\")\n return\n add_group(mygroup=group, nickname=nickname, number=number)\n await bot.message(channel, f\"Added {nickname} to SMS group {group} with number {number}\")", "def add_group(self, name: str, leds_list: List[str]) -> Tuple[Optional['LedGroup'], str]:\n new_group: LedGroup = LedGroup(name, leds_list)\n verified_ledgroup = LedGroup.verify_led_group(new_group)\n if not verified_ledgroup:\n return None, 'wrong_group_name'\n is_unique = AuxEffects.check_unique(self, verified_ledgroup, 'LedGroup', None)\n if not is_unique:\n return None, 'group_exists'\n self.LedGroups.append(verified_ledgroup)\n return verified_ledgroup, \"\"", "def test_get_groups_3(\n self, management_client, internal_client, inventory_attributes\n ):\n\n did = \"some-device-id\"\n internal_client.create_device(did, inventory_attributes)\n for i in range(10):\n group = management_client.group(group=\"group\" + str(i))\n management_client.addDeviceToGroup(group, did)\n\n assert len(management_client.getAllGroups()) == 1", "def create_group(self, label):\n group = OptionGroup(label)\n self.append(group)\n return group", "def newSimgroup(simlist): \n nSims = len(simlist)\n \n simgrp = simgroup(nSims)\n \n for i, s in enumerate(simlist):\n \n simgrp[i] = newSim(s)\n \n return simgrp", "def _make_valet_groups(self, _rk, _az, _rule_list):\n\n for rn in _rule_list:\n rule = self.resource.group_rules[rn]\n\n # Valet group naming convention.\n # It contains datacenter id and availability_zone\n # followed by service id and vnf id\n # depending on scope.\n # And concatenate rule name.\n # Exception: quorum-diversity\n\n group_id = self.datacenter_id + \":\"\n\n if rule.rule_type != \"quorum-diversity\":\n group_id += _az + \":\"\n\n if rule.app_scope == \"lcp\":\n group_id += rn\n elif rule.app_scope == \"service\":\n group_id += self.service_instance_id + \":\" + rn\n elif rule.app_scope == \"vnf\":\n group_id += self.service_instance_id + \":\" + self.vnf_instance_id + \":\" + rn\n else:\n return \"unknown app_scope value\"\n\n if group_id in self.groups.keys():\n group = self.groups[group_id]\n else:\n group = Group(group_id)\n group.group_type = rule.rule_type\n group.factory = \"valet\"\n group.level = rule.level\n\n self.groups[group_id] = group\n\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"", "def add_group(self, parent_id: int = None, id: int = None, name: str = None):\n if parent_id is not None:\n assert (parent_id in self.group_ids), ('Parent id does not exist')\n\n if id is None:\n id = int(np.nanmax(np.asarray(self.group_ids, dtype=np.float)) + 1)\n else:\n assert (id not in self.groups_ids()), ('Id already exists')\n\n if name is None:\n name = 'Group {}'.format(id)\n\n new_group = wknml.Group(id, name, [])\n if parent_id is None:\n self.groups.append(new_group)\n else:\n self.groups = Skeleton._group_append(self.groups, parent_id, new_group)\n\n return id, name", "def create_group(self, properties: dict[str, Any | None]) -> dict:\n group = self.ms_client.http_request(method='POST', url_suffix='groups', json_data=properties)\n return group", "def create():\n name = request.json['name']\n level = request.json['level']\n manager = request.json['manager']\n if models.user.Group.get(name):\n raise Conflict('Group already exists.', creation=False)\n else:\n authorize(manager, level=level)\n group = models.user.Group(name=name, level=level, manager=manager)\n models.db.session.add(group)\n models.db.session.commit()\n return 
response(200, creation=True)", "def create_groups(groups):\n for group_name in groups:\n try:\n Group.objects.get_or_create(name=group_name)\n except Exception as e:\n raise CouldNotCreateGroup(group_name, e)", "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)", "def _get_node_group(self, node_name):\n\n pass", "def grp(self, grpNode):\n\t\tself._grp = grpNode", "def create_group(self, path):\n if self.options['storage_method'] == 'hdf5':\n # execute h5py command\n self.file_pointer.create_group(path)\n elif self.options['storage_method'] == 'none':\n # save command for later processing\n self.h5commands.append((\"create_group\", path,))\n else:\n raise Exception('Invalid option value for storage_method (%s)' % storage_method)", "def post(self):\n args = parser.parse_args()\n user_group = UserGroup()\n user_group.name = args['name']\n user_group.createdby = auth.username()\n db_session.add(user_group)\n db_session.commit()\n return user_group, 201", "def addGroup(self, *args):\n return _libsbml.GroupsModelPlugin_addGroup(self, *args)", "def async_create_group_entity(\n hass: HomeAssistant,\n name: str,\n entity_ids: Collection[str] | None = None,\n user_defined: bool = True,\n icon: str | None = None,\n object_id: str | None = None,\n mode: bool | None = None,\n order: int | None = None,\n ) -> Group:\n if order is None:\n hass.data.setdefault(GROUP_ORDER, 0)\n order = hass.data[GROUP_ORDER]\n # Keep track of the group order without iterating\n # every state in the state machine every time\n # we setup a new group\n hass.data[GROUP_ORDER] += 1\n\n group = Group(\n hass,\n name,\n order=order,\n icon=icon,\n user_defined=user_defined,\n entity_ids=entity_ids,\n mode=mode,\n )\n\n group.entity_id = async_generate_entity_id(\n ENTITY_ID_FORMAT, object_id or name, hass=hass\n )\n\n return group", "def test_createGroup(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/groups/\"\n\t\tdata = {\n\t\t\t'name' : 'testGroup3',\n\t\t\t'description' : 'This is another test group that just created.',\n\t\t\t'isPublic' : True\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 3)\n\t\tself.assertEqual(response.data[\"name\"], 'testGroup3')", "def create_default_groups():\n from flaskbb.fixtures.groups import fixture\n result = []\n for key, value in fixture.items():\n group = Group(name=key)\n\n for k, v in value.items():\n setattr(group, k, v)\n\n group.save()\n result.append(group)\n return result", "def create_groups(group_qty):\n if group_qty > 8 or not group_qty or group_qty == 0:\n raise Exception('Group quantity must be between 1 and 8.')\n group_list = []\n for x in range(0, group_qty):\n group_list.append(x+1)\n return group_list", "def create_device_group(self, devicegroup, devices=None):\n self._logger.debug(\"Create device-group: %s\" % (devicegroup,))\n if devices is not None:\n self.set_device_group(devicegroup, devices, exclusive=True)\n else:\n self.xapi.set(pandevice.XPATH_DEVICE_GROUPS + \"/entry[@name='%s']\" % (devicegroup,))", "def pgroup(pynodes, world = False, re = \"\", suffix = \"\"):\n # Initiate return variable\n output = []\n # Filter supplied pynodes, if equal to 0 
then return false\n if len(pynodes) == 0:\n return output\n # Group created on each object transformation\n if not world:\n for o in pynodes:\n # Name var\n the_name = o.name()\n # Replace object name if any\n if re != \"\":\n the_name = the_name.replace(re, suffix)\n else:\n the_name = the_name + suffix\n # Create group for each specified PyNode\n grp = pm.group(empty = True, name = the_name)\n # Align the pgroup to each PyNode transformation\n transformation.align(grp, o, mode = 'transform')\n # Get object parent\n parent = o.getParent()\n # If the object have parent,\n # Parent the group to object parent\n if parent:\n grp.setParent(parent)\n # Parent the object to pgroup\n o.setParent(grp)\n # Collect group to output\n output.append(grp)\n\n else:\n # Name var\n the_name = pynodes[0].name()\n # Replace object name if any\n if re != \"\":\n the_name = the_name.replace(re, suffix)\n else:\n the_name = the_name + suffix\n # Create single group\n grp = pm.group(empty = True, name = the_name)\n # Collect group to output\n output.append(grp)\n # Parent all specified PyNodes to pgroup\n pm.parent(pynodes, grp)\n\n return output", "def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def add_node(self, node: base.Node, label: str = None) -> HandleType:\n\n if self._current_group:\n if label and label != self._current_group:\n raise ValueError('The given label does not match the current group: '\n f'{label} vs {self._current_group}.')\n label = self._current_group\n else:\n if not label:\n raise ValueError('Label should not be empty.')\n if label not in self._groups:\n self._groups[label] = [node]\n else:\n self._groups[label].append(node)\n return node.create_handle()", "def _node_groups(self, node, log=None):\n hostgroups = set(self.settings.MON_ZABBIX_HOSTGROUPS_NODE)\n hostgroups.update(node.monitoring_hostgroups)\n\n return self._get_or_create_hostgroups(self._node_kwargs(node), self.settings.MON_ZABBIX_HOSTGROUP_NODE, None,\n hostgroups=hostgroups, log=log)", "def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups", "def get_group_nodes(self, nodePair):\n core = self.core\n parent_node = core.get_base(nodePair.get_bundleNode())\n relative_path = list(core.is_member_of(parent_node).keys())[0]\n nodePair.set_bundleGroupNode(core.load_by_path(self.root_node, relative_path))\n\n parent_node = core.get_base(nodePair.get_countryNode())\n relative_path = list(core.is_member_of(parent_node).keys())[0]\n nodePair.set_countryGroupNode(core.load_by_path(self.root_node, relative_path))", "def add_nodes(self, node_name_list):\n nodes = requests.post(self.__url + 'nodes', data=json.dumps(\n node_name_list), headers=HEADERS).json()\n node_dict = {}\n for node in nodes:\n node_dict[node['name']] = node['SUID']\n return node_dict", "def create(*nIds):\n nodes = []\n for nId in nIds:\n nodes.append(Node(nId))\n return nodes", "def create_namespaced_group(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method 
create_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")" ]
[ "0.7485883", "0.72396135", "0.71599764", "0.66984046", "0.66795355", "0.65447414", "0.6374939", "0.6371427", "0.63429767", "0.63429767", "0.6336112", "0.6318668", "0.631537", "0.63136333", "0.6290033", "0.6238642", "0.6233603", "0.62333775", "0.616521", "0.61182487", "0.6102469", "0.6092538", "0.6085988", "0.6051518", "0.60488075", "0.5986774", "0.59811246", "0.5972799", "0.5962804", "0.59606504", "0.5956766", "0.59296113", "0.5914792", "0.5898665", "0.58986485", "0.58964425", "0.5880037", "0.58732945", "0.5870162", "0.58540857", "0.58505", "0.58445364", "0.5835782", "0.5830884", "0.5827687", "0.58250254", "0.5820926", "0.5819618", "0.5811116", "0.5810487", "0.5808543", "0.5802188", "0.5790553", "0.57902277", "0.5778338", "0.57760596", "0.5767674", "0.5764979", "0.5754544", "0.57462215", "0.5744429", "0.5732055", "0.5716156", "0.57148945", "0.57053757", "0.5704867", "0.5694523", "0.56891894", "0.5688638", "0.5682427", "0.5680584", "0.5678444", "0.5671964", "0.56656796", "0.5663046", "0.5652962", "0.5645079", "0.56443536", "0.5632936", "0.56325424", "0.5627595", "0.562419", "0.5621108", "0.5615352", "0.5612948", "0.55998707", "0.55949455", "0.5594711", "0.5584334", "0.557836", "0.55724186", "0.5565315", "0.55584246", "0.55573434", "0.55571085", "0.555117", "0.5548716", "0.5547089", "0.5544881", "0.5537315" ]
0.7037025
3
returns a list of node uuids for a given group as name, pk, uuid or group object
def get_nodes_from_group(group, return_format='uuid'): from aiida.orm import Group from aiida.common.exceptions import NotExistent nodes = [] g_nodes = [] try: group_pk = int(group) except ValueError: group_pk = None group_name = group if group_pk is not None: try: str_group = Group(dbgroup=group_pk) except NotExistent: str_group = None message = ('You have to provide a valid pk for a Group ' 'or a Group name. Reference key: "group".' 'given pk= {} is not a valid group' '(or is your group name integer?)'.format(group_pk)) print(message) elif group_name is not None: try: str_group = Group.get_from_string(group_name) except NotExistent: str_group = None message = ('You have to provide a valid pk for a Group or a Group name.' 'given group name= {} is not a valid group' '(or is your group name integer?)'.format(group_name)) print(message) elif isinstance(group, Group): str_group = group else: str_group = None print('I could not handle given input, either Group, pk, or group name please.') return nodes g_nodes = str_group.nodes for node in g_nodes: if return_format == 'uuid': nodes.append(node.uuid) elif return_format == 'pk': nodes.append(node.pk) return nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list", "def GetGroupMembers(self, group):\n return []", "def _get_group_uuid(self, nova, name):\n hints = {}\n try:\n groups = nova.server_groups.list()\n for group in groups:\n gname = getattr(group, 'name', '')\n if name == gname:\n hints['group'] = getattr(group, 'id', '')\n except Exception as e:\n LOG.exception(e)\n finally:\n LOG.info(\"%s:%s() %s: %s\", self.__class__.__name__,\n sys._getframe().f_code.co_name, name, hints)\n return hints", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def list_group(group):\n\n members = group_members(group)\n ret = {}\n if members:\n for member in members:\n info = get(member)\n if info:\n ret[uid2dn(member)] = info\n return ret", "def group_nodes(self, group, namespace=None):\n source = self._source(namespace)\n return self._list(source, 'map', group)", "def getGroupInfo(groupId):\n url = f\"https://groups.roblox.com/v1/groups/{groupId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "def _get_nodes_by_instance(self, instance_uuid):\n try:\n node = pecan.request.dbapi.get_node_by_instance(instance_uuid)\n return [node]\n except exception.InstanceNotFound:\n return []", "def _get_node_group(self, node_name):\n\n pass", "def getNodeLVMGroups(self,node):\n data = self.connect('get','nodes/%s/scan/lvm' % (node),None)\n return data", "def get_uuids_in_node(self, node, project_id):\n program, project = project_id.split(\"-\", 1)\n\n try:\n res = self.paginate_query(node, project_id)\n uuids = [x[\"id\"] for x in res[\"data\"][node]]\n except:\n raise Gen3Error(\n \"Failed to get UUIDs in node '\"\n + node\n + \"' of project '\"\n + project_id\n + \"'.\"\n )\n\n return uuids", "def get_uuids(things):\n return [thing.uuid for thing in things]", "def find_with_uuid(self, groupid, objects, section):\n if groupid in self._total[section]:\n # we get the objects by name to avoid memory corruption issues,\n # but we're not checking if the names change!\n return objects[self._total[section][groupid]]\n else:\n for obj in objects:\n obj_uuid = self.get_uuid(obj)\n if obj_uuid:\n #self._total[section][obj_uuid] = obj.name\n if obj_uuid == groupid:\n return obj", "def _find_groups_for_user(email):\n return [g['name'] for g in groups.find({\"users\":email})]", "def test_get_device_group_by_id(self):\n pass", "def group_members(group):\n\n group = ldapi.lookup(ld, 'cn', group, cfg['ldap_groups_base'])\n\n if group and 'uniqueMember' in group:\n r = re.compile('^uid=([^,]*)')\n return map(lambda x: r.match(x).group(1), group['uniqueMember'])\n return []", "def get_groups(self):\n user_node = self.get()\n grouplist = []\n if user_node:\n for rel in graph.match(start_node=user_node, rel_type='in'):\n grouplist.append(Usergroup(id=rel.end_node()['id']))\n return grouplist", "def get_group(tkn: Token = Depends(from_authotization_header_nondyn),):\n assert_has_clearance(tkn.owner, \"sni.read_group\")\n return [\n GetGroupShortOut(group_id=str(grp.pk), group_name=grp.group_name)\n for grp in Group.objects().order_by(\"group_name\")\n ]", "def find_group(self,id):\n result = []\n cursor = self._cnx.cursor()\n command = \"SELECT group_id FROM teilnahmen 
WHERE id={}\".format(id)\n cursor.execute(command)\n tuples = cursor.fetchall()\n\n for (group) in tuples:\n teilnahme = Teilnahme()\n teilnahme.set_le()\n result.append(teilnahme)\n\n self._cnx.commit()\n cursor.close()\n return result", "def _ListGroupDevices(self, group):\n for run_target in six.itervalues(group.run_targets):\n for d in six.itervalues(run_target.devices):\n yield d", "def getTGTasksName(all_taskgroup_objects, tg_name):\n tg_id = all_taskgroup_objects.id\n tg_task_obj = TaskgroupTask.objects.filter(\n taskgroup_id=tg_id\n )\n tg_task = TaskgroupTaskSerializers(\n tg_task_obj,\n many=True\n )\n tasks_list = []\n tg_list = []\n try:\n for i in range(len(tg_task.data)):\n tasks = dict(tg_task.data[i].items())\n print(\"#############\", tasks)\n task_obj = Tafv2Task.objects.get(id=tasks['task_id'])\n tasks_list.append({\n \"task_name\": task_obj.script,\n \"task_id\": task_obj.id\n })\n tg_list.append({\n \"tg_name\": tg_name,\n \"tg_id\": tg_id\n })\n\n context = {'tgList': tg_list, 'tasksList': tasks_list}\n print(\"$$$$$$$$$$$$$$\", context)\n return context\n except Exception as e:\n print(e)", "def get_group_names(self):\r\n return self.groups.keys()", "def get_group_users(groupname):\n return jsonify(admin.get_group_users(current_app.scoped_session(), groupname))", "def _groupNamesToList(settings):\n return [getattr(GroupName, val) for val in settings.dhGroups]", "def find_with_uuid(self, groupid, objects, section):\n if groupid in self._total[section]:\n # we get the objects by name to avoid memory corruption issues,\n # but we're not checking if the names change!\n return self.wrap_object(objects[self._total[section][groupid]],\n section)\n else:\n for obj in objects:\n obj_uuid = self.get_uuid(obj)\n if obj_uuid:\n self._total[section][obj_uuid] = obj.name\n if obj_uuid == groupid:\n return self.wrap_object(obj, section)", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def _get_nodes(self):\n viewpoint = \"shiva_{}\".format(cherrypy.session[\"id\"])\n messages_db = self.mongo[viewpoint][\"messages\"]\n people_db = self.mongo[viewpoint][\"people\"]\n #\n senders = messages_db.distinct(\"sender\")\n owner_id = cherrypy.session[\"id\"]\n nodes = list()\n for sender in senders:\n person = people_db.find_one({\"id\": sender})\n if person is None:\n name = \"id{}\".format(sender)\n else:\n name = person[\"display_name\"]\n records = list(messages_db.aggregate([{\n \"$match\": {\n \"$or\": [\n {\"sender\": owner_id, \"receiver\": sender},\n {\"sender\": sender, \"receiver\": owner_id}\n ]\n }\n }, {\"$group\": {\"_id\": None, \"count\": {\"$sum\": 1}}}]))\n if not records:\n records = 0\n else:\n records = records[0][\"count\"]\n info = \"Total records: {}\".format(records)\n history_link = \"/vk/read?id={}\".format(sender)\n statistics_link = \"#\"\n if records > 0:\n nodes.append({\n \"id\": sender,\n \"name\": name,\n \"info\": info,\n \"records\": records,\n \"history_link\": history_link,\n \"statistics_link\": statistics_link\n })\n #\n return nodes", "def construct_groups_string(nodes):\n groups = get_groups(nodes)\n if len(groups) <= 1:\n return \"\"\n else:\n result = []\n for color in groups:\n # +1 because .tsp nodes are indexed with 1\n group = [node.nid + 1 for node in nodes if node.color == color]\n result.append(group)\n return str(result)", "def get_groups(nodes):\n return list(set([node.color for node in nodes]))", "def keys(self):\n list_all_dict = 
self.list_all()\n return list_all_dict[\"nodes\"] + list_all_dict[\"groups\"]", "def group(self, group_cn):\n group = self.search(base=GROUPS, cn=group_cn)\n\n if len(group) == 0:\n return []\n else:\n group_members = group[0]['attributes']['member']\n\n members = []\n for member in group_members:\n members.append(self.search(dn=member))\n\n if self.objects:\n return self.member_objects(members)\n\n return members", "def list_links(self, node, dd):\n link_list = dd[node]['links']\n link_names = []\n for link in link_list:\n if \"subgroup\" in link:\n sublinks = list(link[\"subgroup\"])\n for sublink in sublinks:\n link_names.append(sublink[\"name\"])\n else:\n link_names.append(link[\"name\"])\n return link_names", "def node_group_spec(self):\n return {\n 'name': 'nodes',\n 'source': 'tree', \n 'transform': [\n {\n 'type': 'filter',\n 'expr': 'datum.type == \"node\"'\n }\n ]\n }", "def node_groups(self, node, namespace=None):\n source = self._source(namespace)\n return self._list(source, 'reverse', node)", "def get_assets(id_group):\n data = sql.list_assets(id_group)\n names = [(d['id'], d['name']) for d in data]\n return names", "def customer_group_get_all():\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n SELECT \n `group_id`, \n `group_name`, \n `description`, \n `timestamp`, \n `created_by`, \n `creation_time`, \n `is_deleted`, \n `updated_by`, \n `role_id`, \n `is_default`, \n `is_customer` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n user_group_details = None\n cursor = db.cursor()\n if cursor.execute(query) != 0:\n user_group_details = cursor.fetchall()\n cursor.close()\n db.close()\n return user_group_details", "def test_groups_group_id_get(self):\n pass", "def get_group_list(self) -> Sequence[str]:\n return [group.Name.lower() for group in self.LedGroups]", "def get_group_members(self, group):\n members = []\n result = self.search('ou=groups,dc=mozilla',\n filterstr='cn=%s' % (group))\n if result == False:\n raise self.SearchError\n elif result == []:\n return []\n for group in result[1]:\n members = list(set(members) | set(group[1]['memberUid']))\n return members", "def find_uuids_linked_to_item(cls, rid):\n ignored(rid)\n return []", "def get_group_nodes(self, nodePair):\n core = self.core\n parent_node = core.get_base(nodePair.get_bundleNode())\n relative_path = list(core.is_member_of(parent_node).keys())[0]\n nodePair.set_bundleGroupNode(core.load_by_path(self.root_node, relative_path))\n\n parent_node = core.get_base(nodePair.get_countryNode())\n relative_path = list(core.is_member_of(parent_node).keys())[0]\n nodePair.set_countryGroupNode(core.load_by_path(self.root_node, relative_path))", "def test_get_device_group_by_id1(self):\n pass", "def group_get_members(self,groupname):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_group_get_members_query+\" ORDER BY $username_field$\",{'groupname':groupname,'username_field':self.sql_username_field,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: group_get_members: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_username_field]", "def get_groups(id_project):\n data = sql.list_groups(id_project)\n names = [(d['id'], d['name']) for d in data]\n return names", "def get_nested_groups(self, conn, group: str) -> typing.List[str]:\n nested_groups = list()\n 
conn.search(\n search_base=self.group_search_base,\n search_filter=self.group_search_filter.format(group=group),\n search_scope=ldap3.SUBTREE)\n if conn.response:\n for nested_group in conn.response:\n if 'dn' in nested_group:\n nested_groups.extend([nested_group['dn']])\n groups = self.get_nested_groups(conn, nested_group['dn'])\n nested_groups.extend(groups)\n nested_groups = list(set(nested_groups))\n return nested_groups", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def list_group(self, groupname):\n return self.get_admin(\"groups/{}\".format(groupname))", "def grep_groups(self, response):\n soup = BeautifulSoup(response.body)\n for group_link in soup.table.find_all('a', href=True):\n group_item = response.meta['group_item']\n group_item['full_name'] = group_link.text\n group_item['id'] = group_link['href'][20:]\n yield group_item", "def get_users_in_group(self, group_id):\n members = self.vk.groups.getMembers(group_id=group_id, count=1)\n peoples = members['count']\n ids = []\n while len(ids) < peoples:\n members = self.vk.groups.getMembers(group_id=group_id, offset=len(ids))\n ids += members['items']\n\n return ids", "def group_id_to_name(id_list, group_name):\n id_name_dict = {}\n id_string = [str(x) for x in id_list]\n id_string = \",\".join(id_string)\n url = \"https://www.boardgamegeek.com/xmlapi/boardgame\"\n if group_name != \"game\":\n url = url + group_name\n url += \"/{}\".format(id_string)\n print(url)\n resp = requests.get(url)\n tree = ET.fromstring(resp.content)\n for idx, val in enumerate(list(tree)):\n name = val.find(\"name\").text\n id = id_list[idx]\n id_name_dict[id] = name\n return id_name_dict", "def group_handling(existing_uuids: Set[str]) -> None:", "def product_group_list(obj):\n client = get_client(obj)\n\n res = client.product_group_list()\n\n print(json.dumps(res, indent=4))", "def test_get_device_groups(self):\n pass", "def get_group_info(groupname):\n return jsonify(admin.get_group_info(current_app.scoped_session(), groupname))", "def get_nodes_for_process(self, uuid, clean=True):\n if clean:\n uuid = Process.strip_uuid(uuid)\n return self._get_tree_queryset().filter(process__uuid_full__startswith=uuid)", "def list_groups(args):\n\n for group in get_groups(args):\n print(group)", "def info_materials_groups_get():\n session = info_map.Session()\n\n mat = aliased(info_map.Material)\n grp = aliased(info_map.Group)\n\n q = session.query(mat.group_id,grp.name).join(grp).distinct()\n groups = [Group(group=row.group_id,name=row.name) for row in q.all()]\n return groups, 200", "def groups(self):\n return []", "def get(self):\n usergroup_node = graph.find_one(\"Usergroup\",\n property_key='id',\n property_value=self.id)\n return usergroup_node", "def product_group_get(obj, name):\n client = get_client(obj)\n\n pgs = client.product_group_list(name)\n if not pgs:\n fatal_error('Product group {} does not exist'.format(name))\n\n print(json.dumps(pgs[0], indent=4))", "def customer_group_get_related(group_id):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n 
`company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `groups`.`company_name` = (\n SELECT `asshole`.`company_name` \n FROM \n (\n SELECT * \n FROM `groups` \n WHERE `group_id` = \"%s\"\n ) AS `asshole`\n )\n \"\"\" %(group_id)\n \n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def get_all_children_id_list_from_redis_by_pk(gmac_id):\n try:\n gmac = GoogleMapsAddressComponent.objects.get(pk=gmac_id)\n conn = get_redis_connection()\n key = GoogleMapsAddressComponent.get_redis_all_children_key(gmac_id)\n length = conn.llen(key)\n return conn.lrange(key, 0, length)\n except GoogleMapsAddressComponent.DoesNotExist:\n return None", "def locate_group_users(self, group):\n return self.ldap_connection.search_s(\"ou=Groups,dc=redhat,dc=com\",\n ldap.SCOPE_SUBTREE, 'cn={0}'.format(group))", "def get_nodes(self, type, query_args={}):\n endpoint = '/v3/educator/%ss' % (Node.TYPE_MAP[type])\n result = self.request(endpoint, query_args)\n\n nodes = []\n for data in result.response:\n node = Node.instance(type, data)\n nodes.append(node)\n\n return nodes", "def test_get_group(self):\n pass", "def getGroupMembers(group_id):\r\n return Group.getGroupMembers(group_id)", "def group_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"group_ids\")", "def get_group_name(\n group_id: BSONObjectId,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.read_group\")\n return GetGroupOut.from_record(Group.objects(pk=group_id).get())", "def get_pingroups(self):\n return self.groups[:]", "def get_members(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n query = (\"SELECT username from \" + ENV_DB + \".Groups WHERE gid='{}'\").format(self.g_id)\r\n cursor.execute(query)\r\n data = cursor.fetchall()\r\n database.close()\r\n return list(i[0] for i in data)", "def get_children_from_redis(gmac_id, as_objects=True):\n conn = get_redis_connection()\n klass = GoogleMapsAddressComponent\n results = []\n queue = []\n children = klass.get_children_id_list_from_redis_by_pk(gmac_id)\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = klass.get_children_id_list_from_redis_by_pk(node)\n results.extend(children)\n queue.extend(children)\n if as_objects:\n results = klass.objects.filter(pk__in=results)\n return results", "def get_all_group_ids(token) -> list:\n ids=list()\n _dict = perform_request(app_config.ENDPOINT, token)\n while True:\n for obj in _dict[\"value\"]:\n ids.append(obj[\"id\"])\n if \"@odata.nextLink\" not in _dict:\n return ids\n _dict = perform_request(_dict[\"@odata.nextLink\"], token)", "def all_childnodes_to_nifti1img(h5group):\n child_nodes = []\n def append_parent_if_dataset(name, obj):\n if isinstance(obj, h5py.Dataset):\n if name.split('/')[-1] == 'data':\n child_nodes.append(obj.parent)\n\n vols = []\n h5group.visititems(append_parent_if_dataset)\n for c in child_nodes:\n vols.append(hdfgroup_to_nifti1image(c))\n\n return vols", "def get_ids(self) -> List[str]:", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def get_node(self, uuid, clean=True):\n if clean:\n uuid = ProcessNode.strip_uuid(uuid)\n return self._get_tree_queryset().get(uuid_full__startswith=uuid)", "def getNodeNames(self, includeDisabled=False):", "def test_api_v1_groups_names_get(self):\n 
pass", "def check_uuid(self, obj, groupid):\n if self.get_uuid(obj) == groupid:\n return True", "def GetNodes(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n nodes = self._SendRequest(HTTP_GET, \"/%s/nodes\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return nodes\n else:\n return [n[\"id\"] for n in nodes]", "def get_group_list(self):\n return [(item[0], item[1][0]) for item in self.contacts_by_group_list]", "def get_group_list(ip_address, headers):\n group_list = None\n group_url = 'https://%s/api/GroupService/Groups' % ip_address\n response = requests.get(group_url, headers=headers, verify=False)\n if response.status_code == 200:\n group_response = response.json()\n if group_response['@odata.count'] > 0:\n group_list = [x['Id'] for x in group_response['value']]\n else:\n print(\"No groups found at \", ip_address)\n else:\n print(\"No groups found at \", ip_address)\n return group_list", "def groupfinder(user_id, request):\n ret = DBSession.query(User).filter_by(user_id=user_id).all()\n if len(ret) == 0:\n return None\n user = ret[0]\n groups = [x.group_name for x in user.groups]\n return groups", "def get_output_nodes(self):\n \n\n self.buildings = self.dataset.groups['buildings']\n self.building_nodes = self.buildings.groups['nodes']\n\n eta_output_added = getattr(self.building_nodes,'eta_output_added')\n uv_output_added = getattr(self.building_nodes,'uv_output_added')\n\n eta = []\n uv = []\n nodeIds = []\n time = []\n \n if(eta_output_added or uv_output_added ):\n time = self.building_nodes.variables['time'][:].tolist()\n nodeIds = self.building_nodes.variables['id'][:].tolist()\n if eta_output_added: eta = self.building_nodes.variables['eta'][:].tolist()\n if uv_output_added: uv = self.building_nodes.variables['uv'][:].tolist()\n\n \n return nodeIds,eta, uv, time", "def test_get_resource_group_by_moid(self):\n pass", "def groups(self):\n return self.get_data(\"groups\")", "def get_organization_group_details_url(organization_group_uuid):\n\n return '{organization_group_api_path}/{organization_group_uuid}/tree'.format \\\n (organization_group_api_path=ORGANIZATION_GROUP_API_COMMON_PATH,\n organization_group_uuid=organization_group_uuid)", "def get_groups(email):\n query=\"SELECT * FROM groups WHERE createdby='{}'\".format(email)\n cur.execute(query)\n return cur.fetchall()", "def test_get_port_sub_group_list(self):\n pass", "def list():\n index = 0\n while True:\n node = Node.from_index(index)\n if os.path.exists(node.path()):\n click.echo(f'{index}: node_{index}')\n click.echo(run_lncli(node, 'getinfo | jq .identity_pubkey'))\n else:\n break\n index += 1", "def get_node_list(self):\n logger.debug('Retrieving node list')\n self.node_ids = []\n\n # Iterate over interfaces, try to grab gateway ipv4 addr\n # Try to /ping gateway over TCP using default port.. 
if we get a pong, we may get a node ID\n gateways = netifaces.gateways()\n gateways = gateways.get(netifaces.AF_INET, [])\n\n for gateway in gateways:\n node_id = gateway[0]\n node = self.select_node(node_id)\n info = node.get_info()\n\n if info and info.get('node'):\n logger.debug('Found node with ID \"%s\"', node_id)\n self.node_ids.append(node_id)\n\n return self.node_ids", "def test_get_device_groups1(self):\n pass", "def by_nodes(item):\n return Line['nodes', item]", "def group_tag(self):\n return ''.join(['[{}]'.format(x.name) for x in self.groups])", "def groups_by_uid(request, uid):\r\n user = User()\r\n users = user.query({\"uid\":str(uid)})\r\n if(len(users) < 1):\r\n return HttpResponse(\"No user found under uid \"+ str(uid))\r\n group = Group()\r\n group = group.query({\"gid\":str(users[0]['gid'])})\r\n if(len(group) < 1):\r\n return HttpResponse(\"No group found under uid \"+ str(uid))\r\n return HttpResponse(json.dumps(group))", "def get_by_name(cls, context, cluster, name):\n db_nodegroup = cls.dbapi.get_nodegroup_by_name(context, cluster, name)\n nodegroup = NodeGroup._from_db_object(cls(context), db_nodegroup)\n return nodegroup", "def extract_names(collection):\n return (\n '[{}]'.format(', '.join(map(repr, groups[n].entity_names)))\n if n in groups else repr(n) for n in collection\n )", "def list_group_members(self, entity):\n\n members = []\n\n for nodePath, node in self.cache.get_tree(self.userProjects).items():\n if nodePath.startswith(entity.path):\n # Check if node is a direct child\n distance = len(pathlib.Path(nodePath).relative_to(pathlib.Path(entity.path)).parts)\n\n if distance == 1:\n if type(node) is gitlab.v4.objects.Group or type(node) is gitlab.v4.objects.Project:\n members.append(node.path)\n elif type(node) is gitlab.v4.objects.User:\n members.append(node.username)\n\n return members", "def get_users(db, group):\n my_users = {\n user_id\n for user_id, in db(\"\"\"\n select distinct\n users.id\n from users, members\n where\n users.id = members.user_id\n and group_id = %s\n \"\"\",\n group.group_id)\n }\n return my_users" ]
[ "0.6142465", "0.5999166", "0.59815145", "0.58869386", "0.5741488", "0.5735394", "0.5697292", "0.5640951", "0.563847", "0.56142646", "0.55772024", "0.5554826", "0.5521682", "0.5469941", "0.5396493", "0.5393709", "0.53740793", "0.5341698", "0.5340419", "0.5333693", "0.53306866", "0.53110385", "0.5308188", "0.53049964", "0.5273193", "0.5263094", "0.5262305", "0.5256786", "0.52505016", "0.52147794", "0.5209649", "0.5186415", "0.5179269", "0.5172982", "0.5170736", "0.5168547", "0.51625335", "0.5149786", "0.5137854", "0.51282305", "0.51119983", "0.5102721", "0.5102273", "0.50959283", "0.5090843", "0.50596756", "0.5058793", "0.5052648", "0.5051217", "0.5047032", "0.5024038", "0.50216055", "0.5015558", "0.5009927", "0.4995764", "0.4986205", "0.49843484", "0.49771452", "0.49667418", "0.49660653", "0.49560857", "0.49452358", "0.49410757", "0.493784", "0.4933313", "0.4930362", "0.49207258", "0.4917301", "0.4908184", "0.49043694", "0.4901802", "0.4900425", "0.4898625", "0.48870236", "0.48811227", "0.48811227", "0.4876212", "0.48760417", "0.48668334", "0.48657748", "0.48587263", "0.48565102", "0.4852899", "0.48517993", "0.48456594", "0.48445433", "0.4844292", "0.48423952", "0.48404455", "0.48388153", "0.48364002", "0.4834897", "0.4827112", "0.48236766", "0.4822737", "0.48225388", "0.4822297", "0.48221913", "0.48186228", "0.48184037" ]
0.73784983
0
Creates an interior node with the given operator (a token), and left and right operands (other nodes).
def __init__(self, opToken, leftOper, rightOper): self.operator = opToken self.leftOperand = leftOper self.rightOperand = rightOper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n return AndNode(sliding_window, parent)\n if operator_type == KleeneClosureOperator:\n return KleeneClosureNode(sliding_window, operator.min_size, operator.max_size, parent)\n raise Exception(\"Unknown or unsupported operator %s\" % (operator_type,))", "def __init__(self, token, left = None, right = None):\n LinkedBinaryTree.__init__(self) # LinkedBinaryTree initialization\n if not isinstance(token, str):\n raise TypeError('Token must be a string')\n self._add_root(token) # use inherited, nonpublic method\n if left is not None:\n if token not in '+-*x/':\n raise ValueError('token must be valid operator')\n self._attach(self.root(), left, right) # use inherited, nonpublic method", "def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")", "def create_operator(statement_a, operator, statement_b):\n return S(statement_a=statement_a, operator=operator, statement_b=statement_b)", "def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")", "def build_expression_tree(token_list: Sequence[tokens.Token]) -> nodes.ExpNode:\r\n\r\n def is_unary_op(op) -> bool:\r\n return op in UNARYOP_TABLE\r\n\r\n def is_open_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenOpenBracket)\r\n\r\n def is_close_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenCloseBracket)\r\n\r\n def is_comma(token) -> bool:\r\n return isinstance(token, tokens.TokenSymbol) and token.symbol == Separators.SEP_COMMA\r\n\r\n def is_higher_or_equal_op_priority(op1, op2, table) -> bool:\r\n oi1 = table.get(op1)\r\n oi2 = table.get(op2)\r\n\r\n p1 = 0 if oi1 is None else oi1.priority\r\n p2 = 0 if oi2 is None else oi2.priority\r\n\r\n return p1 >= p2\r\n\r\n def read_exp_chain(index) -> Tuple[nodes.ExpNode, int]:\r\n token = token_list[index]\r\n if isinstance(token, tokens.TokenSymbol):\r\n if is_open_bracket(token):\r\n node, i = read_exp(index)\r\n elif is_unary_op(token.symbol):\r\n if UNARYOP_TABLE[token.symbol].affix == OperatorAffix.PREFIX:\r\n node, i = read_prefix_unary_exp(index)\r\n 
else:\r\n raise ParsingException(f\"unary operator '{token.symbol}' is not a prefix operator\", token.pos)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n node, i = read_exp(index)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if isinstance(next_token, tokens.TokenSymbol) and is_unary_op(next_token.symbol):\r\n if UNARYOP_TABLE[next_token.symbol].affix == OperatorAffix.POSTFIX:\r\n node, i = read_postfix_unary_exp(i, node)\r\n else:\r\n return (node, i)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if is_close_bracket(next_token):\r\n return (node, i)\r\n elif isinstance(next_token, tokens.TokenSymbol):\r\n if next_token.symbol == Separators.SEP_COMMA:\r\n return (node, i)\r\n elif next_token.symbol in BINOP_TABLE:\r\n return read_binary_exp(i, node)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{next_token.symbol}'\", next_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", next_token.pos)\r\n else:\r\n return (node, i)\r\n\r\n def read_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n if index >= len(token_list):\r\n raise ParsingException(\"unexpected token\", token_list[-1].pos)\r\n\r\n token = token_list[index]\r\n if is_open_bracket(token):\r\n return read_bracket_exp(index)\r\n elif isinstance(token, tokens.TokenNumber):\r\n return (nodes.NumberNode(token.num, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenName):\r\n if (index + 1) < len(token_list) and is_open_bracket(token_list[index + 1]):\r\n return read_func_call(index)\r\n else:\r\n return (nodes.NameConstantNode(token.name, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenSymbol):\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n raise ParsingException(\"unexpceted token\", token.pos)\r\n\r\n def read_bracket_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n node, i = read_exp_chain(index + 1)\r\n\r\n if i < len(token_list) and is_close_bracket(token_list[i]):\r\n return (node, i + 1)\r\n else:\r\n raise ParsingException(\"unmatch '('\", token_list[index].pos)\r\n\r\n def read_prefix_unary_exp(index) -> Tuple[nodes.UnaryOpNode, int]:\r\n node, i = read_exp(index + 1)\r\n token = token_list[index]\r\n return (nodes.UnaryOpNode(token.symbol, node, pos=token.pos), i)\r\n\r\n def read_postfix_unary_exp(index, child: nodes.ExpNode) -> Tuple[nodes.UnaryOpNode, int]:\r\n token = token_list[index]\r\n\r\n if isinstance(child, nodes.UnaryOpNode):\r\n if is_higher_or_equal_op_priority(token.symbol, child.op, UNARYOP_TABLE):\r\n node = nodes.UnaryOpNode(token.symbol, child.child, pos=token.pos)\r\n child.child = node\r\n node = child\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n\r\n return (node, index + 1)\r\n\r\n def read_binary_exp(index, left: nodes.ExpNode) -> Tuple[nodes.BinaryOpNode, int]:\r\n right, i = read_exp_chain(index + 1)\r\n\r\n token = token_list[index]\r\n if isinstance(right, nodes.BinaryOpNode) and not is_open_bracket(token_list[index + 1]):\r\n # check operator priority and rotate the expression tree when necessary.\r\n # when priority of two operators are equal, we also should rotate the tree\r\n # in case these operators don't follow the commutative law.\r\n if is_higher_or_equal_op_priority(token.symbol, right.op, BINOP_TABLE):\r\n node = 
nodes.BinaryOpNode(token.symbol, left, right.left, pos=token.pos)\r\n right.left = node\r\n node = right\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n\r\n return (node, i)\r\n\r\n def read_func_call(index) -> Tuple[nodes.FuncCallNode, int]:\r\n name_token = token_list[index]\r\n index += 2 # skip '('\r\n\r\n token_count = len(token_list)\r\n\r\n node = None\r\n i = index\r\n args = []\r\n\r\n while i < token_count and not is_close_bracket(token_list[i]):\r\n node, i = read_exp_chain(i)\r\n args.append(node)\r\n if i < token_count and is_comma(token_list[i]):\r\n i += 1\r\n else:\r\n break\r\n\r\n if i < token_count and is_close_bracket(token_list[i]):\r\n func_node = nodes.FuncCallNode(name_token.name, args, pos=name_token.pos)\r\n return (func_node, i + 1)\r\n else:\r\n raise ParsingException(\"unclose func call\", name_token.pos)\r\n\r\n\r\n node, i = read_exp_chain(0)\r\n\r\n if i < len(token_list):\r\n last_token = token_list[i]\r\n if is_close_bracket(last_token):\r\n raise ParsingException(\"unmatch ')'\", last_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", last_token.pos)\r\n else:\r\n return node", "def __init__(self, operation, left, right):\n self.operation = operation\n self.left = left\n self.right = right", "def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left", "def build_expression_tree(tokens):\n S = [] # we use Python list as stack\n for t in tokens:\n if t in '+-*/': # t is an operator symbol\n S.append(t) # push the operator symbol\n elif t not in '()': # consider t to be a literal\n S.append(ExpressionTree(t)) # push trivial tree storing value\n elif t == ')': # compose a new tree from three constituent parts\n right = S.pop() # right subtree as per LIFO\n op = S.pop() # operator symbol\n left = S.pop() # left subtree\n S.append(ExpressionTree(op, left, right)) # repush tree\n # we ignore a left parenthesis\n return S.pop()", "def build_expression_tree(tokens):\n S = [] # we use Python list as stack\n for t in tokens:\n if t in '+-x*/': # t is an operator symbol\n S.append(t) # push the operator symbol\n elif t not in '()': # consider t to be a literal\n S.append(ExpressionTree(t)) # push trivial tree storing value\n elif t == ')': # compose a new tree from three constituent parts\n right = S.pop() # right subtree as per LIFO\n op = S.pop() # operator symbol\n left = S.pop() # left subtree\n S.append(ExpressionTree(op, left, right)) # repush tree\n # we ignore a left parenthesis\n return S.pop()", "def __init__(self, operation, operand):\n self.operation = operation\n self.right = operand", "def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = 
self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator", "def __create_nested_structure(nested_operator: PatternStructure):\n order = list(range(len(nested_operator.args))) if isinstance(nested_operator, CompositeStructure) else [0]\n operator_type = None\n if isinstance(nested_operator, AndOperator):\n operator_type = OperatorTypes.AND\n elif isinstance(nested_operator, SeqOperator):\n operator_type = OperatorTypes.SEQ\n ret = TreePlanLeafNode(order[0])\n for i in range(1, len(order)):\n ret = TreePlanBinaryNode(operator_type, ret, TreePlanLeafNode(order[i]))\n return ret", "def expression( ):#DOUBLE CHECK THIS\n\t\n\ttok = tokens.peek( )\n\tif debug: print(\"Expression: \", tok)\n\tleft = andExpr( ) #does the left side of the grammar \n\ttok = tokens.peek( )\n\twhile tok == \"or\": #checks to see if there is the token or and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = andExpr( )\n\t\tleft = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE THIS TO STRING CAUSE ITS \"or\"\n\t\ttok = tokens.peek( )\n\treturn left", "def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]", "def expression_tree(postfix:str) -> Node:\n stack = deque()\n for ch in postfix:\n if ch not in {'+', '-', '*', '/', '^'}:\n stack.append(Node(ch))\n else:\n middle_node = Node(ch)\n right_node = stack.pop()\n left_node = stack.pop()\n middle_node ._right = right_node\n middle_node._left = left_node\n stack.append(middle_node)\n return stack.pop()", "def operation(self, other=None, operator=None):\n terms = [self]\n if other is not None and operator is not EmptyQuery:\n terms.append(other)\n return Operation(terms, operator=operator)", "def _append_operator(self, operator):", "def make_binary(sv, piece, o, op):\r\n here=piece.rfind(op) # look for last occurrence\r\n there=here+len(op)\r\n t1=piece[:here].strip(Space) # first term (sometimes omitted)\r\n t2=piece[there:].strip(Space) # second term must be present\r\n if not t2: \r\n print(\"\\n\", Err_op_syntax, o) # *** Syntax error in operator ***\r\n print(\" \", piece)\r\n raise ReferenceError\r\n first=tree_build(sv, t1) # process each term RECURSIVE\r\n second=tree_build(sv, t2)\r\n return (o, first, second)", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == 
\",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def _reduce_expr(tree, tok):\n second = tree.pop()\n if len(tree) > 0 and not Parser._is_unary_op(tok):\n first = tree.pop()\n expr = BinaryExpression(first, tok, second)\n else:\n expr = UnaryExpression(second, tok)\n tree.append(expr)", "def _(self, node: BinaryOp):\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n return f\"( {node.op} {left} {right} )\"", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def 
reflected_binary_operator(op):\n assert not is_comparison(op)\n\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator", "def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )", "def _lex_operators(self):\n try:\n val = self._current\n type = Lexer._OPERATORS[self._current]\n self._advance()\n return Token(val, type)\n except KeyError:\n raise ParserError(self._expr,\n \"Encountered invalid token '{t}' at {i}\".format(\n t=self._current, i=self._index))", "def match_expr(self, precedence: int) -> \"AbstractNode\":\n tkn = self.lexer.tkn\n # This line is solely to satisfy mypy.\n left = AbstractNode()\n if tkn.type == Token.AT:\n self.lexer.next_token()\n address = self.match_expr(PREC_PREFIX)\n left = MemoryNode(address)\n elif tkn.type == Token.INT:\n try:\n left = IntNode(int(tkn.value, base=0))\n except ValueError:\n raise SyntaxError(\"invalid integer literal: {}\".format(tkn))\n else:\n self.lexer.next_token()\n elif tkn.type == Token.MINUS:\n self.lexer.next_token()\n left = PrefixNode(\"-\", self.match_expr(PREC_PREFIX))\n elif tkn.type == Token.REGISTER:\n try:\n left = RegisterNode(register_to_index(tkn.value))\n except HERAError:\n raise SyntaxError(\"{} is not a valid register\".format(tkn.value))\n self.lexer.next_token()\n elif tkn.type == Token.SYMBOL:\n left = SymbolNode(tkn.value)\n self.lexer.next_token()\n elif tkn.type == Token.LPAREN:\n self.lexer.next_token()\n left = self.match_expr(PREC_LOWEST)\n if self.lexer.tkn.type != Token.RPAREN:\n self.unexpected(self.lexer.tkn)\n self.lexer.next_token()\n else:\n self.unexpected(tkn)\n\n infix_tkn = self.lexer.tkn\n while infix_tkn.type in PREC_MAP and precedence < PREC_MAP[infix_tkn.type]:\n infix_precedence = PREC_MAP[infix_tkn.type]\n self.lexer.next_token()\n right = self.match_expr(infix_precedence)\n left = InfixNode(infix_tkn.value, left, right)\n infix_tkn = self.lexer.tkn\n return left", "def Expression(self, paren=False):\n left = self.Conjunction(paren)\n while self.currtok[1].name == \"OR\":\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Conjunction()\n left = BinaryExpr(op, left, right, paren)\n return left", "def binary_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\tresult = cls.execute_binary_operator(quad.operator, left_op, right_op)\n\t\tcls.set_address_value(quad.result, result)", "def binary_op(node_factory_function: Callable) -> Callable:\n\n @wraps(node_factory_function)\n def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:\n left, right = as_nodes(left, right)\n node = node_factory_function(left, right, *args, **kwargs)\n node = _set_node_friendly_name(node, **kwargs)\n return node\n\n return 
wrapper", "def infix(self):\n return \"(\" + self.leftOperand.infix() + \" \" + str(self.operator) + \" \" + self.rightOperand.infix() + \")\"", "def __init__(self, token, left=None, right=None):\n super().__init__() # LinkedBinaryTree initialization\n self._add_root(token) # use inherited, nonpublic method\n if left is not None: # presumably three-parameter form\n self._attach(self.root(), left, right) # use inherited, nonpublic method", "def __init__(self, op, expression1, expression2):\n LinearExpression.__init__(self)\n\n self.op = op\n self.expression1 = expression1\n self.expression2 = expression2", "def make_unary(sv, piece, o, op):\r\n there=len(op) # start position of last part\r\n # if the object is subscripted / has args\r\n if piece[there:].startswith(Special+Bloc): \r\n here=piece[there+1:].find(Special) # find ending delimiter\r\n key=piece[there+1:there+here+1] # extract key for the block\r\n if piece[there+here+2:].strip(Space): # something after the block (some other subscript)\r\n first=(o, tree_build(sv, sv.Blocks[key]), None) # Build block RECURSIVE \r\n last=tree_build(sv, piece[there+here+2:]) # build other subscript RECURSIVE\r\n res=(Special, first, last) # code for a subscripted object\r\n else:\r\n res=(o, tree_build(sv, sv.Blocks[key]), None) # Build block RECURSIVE\r\n return res\r\n # the object is not subscripted but may have parts separated by space\r\n if Space in piece.strip(Space): return (o, tree_build(sv, piece[there:]), None) # Build RECURSIVE\r\n return make_leaf(sv, piece.strip(Space))", "def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)", "def terminal_node(\n self,\n expr: Any = None,\n ) -> None:\n self.data.append(\n {\n \"type\": \"TERMINAL\",\n \"expr\": expr,\n \"id\": len(\n self.data,\n ),\n }\n )", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def create_expr(self, exprcls, ast, params=None, nopush=False):\n if params is None:\n expr = exprcls(self.current_parent, ast=ast)\n else:\n expr = exprcls(self.current_parent, ast=ast, **params)\n if not nopush:\n self.push_state(expr)\n return expr", "def Relation(self, paren=False):\n left = self.Addition(paren)\n if self.currtok[1].name in {\"GREATERTHAN\", \"LESSTHAN\", \"LET\", \"GET\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Addition(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def Term(self, paren=False):\n left = self.Factor()\n while self.currtok[1].name in {\"TIMES\", \"DIVISION\", \"MOD\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Factor()\n left = BinaryExpr(op, left, right, paren)\n return left", "def get_operator_to_make_TOD(self):\n if len(self) == 1:\n return self.get_operator()\n op = self._get_array_of_operators()\n return BlockRowOperator(op, new_axisin=0)", "def test_operator(self):\n\n tokens = list(Lexer(\"+-*/^%\").generate_tokens())\n answer = [Token(TokenType.PLUS),\n Token(TokenType.MINUS),\n Token(TokenType.MULTIPLY),\n Token(TokenType.DIVIDE),\n Token(TokenType.EXPONENT),\n Token(TokenType.MODULO)]\n self.assertEqual(tokens, answer)", "def is_operator(obj):\n return isinstance(obj, Token) and obj[0] not in '/01234567890+-.<[('", "def Addition(self, paren=False):\n left = self.Term(paren)\n while self.currtok[1].name in 
{\"PLUS\", \"MINUS\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Term(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def evaluate(node,operators):\n\tif isinstance(node, ast.Num):\n\t\treturn node.n\n\telif isinstance(node, ast.BinOp):\n\t\treturn operators[type(node.op)](evaluate(node.left,operators), evaluate(node.right,operators))\n\telif isinstance(node, ast.UnaryOp):\n\t\treturn operators[type(node.op)](evaluate(node.operand,operators))\n\telse:\n\t\traise TypeError(node)", "def is_operator(node):\n return node.startswith('$')", "def _compile_node(selector):\n # To avoid precedence-related bugs, any sub-expression that is passed\n # around must be \"atomic\": add parentheses when the top-level would be\n # an operator. Bare literals and function calls are fine.\n\n # 1 and 0 are used for True and False to avoid global lookups.\n\n if isinstance(selector, parser.CombinedSelector):\n left_inside = _compile_node(selector.left)\n if left_inside == '0':\n return '0' # 0 and x == 0\n elif left_inside == '1':\n # 1 and x == x, but the element matching 1 still needs to exist.\n if selector.combinator in (' ', '>'):\n left = 'el.parent is not None'\n elif selector.combinator in ('~', '+'):\n left = 'el.previous is not None'\n else:\n raise SelectorError('Unknown combinator', selector.combinator)\n # Rebind the `el` name inside a generator-expressions (in a new scope)\n # so that 'left_inside' applies to different elements.\n elif selector.combinator == ' ':\n left = 'any((%s) for el in el.iter_ancestors())' % left_inside\n elif selector.combinator == '>':\n left = ('next(el is not None and (%s) for el in [el.parent])'\n % left_inside)\n elif selector.combinator == '+':\n left = ('next(el is not None and (%s) for el in [el.previous])'\n % left_inside)\n elif selector.combinator == '~':\n left = ('any((%s) for el in el.iter_previous_siblings())'\n % left_inside)\n else:\n raise SelectorError('Unknown combinator', selector.combinator)\n\n right = _compile_node(selector.right)\n if right == '0':\n return '0' # 0 and x == 0\n elif right == '1':\n return left # 1 and x == x\n else:\n # Evaluate combinators right to left:\n return '(%s) and (%s)' % (right, left)\n\n elif isinstance(selector, parser.CompoundSelector):\n sub_expressions = [\n expr for expr in map(_compile_node, selector.simple_selectors)\n if expr != '1']\n if len(sub_expressions) == 1:\n test = sub_expressions[0]\n elif '0' in sub_expressions:\n test = '0'\n elif sub_expressions:\n test = ' and '.join('(%s)' % e for e in sub_expressions)\n else:\n test = '1' # all([]) == True\n\n if isinstance(selector, parser.NegationSelector):\n if test == '0':\n return '1'\n elif test == '1':\n return '0'\n else:\n return 'not (%s)' % test\n else:\n return test\n\n elif isinstance(selector, parser.LocalNameSelector):\n return ('el.local_name == (%r if el.in_html_document else %r)'\n % (selector.lower_local_name, selector.local_name))\n\n elif isinstance(selector, parser.NamespaceSelector):\n return 'el.namespace_url == %r' % selector.namespace\n\n elif isinstance(selector, parser.ClassSelector):\n return '%r in el.classes' % selector.class_name\n\n elif isinstance(selector, parser.IDSelector):\n return 'el.id == %r' % selector.ident\n\n elif isinstance(selector, parser.AttributeSelector):\n if selector.namespace is not None:\n if selector.namespace:\n key = '(%r if el.in_html_document else %r)' % (\n '{%s}%s' % (selector.namespace, selector.lower_name),\n '{%s}%s' % (selector.namespace, 
selector.name),\n )\n else:\n key = ('(%r if el.in_html_document else %r)'\n % (selector.lower_name, selector.name))\n value = selector.value\n if selector.operator is None:\n return '%s in el.etree_element.attrib' % key\n elif selector.operator == '=':\n return 'el.etree_element.get(%s) == %r' % (key, value)\n elif selector.operator == '~=':\n if len(value.split()) != 1 or value.strip() != value:\n return '0'\n else:\n return (\n '%r in split_whitespace(el.etree_element.get(%s, \"\"))'\n % (value, key))\n elif selector.operator == '|=':\n return ('next(v == %r or (v is not None and v.startswith(%r))'\n ' for v in [el.etree_element.get(%s)])'\n % (value, value + '-', key))\n elif selector.operator == '^=':\n if value:\n return 'el.etree_element.get(%s, \"\").startswith(%r)' % (\n key, value)\n else:\n return '0'\n elif selector.operator == '$=':\n if value:\n return 'el.etree_element.get(%s, \"\").endswith(%r)' % (\n key, value)\n else:\n return '0'\n elif selector.operator == '*=':\n if value:\n return '%r in el.etree_element.get(%s, \"\")' % (value, key)\n else:\n return '0'\n else:\n raise SelectorError(\n 'Unknown attribute operator', selector.operator)\n else: # In any namespace\n raise NotImplementedError # TODO\n\n elif isinstance(selector, parser.PseudoClassSelector):\n if selector.name == 'link':\n return ('%s and el.etree_element.get(\"href\") is not None'\n % html_tag_eq('a', 'area', 'link'))\n elif selector.name == 'enabled':\n return (\n '(%s and el.etree_element.get(\"disabled\") is None'\n ' and not el.in_disabled_fieldset) or'\n '(%s and el.etree_element.get(\"disabled\") is None) or '\n '(%s and el.etree_element.get(\"href\") is not None)'\n % (\n html_tag_eq('button', 'input', 'select', 'textarea',\n 'option'),\n html_tag_eq('optgroup', 'menuitem', 'fieldset'),\n html_tag_eq('a', 'area', 'link'),\n )\n )\n elif selector.name == 'disabled':\n return (\n '(%s and (el.etree_element.get(\"disabled\") is not None'\n ' or el.in_disabled_fieldset)) or'\n '(%s and el.etree_element.get(\"disabled\") is not None)' % (\n html_tag_eq('button', 'input', 'select', 'textarea',\n 'option'),\n html_tag_eq('optgroup', 'menuitem', 'fieldset'),\n )\n )\n elif selector.name == 'checked':\n return (\n '(%s and el.etree_element.get(\"checked\") is not None and'\n ' ascii_lower(el.etree_element.get(\"type\", \"\")) '\n ' in (\"checkbox\", \"radio\"))'\n 'or (%s and el.etree_element.get(\"selected\") is not None)'\n % (\n html_tag_eq('input', 'menuitem'),\n html_tag_eq('option'),\n )\n )\n elif selector.name in ('visited', 'hover', 'active', 'focus',\n 'target'):\n # Not applicable in a static context: never match.\n return '0'\n elif selector.name == 'root':\n return 'el.parent is None'\n elif selector.name == 'first-child':\n return 'el.index == 0'\n elif selector.name == 'last-child':\n return 'el.index + 1 == len(el.etree_siblings)'\n elif selector.name == 'first-of-type':\n return ('all(s.tag != el.etree_element.tag'\n ' for s in el.etree_siblings[:el.index])')\n elif selector.name == 'last-of-type':\n return ('all(s.tag != el.etree_element.tag'\n ' for s in el.etree_siblings[el.index + 1:])')\n elif selector.name == 'only-child':\n return 'len(el.etree_siblings) == 1'\n elif selector.name == 'only-of-type':\n return ('all(s.tag != el.etree_element.tag or i == el.index'\n ' for i, s in enumerate(el.etree_siblings))')\n elif selector.name == 'empty':\n return 'not (el.etree_children or el.etree_element.text)'\n else:\n raise SelectorError('Unknown pseudo-class', selector.name)\n\n elif 
isinstance(selector, parser.FunctionalPseudoClassSelector):\n if selector.name == 'lang':\n tokens = [\n t for t in selector.arguments\n if t.type != 'whitespace'\n ]\n if len(tokens) == 1 and tokens[0].type == 'ident':\n lang = tokens[0].lower_value\n else:\n raise SelectorError('Invalid arguments for :lang()')\n\n return ('el.lang == %r or el.lang.startswith(%r)'\n % (lang, lang + '-'))\n else:\n if selector.name == 'nth-child':\n count = 'el.index'\n elif selector.name == 'nth-last-child':\n count = '(len(el.etree_siblings) - el.index - 1)'\n elif selector.name == 'nth-of-type':\n count = ('sum(1 for s in el.etree_siblings[:el.index]'\n ' if s.tag == el.etree_element.tag)')\n elif selector.name == 'nth-last-of-type':\n count = ('sum(1 for s in el.etree_siblings[el.index + 1:]'\n ' if s.tag == el.etree_element.tag)')\n else:\n raise SelectorError('Unknown pseudo-class', selector.name)\n\n result = parse_nth(selector.arguments)\n if result is None:\n raise SelectorError(\n 'Invalid arguments for :%s()' % selector.name)\n a, b = result\n # x is the number of siblings before/after the element\n # Matches if a positive or zero integer n exists so that:\n # x = a*n + b-1\n # x = a*n + B\n B = b - 1\n if a == 0:\n # x = B\n return '%s == %i' % (count, B)\n else:\n # n = (x - B) / a\n return ('next(r == 0 and n >= 0'\n ' for n, r in [divmod(%s - %i, %i)])'\n % (count, B, a))\n\n else:\n raise TypeError(type(selector), selector)", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def ifop(stream: t.List[str]) -> AST:\n n, last = expr(stream)\n if not stream[n:] or stream[n] not in (\"=>\", \"<=>\"):\n raise SyntaxError(\"Expected => or <=>.\")\n if not stream[n + 1 :]:\n raise SyntaxError(\"Expected expression.\")\n m, rast = expr(stream[n + 1 :])\n if stream[n + 1 + m :]:\n raise SyntaxError(\"Unexpected character '{}'.\".format(stream[n + 1 + m]))\n return AST(stream[n], [last, rast])", "def create() -> 'Tokenizer':\n token_op_table = [\n EOS,\n op.Concat,\n op.ConstStr,\n op.SubStr,\n op.GetSpan,\n op.Trim,\n ]\n\n # Nesting operators and their args get \"compacted\" into\n # \"primitive\" tokens\n\n for type_ in op.Type:\n for index in op.INDEX:\n token_op_table.append((op.GetToken, type_, index))\n\n for case in op.Case:\n token_op_table.append((op.ToCase, case))\n\n for delim1 in op.DELIMITER:\n for delim2 in op.DELIMITER:\n token_op_table.append((op.Replace, delim1, delim2))\n\n for dsl_regex in list(op.Type) + list(op.DELIMITER):\n token_op_table.append((op.GetUpto, dsl_regex))\n\n for dsl_regex in list(op.Type) + list(op.DELIMITER):\n token_op_table.append((op.GetFrom, dsl_regex))\n\n for type_ in op.Type:\n for index in op.INDEX:\n token_op_table.append((op.GetFirst, type_, index))\n\n for type_ in op.Type:\n token_op_table.append((op.GetAll, type_))\n\n # Primitive types\n\n for type_ in op.Type:\n token_op_table.append(type_)\n\n for boundary in op.Boundary:\n token_op_table.append(boundary)\n\n # Covers op.INDEX\n for position in range(op.POSITION[0], op.POSITION[1]+1):\n token_op_table.append(position)\n\n # This covers op.DELIMITER\n for character in op.CHARACTER:\n token_op_table.append(character)\n\n token_op_table = {\n token: op\n for token, op in enumerate(token_op_table)\n }\n\n op_token_table = {\n op: token\n for token, op in token_op_table.items()\n }\n\n assert len(token_op_table) == len(op_token_table)\n\n string_token_table = {\n 
char: token\n for token, char in enumerate(op.CHARACTER)\n }\n\n return Tokenizer(\n token_op_table=token_op_table,\n op_token_table=op_token_table,\n string_token_table=string_token_table,\n )", "def __init__(self, left, right):\n super(compositeORGenerator,self).__init__()\n self._left = left\n self._right = right", "def visit_UnaryOperator(self, node: UnaryOperator) -> Constant:\n\n operator = node.operator.type\n if operator == TokenType.PLUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(+expression.constant))\n elif operator == TokenType.MINUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(-expression.constant))", "def rpn(token_list):\n output = deque()\n operator = deque()\n ops = {'+': 2, '-': 2, '*': 3, '/': 3, '**': 4}\n\n for token in token_list:\n if token.type == 'Literal' or token.type == 'Variable':\n output.appendleft(token)\n elif token.type == 'Function':\n operator.append(token)\n elif token.type == 'Operator':\n while len(operator) > 0 and (\n operator[-1].type == 'Function' or operator[-1].type == 'Operator' and (\n ops[operator[-1].value] > ops[token.value] or (\n ops[operator[-1].value] > ops[token.value] and token.value == '^')\n )) and token.type != 'Left Parenthesis':\n output.appendleft(operator.pop())\n operator.append(token)\n elif token.value == 'Left Parenthesis':\n operator.append(token)\n elif token.value == 'Right Parenthesis':\n while operator[-1].type != 'Left Parenthesis':\n output.appendleft(operator.pop())\n if operator[-1].type == 'Left Parenthesis':\n operator.pop()\n if operator[-1].type == 'Function':\n output.appendleft(operator.pop())\n while len(operator) > 0:\n output.appendleft(operator.pop())\n\n return output", "def __construct_tree(self, root_operator: PatternStructure, tree_plan: TreePlanNode,\n args: List[PatternStructure], sliding_window: timedelta, parent: Node,\n consumption_policy: ConsumptionPolicy):\n if isinstance(root_operator, UnaryStructure) and parent is None:\n # a special case where the top operator of the entire pattern is an unary operator\n return self.__handle_primitive_event_or_nested_structure(tree_plan, root_operator,\n sliding_window, parent, consumption_policy)\n\n if type(tree_plan) == TreePlanLeafNode:\n # either a leaf node or an unary operator encapsulating a nested structure\n # TODO: must implement a mechanism for actually creating nested tree plans instead of a flat plan\n # with leaves hiding nested structure\n return self.__handle_primitive_event_or_nested_structure(tree_plan, args[tree_plan.event_index],\n sliding_window, parent, consumption_policy)\n\n # an internal node\n current = self.__create_internal_node_by_operator(root_operator, sliding_window, parent)\n left_subtree = self.__construct_tree(root_operator, tree_plan.left_child, args,\n sliding_window, current, consumption_policy)\n right_subtree = self.__construct_tree(root_operator, tree_plan.right_child, args,\n sliding_window, current, consumption_policy)\n current.set_subtrees(left_subtree, right_subtree)\n return current", "def initialize_operator(self, operator=None, matrix=False, eval_at_once=False):\n # TODO: Make this more efficient, only compute values needed at each (r,c) step.\n # For this, 'operator' must support the 'component=(r,c)' option.\n # Operator is None is interpreted as identity transformation\n if operator is None:\n self._operator = lambda nodes, dummy, entry=None: ones((1, nodes.shape[1])) if entry[0] == entry[1] else zeros((1, nodes.shape[1]))\n 
else:\n if matrix is False:\n self._operator = lambda nodes, dummy, entry=None: operator(nodes, entry=entry)\n else:\n self._operator = operator\n self._eval_at_once = eval_at_once", "def get_fermion_operator(operator):\n fermion_operator = FermionOperator()\n\n if isinstance(operator, PolynomialTensor):\n for term in operator:\n fermion_operator += FermionOperator(term, operator[term])\n return fermion_operator\n\n raise TypeError(\"Unsupported type of oeprator {}\".format(operator))", "def visit_BinaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.left) + self.visit(node.right)\n if token.type == MINUS:\n return self.visit(node.left) - self.visit(node.right)\n if token.type == MUL:\n return self.visit(node.left) * self.visit(node.right)\n if token.type == DIV:\n result = self.visit(node.left) / self.visit(node.right)\n if result.is_integer():\n return int(result)\n return result\n self.raise_error()", "def andExpr( ): #DOUBLE CHECK THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"andExpr: \", tok)\n\tleft = relationalExpr( ) #does the left side of the grammar\n\ttok = tokens.peek( )\n\twhile tok == \"and\": #checks to see if there is the token \"and\" and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = relationalExpr( )\n\t\tleft = BinaryExpr(tok, left, right)#MIGHT HAVE TO CHANGE TO STRING \n\t\ttok = tokens.peek( )\n\treturn left", "def op(self) -> Literal[\"==\"] | Literal[\"<=\"] | Literal[\">=\"]:\n ...", "def __init__(self, root, left, right, end):\n self._root = root\n self._left = left\n self._right = right\n self._terminal = end", "def isoperator(token):\n\n # Token is an operator\n return token and token.lower() in Token.OPERATORS", "def make_op1(op, expr):\n\n if (op == None) or (expr == None):\n return None\n\n if op == 'NOT':\n op = '!'\n if is_assembler('beebasm') and (op == '!'):\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"NOT(%s)\", expr)\n return 'NOT(' + expr + ')'\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"%s%s\", op, bracket(expr))\n return op + bracket(expr)", "def convert(token, depth=1):\n\n # finds the root token\n if token.kind == 'EQUALS':\n # asssign left Token as output pin\n new_node = Node(token.left, pin=True, root=True)\n\n # recursively go through new_node to find children\n new_child_node = convert(token.right, depth + 1)\n new_node.add(new_child_node)\n\n # must be an input pin\n elif token.kind == 'ID' or token.kind == 'LITERAL':\n new_node = Node(token, pin=True, weight=1)\n\n # determines depth of tree\n self.depth = depth if depth > self.depth else self.depth\n\n # goes through tokens that are not pins or the root\n else:\n new_node = Node(token, gate=True)\n\n # recursively checks for right Tokens\n if token.right:\n new_child_node = convert(token.right, depth + 1)\n new_node.children += [new_child_node]\n\n # recursively checks for left Tokens\n if token.left:\n\n # OPTIMIZE PART\n # left child Token might be the same kind as root Token\n # if so, don't add the child Token, just add its children\n if token.left.kind == token.kind:\n new_child_node = convert(token.left, depth)\n new_node.children += new_child_node.children\n\n # checks if left child is a gate and applies not function\n elif new_node.kind == 'not' and token.left.terminal:\n if token.left.kind[0].lower() == 'n':\n new_node.kind = token.left.kind[1:].lower()\n else:\n new_node.kind = 'n' + token.left.kind.lower()\n\n new_child_node = 
convert(token.left, depth)\n new_node.children += new_child_node.children\n\n # no optimizing to be done\n else:\n new_child_node = convert(token.left, depth + 1)\n new_node.children += [new_child_node]\n\n new_node.calculate_weight()\n return new_node", "def unary_operator(op):\n # Only negate is currently supported for all our possible input types.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n\n def unary_operator(self):\n # This can't be hoisted up a scope because the types returned by\n # unary_op_return_type aren't defined when the top-level function is\n # invoked.\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{op}({expr})\".format(op=op, expr=self._expr),\n self.inputs,\n )\n else:\n return NumExprFactor(\"{op}x_0\".format(op=op), (self,))\n\n unary_operator.__doc__ = \"Unary Operator: '%s'\" % op\n return unary_operator", "def __init__(self, str_exp=None, kind=None, scanner=None):\n self.kind = None\n self.name = 'undef'\n self.attr = None\n self.child = None\n self.left = None\n self.right = None\n self.code = None\n\n if str_exp is not None:\n logging.debug('========== EXP in init(NODE): SEXP = [' + str_exp + ']')\n scanner = lex.Scanner(rules)\n scanner.setString(str_exp)\n\n if kind is not None: # create an empty node\n self.kind = kind\n return\n\n if scanner is None:\n raise Exception('Fatal Error: scanner not defined')\n\n while scanner.curToken().type in FIRST:\n\n if scanner.curToken().type == LITERAL:\n self.name = scanner.curToken().name\n self.code = LITERAL\n self.kind = ATOM\n scanner.move()\n\n elif scanner.curToken().type == LPAREN:\n scanner.move() # skip the parentheses\n\n tmp = Exp(scanner=scanner) # tree of the expression between parentheses\n self.kind = tmp.kind\n self.attr = tmp.attr\n self.name = tmp.name\n self.left = tmp.left\n self.right = tmp.right\n self.child = tmp.child\n\n if scanner.curToken().type != RPAREN:\n raise ParserException(\"')' expected\")\n scanner.move()\n\n elif isUnitary(scanner.curToken().type):\n self.kind = UNARY\n self.name = scanner.curToken().name\n self.code = scanner.curToken().type\n\n # if token_type == ATTRIB # this is for existence and foreach\n\n scanner.move()\n self.child = Exp(scanner=scanner)\n\n # the scanner has been moved to a successive token\n if scanner.curToken().type == NULLTOKEN:\n break\n\n # check for infix operators\n if isBinary(scanner.curToken().type):\n operator_name = scanner.curToken().name\n operator_type = scanner.curToken().type\n scanner.move()\n\n # move the current node to the left of the tree\n lnode = Exp(kind=self.kind)\n lnode.name = self.name\n lnode.attr = self.attr\n lnode.child = self.child\n lnode.left = self.left\n lnode.right = self.right\n lnode.code = self.code\n\n # this node became the handler aka the binary operator\n self.code = operator_type\n self.name = operator_name\n self.kind = BINARY\n self.left = lnode\n # lookup the second child of the operator\n self.right = Exp(scanner=scanner)", "def isOperator(self):\n return _libsbml.ASTNode_isOperator(self)", "def Equality(self, paren=False):\n left = self.Relation(paren)\n if self.currtok[1].name in {\"EQULITY\", \"NOTEQUAL\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Relation(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def __init__(self: 'BinaryTree', symbol: str,\n left: 'RegexTree', right: 'RegexTree') -> None:\n RegexTree.__init__(self, symbol, [left, right])", "def __init__(self, op, 
symbolicExpression1, symbolicExpression2):\n\n SymbolicExpression.__init__(self)\n \n self.op = op\n self.symbolicExpression1 = symbolicExpression1\n self.symbolicExpression2 = symbolicExpression2", "def declare_operators(*op_list):\n operators.update({op.__name__:op for op in op_list})\n return operators", "def __init__(self, a_node, b_node, name=None):\n BinaryMatrixOp.__init__(self, a_node, b_node, name)", "def era_operator(cls, quad):\n\t\tfunc_name = quad.left_operand\n\t\tfunc = FunctionTable.function_dict[func_name]\n\t\tcls.mem_to_push = Memory(len(type_dict), func.var_quantities) \n\t\tprint \"> Created new memory for '{}': {}\".format(func_name, cls.mem_to_push.memory)", "def add_operator(self, operator: Callable) -> None:\n self.operators.append(operator)", "def append_operator(cls, operator):\n for context in cls._active_contexts:\n context._append_operator(operator) # pylint: disable=protected-access", "def is_identity(operator):\n if isinstance(\n operator,\n (QubitOperator, FermionOperator, BosonOperator, QuadOperator)):\n return list(operator.terms) == [()]\n raise TypeError('Operator of invalid type.')", "def _add_prefix_to_node(self, prefix_deque: deque, tree: Tree, node: Node, index: int) -> tuple[Tree, int]:\n\n # Get the current token by popping the left of the prefix_deque\n token = prefix_deque.popleft()\n\n\n # If the token is an operator\n if self.is_token(token):\n # Create a new node\n new_node = tree.create_node(\n f\"{token}\", # Name the same as token\n index, # ID that of index\n parent = node # And parent this node\n )\n # Increment index\n index += 1\n\n # Calculate child A\n tree,index = self._add_prefix_to_node(prefix_deque, tree, new_node, index)\n\n # Calculate child B\n tree,index = self._add_prefix_to_node(prefix_deque, tree, new_node, index)\n\n # Return the tree and the index\n return tree, index\n else:\n # If the token is not an operator\n\n # Just create a new node\n new_node = tree.create_node(\n f\"{token}\", # Name the same as token\n index, # ID that of index\n parent = node # And parent this node\n )\n\n # Increment index\n index += 1\n \n # Return index and tree\n return tree, index", "def visitor(node: NodeT, left_distribute: bool) -> NodeT:\n if isinstance(node, ir.AddSub):\n items = OrderedDict() # type: Dict[ir.Node, List[Tuple[str, ir.Node]]]\n new_operators = []\n new_operands = []\n for operator, operand in zip(('+',) + getattr(node, 'operator'),\n getattr(node, 'operand')):\n if (operator == '+' and isinstance(operand, ir.MulDiv) and\n getattr(operand, 'operator') == ('*',)):\n if left_distribute:\n coeff, item = getattr(operand, 'operand')\n else:\n item, coeff = getattr(operand, 'operand')\n items.setdefault(coeff, []).append((operator, item))\n else:\n new_operators.append(operator)\n new_operands.append(operand)\n for coeff, item in items.items():\n operator, operand = zip(*item)\n assert operator[0] == '+'\n new_operators.append(operator[0])\n if len(operand) > 1:\n new_item = ir.AddSub(operator=operator[1:], operand=operand)\n else:\n new_item = operand[0]\n if left_distribute:\n children = coeff, new_item\n else:\n children = new_item, coeff\n new_operands.append(ir.MulDiv(operator=('*',), operand=children))\n if len(new_operands) > 1:\n assert new_operators[0] == '+'\n new_node = ir.AddSub(operator=tuple(new_operators[1:]),\n operand=tuple(new_operands))\n if new_node != node:\n return new_node # type: ignore\n elif new_operands and new_operands[0] != node:\n return new_operands[0]\n return node", "def 
assert_assignment(text, operator, left, right):\n try:\n node = parse_single_statement(text)\n eq_(node.op, operator)\n eq_(node.target.name, left)\n eq_( node.right.value, right)\n except AssertionError as e:\n node.show()\n raise e", "def and_or_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\t# TODO: The next set of lines will fail at a specific case\n\t\tif quad.operator == 10 :\n\t\t\tcls.set_address_value(quad.result, (left_op and right_op))\n\t\telif quad.operator == 11 :\n\t\t\tcls.set_address_value(quad.result, (left_op or right_op))", "def create_inverted_tree(tree: NodeTree) -> NodeTree:\n inverted_tree = NodeTree(tree.value)\n\n if tree.left is not None:\n inverted_tree.right = create_inverted_tree(tree.left)\n\n if tree.right is not None:\n inverted_tree.left = create_inverted_tree(tree.right)\n\n return inverted_tree", "def relationalExpr( ):#MAKE SURE I USED THE RIGHT LOGIC FOR THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"relationalExpr: \", tok)\n\tleft = addExpr( )\n\texpr = \"\"\n\ttok = tokens.peek( )\n\tif tok in relations:\n\t\trel = relation( ) # expecting a relation to start off \n\t\tright = expression( ) # if there is a relation we expect there to be an expression to the right of the relation\n\t\texpr = BinaryExpr( rel, left, right )\n\t\treturn expr #fix this for syntax tree maybe\n\n\treturn left", "def make_operators(self):\n self.relationship_operator = Operators.RelationshipOperator(self)\n self.infection_operator = Operators.InfectionOperator(self)\n self.time_operator = Operators.TimeOperator(self)", "def add_operator(self, operator: Soldier) -> None:\n if isinstance(operator, Soldier):\n if len(self.__operators) < self.MAX_OPERATORS:\n self.__operators.append(operator)\n self.__is_alive = True\n else:\n raise TypeError(\"argument must be a Soldier\")", "def calculator(infix_expr):\n\n # Assign precedence values to operators\n prec = {}\n prec['^'] = 4\n prec['*'] = 3\n prec['/'] = 3\n prec['+'] = 2\n prec['-'] = 2\n prec['('] = 1\n\n # Instantiate stacks\n operand_stack = Stack()\n operator_stack = Stack()\n\n try:\n token_list = infix_expr.split()\n logging.debug(\"token_list = {}\".format(token_list))\n except:\n sys.exit(1)\n\n for token in token_list:\n logging.debug(\"token = {}\".format(token))\n if token in '0123456789':\n operand_stack.push(int(token))\n logging.debug(\"operand_stack.push = {}\".format(token))\n elif token == '(':\n operator_stack.push(token)\n logging.debug(\"operator_stack.push = {}\".format(token))\n elif token == ')':\n logging.debug(\"token = {}\".format(token))\n operator_token = operator_stack.pop()\n logging.debug(\"operator_stack.pop = {}\".format(operator_token))\n while operator_token != '(':\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"while operator_token != '(':\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_token = operator_stack.pop()\n logging.debug(\"new operator_token = {}\".format(operator_token))\n elif token in '^*/+-':\n while (not operator_stack.isEmpty()) and \\\n (prec[operator_stack.peek()] >= prec[token]):\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n 
logging.debug(\"Operator - While:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_stack.push(token)\n logging.debug(\"operator_stack.push(): {}\".format(token))\n else:\n logging.debug(\"else.... exiting....\")\n sys.exit(1)\n\n # Use all remaining operators\n if not operator_stack.isEmpty():\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n logging.debug(\"Remaining Operators:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operand_stack.push(result)\n\n return operand_stack.pop()", "def is_unary_operator(oper):\n # definition:\n # memeber in class\n # ret-type operator symbol()\n # ret-type operator [++ --](int)\n # globally\n # ret-type operator symbol( arg )\n # ret-type operator [++ --](X&, int)\n symbols = ['!', '&', '~', '*', '+', '++', '-', '--']\n if not isinstance(oper, calldef.operator_t):\n return False\n if oper.symbol not in symbols:\n return False\n if isinstance(oper, calldef.member_operator_t):\n if 0 == len(oper.arguments):\n return True\n elif oper.symbol in ['++', '--'] and \\\n isinstance(oper.arguments[0].type, cpptypes.int_t):\n return True\n else:\n return False\n else:\n if 1 == len(oper.arguments):\n return True\n elif oper.symbol in ['++', '--'] \\\n and 2 == len(oper.arguments) \\\n and isinstance(oper.arguments[1].type, cpptypes.int_t):\n # may be I need to add additional check whether first argument is\n # reference or not?\n return True\n else:\n return False", "def new_xmldoc_opml():\n xmldoc = XMLDoc()\n opml = OPML()\n xmldoc.root_element = opml\n\n return (xmldoc, opml)", "def __compile_operator(self, op, caller):\r\n if op == \"+\":\r\n self.__vmwriter.write_arithmetic(\"add\")\r\n elif op == \"-\" and caller == \"expression\":\r\n self.__vmwriter.write_arithmetic(\"sub\")\r\n elif op == \"*\":\r\n self.__vmwriter.write_call(\"Math.multiply\", 2)\r\n elif op == \"/\":\r\n self.__vmwriter.write_call(\"Math.divide\", 2)\r\n elif op == \"&\":\r\n self.__vmwriter.write_arithmetic(\"and\")\r\n elif op == \"|\":\r\n self.__vmwriter.write_arithmetic(\"or\")\r\n elif op == \"<\":\r\n self.__vmwriter.write_arithmetic(\"lt\")\r\n elif op == \">\":\r\n self.__vmwriter.write_arithmetic(\"gt\")\r\n elif op == \"=\":\r\n self.__vmwriter.write_arithmetic(\"eq\")\r\n elif op == \"-\":\r\n self.__vmwriter.write_arithmetic(\"neg\")\r\n elif op == \"~\":\r\n self.__vmwriter.write_arithmetic(\"not\")", "def mutate_bySingleOperator(self, root, operator):\n self.operator = operator\n\n ast.fix_missing_locations(root)\n # traverse the target ast tree and mutate interesting node\n mutated_ast = self.visit(root)\n ast.fix_missing_locations(root)\n\n return mutated_ast", "def to_condition(operator: str, value: Any) -> CellCondition:\n operator = str(operator).lower().strip()\n condition = {\n \">\": lambda x: x is not None and x > value,\n \"<\": lambda x: x is not None and x < value,\n \">=\": lambda x: x is not None and x >= value,\n \"<=\": lambda x: x is not None and x <= value,\n \"==\": lambda x: x == value,\n \"!=\": lambda x: x != value,\n \"is\": lambda x: x is value,\n \"not is\": lambda x: x is not value,\n \"contains\": lambda x: x is not None and value in x,\n \"not contains\": lambda x: x is not None and value not in x,\n \"in\": lambda x: x in value,\n \"not in\": lambda x: x not in value,\n 
}.get(operator)\n\n if not condition:\n raise ValueError(f\"Unknown operator: {operator}\")\n\n return condition", "def assign_operator(cls, quad):\n\t\tvalue = cls.get_address_value(quad.left_operand)\n\t\tif quad.right_operand :\n\t\t\tcls.set_arr_value(quad.result, quad.right_operand, value)\n\t\telse:\n\t\t\tcls.set_address_value(quad.result, value)", "def __init__(self, *args):\n \n self.ops = []\n for _, arg in enumerate(args):\n if arg is None:\n continue\n elif isinstance(arg, Operator):\n self.ops.append(arg)\n elif isinstance(arg, list):\n for op in arg:\n if op is None:\n continue\n elif isinstance(op, Operator):\n self.ops.append(op)\n else:\n raise TypeError('Argument must be either Operator or Hstack')\n \n # check domain\n self.n = len(self.ops)\n domain = []\n for idx in range(self.n):\n if idx < self.n - 1:\n if not self.ops[idx].range.checkSame(self.ops[idx + 1].range):\n raise ValueError('Range incompatibility between Op %d and Op %d' % (idx, idx + 1))\n domain += [self.ops[0].domain]\n super(Hstack, self).__init__(domain=superVector(domain), range=self.ops[0].range)", "def __init__(self, left = None, right = None):\n super(compositeConditionalGenerator,self).__init__()\n self._left = left\n self._right = right", "def build_ast(expression):\n\n # use a directed graph to store the tree\n G = DiGraph()\n\n stack = []\n\n for n in expression:\n # Since the graph does not maintain the order of adding nodes/edges\n # add an extra attribute 'pos' so we can always sort to the correct order\n if isinstance(n, OperatorNode):\n if n.ttype == ept.TOK_TYPE_OP_IN:\n arg2 = stack.pop()\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_node(arg2, pos=2)\n G.add_edge(arg1, n)\n G.add_edge(arg2, n)\n else:\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_edge(arg1, n)\n\n elif isinstance(n, FunctionNode):\n args = [stack.pop() for _ in range(n.num_args)]\n args.reverse()\n for i, a in enumerate(args):\n G.add_node(a, pos=i)\n G.add_edge(a, n)\n # for i in range(n.num_args):\n # G.add_edge(stack.pop(),n)\n else:\n G.add_node(n, pos=0)\n\n stack.append(n)\n\n return G, stack.pop()", "def term( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"Term: \", tok)\n\tleft = factor( )\n\ttok = tokens.peek( )\n\twhile tok == \"*\" or tok == \"/\":\n\t\ttokens.next()\n\t\tright = factor( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left", "def is_binary_operator(oper):\n # definition:\n # memeber in class\n # ret-type operator symbol(arg)\n # globally\n # ret-type operator symbol( arg1, arg2 )\n symbols = [\n ',', '()', '[]', '!=', '%', '%=', '&', '&&', '&=', '*', '*=', '+',\n '+=', '-', '-=', '->', '->*', '/', '/=', '<', '<<', '<<=', '<=', '=',\n '==', '>', '>=', '>>', '>>=', '^', '^=', '|', '|=', '||']\n if not isinstance(oper, calldef.operator_t):\n return False\n if oper.symbol not in symbols:\n return False\n if isinstance(oper, calldef.member_operator_t):\n if 1 == len(oper.arguments):\n return True\n else:\n return False\n else:\n if 2 == len(oper.arguments):\n return True\n else:\n return False", "def convert_elementwise_op(g, op, block):\n\n op_map = {\n \"elementwise_div\": \"divide\",\n \"elementwise_add\": \"add\",\n \"elementwise_mul\": \"multiply\",\n \"elementwise_sub\": \"subtract\",\n \"elementwise_mod\": \"mod\",\n \"elementwise_max\": \"maximum\",\n \"elementwise_min\": \"minimum\",\n \"elementwise_pow\": \"power\",\n \"elementwise_floordiv\": \"floor_divide\",\n \"equal\": \"equal\",\n \"greater_equal\": \"greater_equal\",\n 
\"greater_than\": \"greater\",\n \"less_equal\": \"less_equal\",\n \"less_than\": \"less\",\n \"not_equal\": \"not_equal\",\n }\n op_func = op_map[op.type]\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n ipt0_shape = infer_shape(ipt0)\n ipt1_shape = infer_shape(ipt1)\n axis = op.attr(\"axis\")\n if len(ipt0_shape) != len(ipt1_shape):\n if axis < 0:\n axis = axis + len(ipt0_shape)\n if axis != len(ipt0_shape) - 1:\n ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))\n op_func = get_relay_op(op_func)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)", "def __handle_primitive_event_or_nested_structure(self, tree_plan_leaf: TreePlanLeafNode,\n current_operator: PatternStructure,\n sliding_window: timedelta, parent: Node,\n consumption_policy: ConsumptionPolicy):\n if isinstance(current_operator, PrimitiveEventStructure):\n # the current operator is a primitive event - we should simply create a leaf\n event = current_operator\n if consumption_policy is not None and \\\n consumption_policy.should_register_event_type_as_single(False, event.type):\n parent.register_single_event_type(event.type)\n return LeafNode(sliding_window, tree_plan_leaf.event_index, event, parent)\n\n if isinstance(current_operator, UnaryStructure):\n # the current operator is a unary operator hiding a nested pattern structure\n unary_node = self.__create_internal_node_by_operator(current_operator, sliding_window, parent)\n nested_operator = current_operator.arg\n child = self.__construct_tree(nested_operator, Tree.__create_nested_structure(nested_operator),\n Tree.__get_operator_arg_list(nested_operator), sliding_window, unary_node,\n consumption_policy)\n unary_node.set_subtree(child)\n return unary_node\n\n # the current operator is a nested binary operator\n return self.__construct_tree(current_operator, Tree.__create_nested_structure(current_operator),\n current_operator.args, sliding_window, parent, consumption_policy)", "def __init__(self: 'DotTree', left: 'RegexTree',\n right: 'RegexTree') -> None:\n BinaryTree.__init__(self, \".\", left, right)", "def __init__(self, data, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right" ]
[ "0.6768396", "0.6402284", "0.6212105", "0.6038182", "0.6029708", "0.59814626", "0.59771395", "0.5969799", "0.57881653", "0.57876146", "0.57825124", "0.57195956", "0.56935775", "0.56912225", "0.56751347", "0.559061", "0.5583933", "0.5532599", "0.54641896", "0.5460819", "0.5419034", "0.5418368", "0.5414495", "0.53841996", "0.53565824", "0.5339949", "0.5322291", "0.53207654", "0.5312278", "0.53110856", "0.5306055", "0.5272652", "0.5271246", "0.5270123", "0.5263009", "0.5212397", "0.5198877", "0.51786864", "0.5178488", "0.51732", "0.51681817", "0.51634574", "0.5157324", "0.5138562", "0.5124519", "0.5124152", "0.5109847", "0.5107352", "0.5099674", "0.5093816", "0.5089314", "0.507746", "0.5071173", "0.50677216", "0.5057984", "0.5048431", "0.5024074", "0.50104874", "0.500028", "0.4999717", "0.49994624", "0.49993813", "0.49921978", "0.49638057", "0.49618012", "0.49565318", "0.4937051", "0.4926631", "0.49242032", "0.4905791", "0.4902782", "0.48960978", "0.48907167", "0.48822057", "0.48799077", "0.48773736", "0.48713273", "0.48646808", "0.4861987", "0.48605847", "0.4859568", "0.4852708", "0.48508367", "0.48477334", "0.48255715", "0.48195538", "0.48081177", "0.47973618", "0.47966662", "0.479338", "0.47851366", "0.47837195", "0.4778259", "0.4776471", "0.47736108", "0.47710797", "0.4769113", "0.47667655", "0.47621205", "0.4747374" ]
0.66272795
1
Write a custom auth property where we grab the auth token and put it in the headers
def authenticate(self): #it's weird i have to do this here, but the code makes this not simple auth_json={'email':self.user, 'password':self.password} #send a post with no auth. prevents an infinite loop auth_response = self.post('/auth', data = json.dumps(auth_json), auth = None) _token = auth_response.json['token'] self._token = _token self._wrapped.auth = SpringAuth(_token)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, r):\n r.headers[\"x-aims-auth-token\"] = self._token\n return r", "def auth_header_value(self):\n return f\"token {self.API_TOKEN}\"", "def authorization(self):\n return {'auth-token': '{token}'.format(token=self.token)}", "def authorization(self):\n return {'auth-token': '{token}'.format(token=self.token)}", "def add_auth_to_headers(self):\n if not hasattr(self, \"headers\"):\n self.headers = {\"Content-Type\": \"application/json\"}\n\n login = {\"account_number\": self.account[\"account_number\"],\n \"pin\": self.account[\"pin\"]}\n token = json.loads(self.client.post(\n \"/accounts/login\",\n data=json.dumps(login),\n headers=self.headers).get_data())[\"token\"]\n self.headers[\"Authorization\"] = \"Bearer \" + token", "def _auth_headers(self):\n if self.token_str:\n return {'Authorization': 'Bearer {}'.format(self.token_str)}\n else:\n return {}", "def get_authenticate_header(self):\n return f'Basic realm=\"{self.www_authenticate_realm}\"'", "def auth_token(self):", "def add_header(response):\n response.headers['Authorization'] = response\n return response", "def _get_authorization_header(self):\n return f\"token {self._context.get_github_token()}\"", "def auth_header(self):\n return self._auth_header", "def create_auth_header(api_token):\n return {'Authorization': f'token {api_token}'}", "def get_authorization_header(self):\n return {\"Authorization\": \"Bearer {}\".format(self.get_jwt())}", "def auth_headers(current_user_token: str) -> Dict[str, str]:\n return {\"Authorization\": f\"Bearer {current_user_token}\"}", "def __call__(self, r):\n r.headers['Authorization'] = 'Bearer %s' % self.get_access_token()\n return r", "def __call__(self, r):\n if (self.token):\n r.headers['access-token'] = self.token\n return r", "def __call__(self, r):\n r.headers['Authorization'] = 'OAuth ' + self._access_token\n return r", "def authenticate_header(self, request):\n return '{0} realm=\"{1}\"'.format(settings.JWT_AUTH_HEADER_PREFIX,\n self.www_authenticate_realm)", "def bearer_authentication(self, token: str) -> None:\n self.api_session.headers.update({'Authorization': f'Bearer {token}'})", "def header_token(token):\n return {'Authorization': '{0} {1}'.format('JWT', token)}", "def apply(self, headers):\n headers['Authorization'] = 'Bearer ' + self._metadata_service.auth_token", "def __call__(self, request):\n request.headers['Authorization'] = f'Token {self.token}'\n return request", "def _add_auth_header(\n self,\n headers: t.Union[None, t.Dict[str, t.Any]] = None,\n ) -> t.Dict[str, t.Any]:\n if headers is None:\n headers = {}\n headers[self.AUTH_HEADER_NAME] = f'{self.AUTH_PREFIX}{self.API_TOKEN}'\n return headers", "def _make_header(self, token):\n header = HEADER.copy()\n header['Authorization'] = \"Bearer {}\".format(token)\n\n return header", "def __call__(self, resp):\r\n if not self.auth_token:\r\n self.auth()\r\n resp.register_hook('response', self.handle_error)\r\n resp.headers['X-Auth-Token'] = self.auth_token\r\n return resp", "def buildHeader(self):\n if self.key:\n userString = self.user+b\":\"+self.key\n else:\n userString = self.user+b\":\"\n \n encodedUserString = b64encode(userString)\n decodedUserString = encodedUserString.decode(\"ascii\")\n self.basicAuthHeader = {\"Authorization\": \"Basic \" + decodedUserString}", "def _headers(self) -> dict[str, str]:\n headers = super()._headers()\n headers[\"Authorization\"] = f\"Bearer {self.__token}\"\n return headers", "def build_header(self):\n authstring = \"Bearer \" + self.auth_token\n header = {\n 
\"Authorization\": authstring,\n \"Content-Type\": \"application/json\",\n \"User-Agent\": self.user_agent,\n \"Accept-Encoding\": \"gzip\"\n }\n return header", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def create_authorization_header(self, **kwargs):\n return {\"Authorization\": \"Bearer {}\".format(self.create_jwt(**kwargs))}", "def set_auth_headers(self, access_token, client_id):\n\t\tself.headers['X-Udemy-Bearer-Token'] = access_token\n\t\tself.headers['X-Udemy-Client-Id'] = client_id\n\t\tself.headers['Authorization'] = \"Bearer \" + access_token\n\t\tself.headers['X-Udemy-Authorization'] = \"Bearer \" + access_token", "def add_auth(self, http_request):\r\n pass", "def get_authenticate_header(self):\n pass", "def get_auth_header(self) -> Mapping[str, Any]:\n return {}", "def _set_auth_header(self, refresh=False):\n if isinstance(self._token_man, TokenManager):\n self._api_client.set_default_header(Headers.authorization,\n self._token_man.get_header(refresh=refresh))\n else:\n self._api_client.set_default_header(Headers.x_auth_token,\n self._token_man.get_session_token(refresh=refresh))", "def get_headers(self):\n return {\n 'Authorization': 'JWT {}'.format(self.token)\n }", "def _token_header(token=None):\n if not token:\n return None\n\n message = '{token}:Ignored'.format(token=token)\n headers = {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}\n return headers", "def _create_auth_headers(self):\n auth_headers = {**self.get_headers()}\n auth_headers['Authorization'] = 'Bearer ' + self.get_access_token()\n return auth_headers", "def basic_header(self):\n self.auth = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\\n', '')\n return { \n #\"Authorization\" : \"Basic %s\" % self.auth, \n \"Content-type\": \"text/plain\" }", "def _make_header(self, token: str) -> dict:\n\n header = HEADER.copy()\n # modify to represent how to build the header\n header['Authorization'] = f\"Bearer {token}\"\n\n return header", "def generate_headers_with_auth(self, token_type: str = 'access'):\n if re.search('access', token_type, re.I):\n bearer_token = self._access_token\n elif re.search('refresh', token_type, re.I):\n bearer_token = self._refresh_token\n else:\n raise (Exception('Please check docstrings and change token_type value'))\n\n return {\n 'accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + bearer_token\n }", "def _addAuthenticationToRequestHeader(request, client):\n request.addAuthorization(client.id, client.secret)", "def add_auth_token(self):\n auth_token = json.loads(os.getenv('AUTH_TOKEN'))\n self.driver.add_cookie(auth_token)", "def __header_base64(self):\n header_base64 = base64.b64encode(f'{self.client_id}:{self.client_secret}'.encode('ascii'))\n header_base64 = str(header_base64).split(\"'\")[1]\n return {'Authorization': f'Basic {header_base64}'}", "def test_headers(self):\n token = 'abc123'\n requests.get(self.url, auth=BearerAuth(token))\n self.assertEqual(httpretty.last_request().headers['Authorization'], 'Bearer {}'.format(token))", "def request_http_header( self ) -> dict:\n return {'content-type': 'application/json','Authorization':f'NLAuth nlauth_account={self._acct_number},nlauth_email={self._auth_email},nlauth_signature={self._acct_signature},nlauth_role=1090'}", "async def gen_headers(auth_string):\n return {\n \"Authorization\": f\"Basic 
{str(b64encode(bytearray(auth_string, 'utf8')), 'utf-8')}\"\n }", "def get_api_header(token):\n return {\n 'Authorization': 'Token ' + str(token)}", "def auth_headers(self, path, payload=\"\"):\n rand = hexlify(Random.new().read(16))\n auth = self.souma.sign(\"\".join([self.souma.id, rand, path, payload]))\n return [(\"Glia-Rand\", rand), (\"Glia-Auth\", auth), (\"Glia-Souma\", self.souma.id)]", "def get_auth_header(self):\n if not self.verify():\n return None\n\n auth_val = self.encode_auth_header_val()\n if not auth_val:\n return None\n\n return {'Authorization': auth_val.replace('\\n', '')}", "def get_basic_auth_token(self):\n return f'Basic {self.key}'", "def build_header(token: str = None):\n return {\n \"Content-Type\": \"application/json\",\n \"X-Auth-Token\": token or get_project_token(),\n }", "def authenticationToken(self):\n return self.authToken", "def token_header(token):\n message = '{token}:ignored'.format(token=token)\n return {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}", "def _get_auth_string(self):", "def __MakeHeaders(self, auth):\n\n headers = dict()\n headers[\"X-Blip-api\"] = BLIP_API_VERSION\n headers[\"Accept\"] = JSON\n if (auth and self.userName != None and self.password != None):\n credentials = self.userName + \":\" + self.password;\n headers[\"Authorization\"] = \"Basic \"+base64.b64encode(credentials)\n if (self.userAgent != None):\n headers[\"User-Agent\"] = self.userAgent\n\n return headers", "def authenticate(self):\n\n headers = {\n 'Authorization': 'Bearer ' + self.access_token,\n 'ClientId': self.client_id,\n }\n self.headers.update(headers)", "def _headers(self, **kwargs):\n headers = BASE_HEADERS.copy()\n if self._token:\n headers['X-Plex-Token'] = self._token\n headers.update(kwargs)\n return headers", "def _headers(self, **kwargs):\n headers = BASE_HEADERS.copy()\n if self._token:\n headers['X-Plex-Token'] = self._token\n headers.update(kwargs)\n return headers", "def init_headers(token):\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + token\n }\n return headers", "def __http_build_headers(self, with_authentication):\n\n dynamic_headers = {\n 'timestamp': str(self.__current_milli_time())\n }\n if with_authentication and self.__login_token:\n dynamic_headers['Authorization'] = 'Bearer ' + self.__login_token\n \n dynamic_headers.update(self.__http_default_headers)\n return dynamic_headers", "def __init__(self, token):\n self.token = token\n self.session = requests.Session()\n self.session.headers.update({\"Authorization\": \"Bearer {token}\".format(token=self.token)})", "def add_headers():\n # the actual access token -\n g.x_tapis_token = request.headers.get('X-Tapis-Token')\n\n # the tenant associated with the subject of the request; used, for instance, when the subject is different\n # from the subject in the actual access_token (for example, when the access_token represents a service account).\n g.x_tapis_tenant = request.headers.get('X-Tapis-Tenant')\n\n # the user associated with the subject of the request. Similar to x_tapis_tenant, this is used, for instance, when\n # the subject is different from the subject in the actual access_token (for example, when the access_token\n # represents a service account).\n g.x_tapis_user = request.headers.get('X-Tapis-User')\n\n # a hash of the original user's access token. 
this can be used, for instance, to check if the original user's\n # access token has been revoked.\n g.x_tapis_user_token_hash = request.headers.get('X-Tapis-User-Token-Hash')", "def EstablishAuthToken(self, opener):\n raise NotImplementedError()", "def __call__(self, context, callback):\r\n\r\n callback((('authorization', 'Bearer ' + self.token_hash ),), None)", "def polling_header(self):\n self.auth = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\\n', '')\n return { \n #\"Authorization\" : \"Basic %s\" % self.cmd.auth,\n #\"X-Atmosphere-Transport\" : \"long-polling\",\n #\"X-Atmosphere-tracking-id\" : self.atmos_id,\n \"X-Atmosphere-Framework\" : \"1.0\",\n \"Accept\" : \"application/json\" }", "def get_headers(self):\r\n return {\r\n 'authenticate': {\r\n 'complexType': 'PortalLoginToken',\r\n 'userId': self.user_id,\r\n 'authToken': self.auth_token,\r\n }\r\n }", "def set_access_token(self, token):\n\n self.__current_request_mock.headers['Authorization'] = token", "def _headers(self):\n auth = AuthenticationProvider.currentAuth()\n\n return {\n 'Authorization': '%s %s' % (auth.tokenType, auth.accessToken),\n 'Content-Type': 'application/json'}", "def get_auth_headers(self,email,passwd):\n #获取认证后的http头\n\n postdata = urllib.urlencode({'Email':email,'Passwd':passwd,'service':'reader','source':self.ClIENT})\n req = urllib2.Request(self.AUTH_URL,postdata)\n if self.host:\n req.set_proxy(self.host, self.type)\n f = urllib2.urlopen(req)\n auth_value = f.read().split()[2][5:]\n f.close()\n self.Author_Headers = {'Authorization':'GoogleLogin auth=%s'%auth_value}", "def set_auth_header(self):\n self.auth_header = self.get_auth_header(self.login, self.password)\n return True if self.auth_header else False", "def get_headers():\n return {'Authorization': f'token {settings.GITHUB_AUTH_TOKEN}'}", "def get_token_header(cls, token):\n if token is EMPTY_KNOX_TOKEN:\n return {}\n else:\n return {'HTTP_AUTHORIZATION': 'token {}'.format(token)}", "def authorization(self):\n token = self.create_auth_token(\n self.api_key.user, self.api_key.key, self.api_key.secret\n )\n return f'JWT {token}'", "def auth_key(event):\n headers = event.get('header')\n if not headers:\n raise RestException(\"Headers are missing\", 400)\n auth = headers.get('Authorization')\n if not auth:\n raise RestException('Header Authorization is missing', 400)\n if not auth.lower().startswith('bearer '):\n raise RestException(\"Authorization missing Bearer keyword\", 400)\n auth = auth.replace('Bearer ', '')\n auth = auth.replace('bearer ', '')\n return auth.strip()", "def auth(self):\n return self.api(self.token)", "def get_headers(self):\r\n return {\r\n 'authenticate': {\r\n 'username': self.username,\r\n 'apiKey': self.api_key,\r\n }\r\n }", "def __init__(self, authtoken, organization_id):\n self.headers = {\n 'Authorization': 'Zoho-oauthtoken ' + authtoken,\n }\n self.details = {\n 'organization_id': organization_id\n }", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n return \"authorization_header_missing\"\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n return \"invalid_header\"\n elif len(parts) == 1:\n return \"invalid_header\"\n elif len(parts) > 2:\n return \"invalid_header\"\n\n token = parts[1]\n return token", "def __init__(\n self,\n uri,\n audience,\n get_token,\n **kwargs\n ):\n super(JWTTokenAuth, self).__init__(uri, audience, kwargs.pop(\"token_type\", TOKEN_TYPE_JWT), get_token)\n self.get_token = get_token", "def 
__call__(self, request):\n self._logger.debug(f'__call__, {request.url} adding Authorization header')\n request.headers[\"Authorization\"] = self._get_auth_value()\n request.register_hook(\"response\", self._handle_401)\n return request", "def get_header( self ):\n\t\tkey = self.key\n\t\tvalue = self.value\n\t\tpath = self.path\n\t\texpires = self.expires.strftime( \"%a, %d-%m-%y %H:%M:%S GMT\" )\n\t\treturn ( \"Set-Cookie\", \"%(key)s=%(value)s; Path=%(path)s; Expires=%(expires)s;\" % locals() )", "def bearer_oauth(r):\n\n r.headers[\"Authorization\"] = f\"Bearer {bearer_token}\"\n r.headers[\"User-Agent\"] = \"S-n-Tweet Alpha\"\n return r", "def headers(self):\n headers = BASE_HEADERS\n if self.token:\n headers['X-Plex-Token'] = self.token\n return headers", "def asterisk_in_header():\n auth_token = get_auth_token()\n\n headers = '{\"Host\":\"$host\",\"User-Agent\":\"$user_agent\",\"Date\":\"DATE\",'\n headers += '\"Accept\": \"*/*\",\"Accept-Encoding\": \"gzip\",'\n headers += '\"X-Project-ID\": \"$project_id\",'\n headers += '\"X-Auth-Token\": \"$token\"}'\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host, user_agent=CFG.user_agent,\n project_id=CFG.project_id, token=auth_token)", "def authenticate_by_token(self, apitoken):\n self.__headers.update({'Authorization': 'APIToken {}'.format(apitoken)})", "def for_authenticate_only(self):\n self.token['type'] = 'auth'\n\n return self", "def auth(self):\n return auth.get_auth()", "def _authenticate_for(self, resp):\n # Get the auth. info from the headers\n scheme, params = resp.headers['Www-Authenticate'].split(None, 1)\n assert (scheme == 'Bearer')\n info = {k: v.strip('\"') for k, v in (i.split('=')\n for i in params.split(','))}\n\n # Request a token from the auth server\n params = {k: v for k, v in info.items() if k in ('service', 'scope')}\n auth = HTTPBasicAuth(self.username, self.password)\n r2 = requests.get(info['realm'], params=params,\n auth=auth, verify=self.verify_ssl)\n\n if r2.status_code == 401:\n raise RuntimeError(\"Authentication Error\")\n r2.raise_for_status()\n\n self.auth = BearerAuth(r2.json()['token'])", "def set_auth_token_header(self):\n\n username = 'test-user'\n passwd = 'testuserpass1234'\n user = User.objects.create(username=username)\n user.set_password(passwd)\n user.save()\n\n assert Account.objects.get(user=user) is not None\n url = reverse('token_obtain_pair')\n res = self.client.post(url,\n data={'username': username, 'password': passwd})\n self.client.credentials(HTTP_AUTHORIZATION=\n f\"Bearer {res.data['access']}\")\n return user", "def bearer_auth():\n authorization = request.headers.get(\"Authorization\")\n if not (authorization and authorization.startswith(\"Bearer \")):\n response = app.make_response(\"\")\n response.headers[\"WWW-Authenticate\"] = \"Bearer\"\n response.status_code = 401\n return response\n slice_start = len(\"Bearer \")\n token = authorization[slice_start:]\n\n return jsonify(authenticated=True, token=token)", "def get_token(self):\n self.session.headers.pop(\"Authorization\", None) # delete old token if was\n\n data = json.dumps({\"password\": self.password, \"username\": self.username})\n answer = self.server_request(self._authTokenPath, data=data)\n\n try:\n self.token = json.loads(answer)[\"token\"]\n self.session.headers.update({\"Authorization\": \"Token \" + self.token})\n except KeyError as err:\n print_unexpected_json_error_key(err, answer, self._authTokenPath)\n exit(1)", "def authentication_header():\n with open(KEY_FILE, \"r\") as file:\n 
header = json.load(file)\n return header", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n print(auth)\n\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n \n parts = auth.split()\n \n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected'}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != 'bearer':\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header must start with Bearer'}, 401)\n\n if len(parts) < 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Token not found after Bearer'}, 401)\n\n if len(parts) > 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header is an invalid token structure'}, 401)\n\n return parts[1]", "def oxe_set_headers(token, method=None):\n\n # basic method GET\n headers = {\n 'Authorization': 'Bearer ' + token,\n 'accept': 'application/json'\n }\n\n # addition for POST & PUT\n if method in ('POST', 'PUT'):\n headers.update({'Content-Type': 'application/json'})\n # addition for DELETE\n elif method == 'DELETE':\n headers.update({'Content-Type': 'text/plain'})\n return headers", "def set_token(self, token):\n # type: (Token) -> None\n self.token = token\n self._token_header = \"Bearer \" + token[\"access_token\"]", "def headers(group_id, token):\n return { \n \"active-group\": group_id,\n \"Authorization\" : \"Bearer: {}\".format(token) \n }", "def set_requests_auth(self):\n self.__auth = OAuth2(token=self.bearer_token)" ]
[ "0.7161087", "0.7057166", "0.6970341", "0.6970341", "0.6921795", "0.68667036", "0.67996633", "0.6784417", "0.6764053", "0.66727406", "0.66410804", "0.6630715", "0.66150844", "0.6613414", "0.6583362", "0.65666574", "0.6546947", "0.6528426", "0.6521229", "0.6509429", "0.64885724", "0.64683807", "0.6465539", "0.64653766", "0.6446665", "0.637545", "0.6356642", "0.63558835", "0.63277066", "0.63277066", "0.63096464", "0.6301908", "0.6295405", "0.628945", "0.6274963", "0.6246223", "0.62333673", "0.62333494", "0.6230232", "0.618832", "0.6172884", "0.616688", "0.61577165", "0.61508465", "0.61501974", "0.6137991", "0.6101909", "0.60997856", "0.60972965", "0.6083374", "0.60781026", "0.60656196", "0.60457563", "0.6039963", "0.6032903", "0.60067856", "0.6006133", "0.6000747", "0.5990458", "0.5990458", "0.5986704", "0.5985856", "0.59825397", "0.5976816", "0.59273535", "0.5917832", "0.5908729", "0.59062403", "0.58950627", "0.58925265", "0.5880798", "0.58785415", "0.5878321", "0.58720535", "0.5870622", "0.5855475", "0.5851044", "0.5825631", "0.58075994", "0.5797097", "0.57726085", "0.5767756", "0.5765293", "0.5757139", "0.5747815", "0.5746138", "0.57403547", "0.5737745", "0.5729536", "0.57282186", "0.5725681", "0.5724671", "0.5722894", "0.57221377", "0.5689014", "0.5678615", "0.5666714", "0.56666636", "0.566492", "0.56508875" ]
0.5789005
80
if the person removed is an owner of flockr, check that they were actually removed from flockr
def test_channel_leave_normal_case_owner():
    clear()
    leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    user = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    userchannel_id = channels_create(user['token'], 'userchannel', True)
    channel_join(leaver['token'], userchannel_id['channel_id'])
    channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])
    channel_leave(leaver['token'], userchannel_id['channel_id'])
    randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])
    assert(randChannel_details['owner_members'] == [
        {
            'u_id' : user['u_id'],
            'name_first' : 'first',
            'name_last' : 'last',
            'profile_img_url': ''
        }
    ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def unorphaned(self):\n return self.new_owner == self.user", "def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def delete_self_ownership(self):\n current_ownership_list = self.msg.get_ownershipList()\n self.old_ownership_list = current_ownership_list\n for comp in self.deleted_comp_list:\n if comp in current_ownership_list:\n current_ownership_list.remove(comp)\n self.logger.debug(\"After removing transfer component ownership, \\\n new ownership: %s\" % current_ownership_list)\n self.msg.set_ownershipList(current_ownership_list)", "def isowner(self, o):\n return self._owner is o", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert 
channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_remove_fellow_from_organization_success(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Valid Requestor', email='[email protected]',\n user_type='user',\n owned_organizations=['Organization_foo'])\n user.put()\n req.put()\n\n # Successful removal.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n )\n self.assertEqual(json.loads(response.body)['owned_organizations'], [])\n\n # Changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(fetched_user.owned_organizations, [])\n self.assertEqual(user.user_type, fetched_user.user_type)", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def leave_union(self):\n if self.union is None:\n return f'{self.username} is not a member of any guild'\n\n if self.union.has_member(self):\n union_name = self.union.name\n self.union = None\n self.save()\n return f'{self.username} has been removed from {union_name}'", "def renounceOwnership():\n\n assert msg.sender == self.owner, \"Access is denied.\"\n\n log.OwnershipRenounced(msg.sender)\n self.owner = ZERO_ADDRESS", "def is_participant(self, message: discord.Message):\n if message.author in self.participants:\n self.participants.remove(message.author)\n return True\n\n return False", "async def __remove(self, ctx, name: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n if name is None:\n name = author\n if server.id not in self.db:\n self.db[server.id] = {}\n if \"bookkeeper\" not in self.db[server.id]:\n self.db[server.id][\"bookkeeper\"] = []\n await self.bot.say(\"Bookkeeper list is currently empty, add new bookkeepers using points keeper add\"\n \" <Discord name or nickname>\")\n self.save_db()\n return\n if name.id not in self.db[server.id][\"bookkeeper\"]:\n await self.bot.say(\"Keeper is not registered, please make sure the name or nickname is correctly spelled. 
\"\n \"You can check using points keeper list\")\n return\n self.db[server.id][\"bookkeeper\"].remove(name.id)\n self.save_db()", "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def is_still_owner(self):\n raise tooz.NotImplemented", "def rm_person():\n # get person name from user\n responses = accept_inputs([\"Person name\"])\n person_name = responses[\"Person name\"]\n # check for existence of person\n results = query_with_results(\"select id from person where name = ?\", [person_name])\n if len(results) == 0:\n print(\"No person found with name '%s' that we could remove.\" % person_name)\n return\n # the person exists, so remove it\n query_no_results(\"delete from person where name = ?\", [person_name])\n # remove all associations with tasks\n query_no_results(\"delete from task_person_pair where person = ?\", [results[0][0]])\n print(\"Person with name '%s' removed.\" % person_name)", "def check_deletion(oc_name, org):\n duplicate_name = org['name']\n\n distance = 
org_tools.getDistance(oc_name, duplicate_name)\n\n if distance <= 0.35:\n org['can_delete'] = 1\n else:\n org['can_delete'] = 0\n\n return org", "def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def test_removeperson(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t.remove_person(p2)\n t.store()\n\n t2 = model.Team(id=id)\n self.assertEqual(t2.persons, [p1.id, p3.id])\n\n with self.assertRaises(ValueError): # cannot be removed again\n t2.remove_person(p2)", "def remove_member(self, persona):\n if persona in self.members:\n self.members.remove(persona)", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")", "async def _remove(self, ctx, points: int, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(\n lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(\n lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(\n lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(\n lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await 
self.bot.say(\"{} was not found. Please add them first using points member add\"\n \" <discord name or Nickname>\".format(x.display_name))\n else:\n self.db[server.id][x.id][\"Lifetime Loss\"] += points\n self.db[server.id][x.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, x.name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please register first using points member add\"\n \" <Discord name or nickname>\".format(namea))\n return\n self.db[server.id][name.id][\"Lifetime Loss\"] += points\n self.db[server.id][name.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, name.name))\n self.save_db()", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def test_realm_admin_remove_others_from_public_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=16,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def is_owned_by(self, user):\n return user and user.id == self.user_id", "def remove_person(self, per: str):\n if per in self._people:\n self._people.remove(per)\n else:\n raise IDDoesNotExist", "def delete_leader(self):", "def check_user_del(user):\n\twith open('tracked_users', 'r') as myfile:\n\t\tuserfile = myfile.read()\n\t\tif user.lower() in userfile.lower():\n\t\t\treturn 1\n\treturn 0", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_remove_facility_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_facility'))", "def test_remove_from_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=['Team_foo'])\n req = User.create(name='requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)", "def check_user_has_owner_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n return userName in owners", "async def remove(self, ctx, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n namesp 
= None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(x.display_name))\n elif x.id in self.db[server.id]:\n del self.db[server.id][x.id]\n self.save_db()\n await self.bot.say(\"{} has been removed from the list.\".format(x.display_name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(name.display_name))\n return\n elif name.id in self.db[server.id]:\n del self.db[server.id][name.id]\n self.save_db()\n await self.bot.say(\"{} has been deleted from the list.\".format(name.display_name))", "def test_remove_self_from_team_success(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n user.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(user),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def test_stream_stale_follows(self):\n self.user2.delete()\n self.assertNotIn('Two', str(user_stream(self.user1)))", "def test_remove_coach_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def test_captain_removes_teammate_success(self):\n team = Team.create(name='foo', program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n captain = User.create(name='captain', email='[email protected]',\n user_type='user', owned_teams=[team.uid])\n team.captain_id = captain.uid\n user.put()\n captain.put()\n team.put()\n\n response = 
self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(captain),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def _testAssistantOwnershipAfter(self, person=None, task='create'):\n if not person:\n person = self.person\n \n newperson = self.getPerson(id='def456', firstName=\"Test\", lastName=\"Assistant\")\n person.setAssistants([newperson.UID(),])\n self.simulateATGUIInteraction(person=person, task=task)\n owners = person.users_with_local_role('Owner')\n \n return 'def456' in owners", "def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())", "def is_owner(self):\n return self._is_owner", "def unfriend(self, remove):\n remover_friends_list = self # person terminating the friendship \n \n # remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n\n #remove friend from removee friend list\n friends_list = FriendList.objects.get(user=removee)\n friend_list.remove_friend(self.user)", "def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def untether(self) -> None:\n if self.msg.sender != self.owner:\n revert(f'Only the owner can call the untether method.')\n pass", "def test_func(self):\n member_to_finish = self.get_object()\n return self.request.user.rfid == member_to_finish.rfid", "def _unfiled_box():\n return db.box((db.box.name == 'Unfiled') & (db.box.owner == auth.user.id))", "def remove_partner(self, other_person,s):\n self.number_of_partners -= 1\n self.current_partners.remove(other_person.identifier)\n \n if self.number_of_partners == 0:\n #no partners left -> single\n s.number_of_singles += 1\n s.singles.add(self.identifier)", "def __updater_get_new_ownership(self):\n if self._transfer_cmp_event.is_set() and not self.put_queue_flag:\n self.logger.info(\"Received transfer/accept request event in updater\")\n for comp_tuple in self._updater_map.keys():\n if int(comp_tuple[0]) not in self.msg.get_ownershipList():\n del self._updater_map[comp_tuple]\n self.msg.put_into_Queue()\n self.put_queue_flag = True\n elif not self._transfer_cmp_event.is_set():\n self.put_queue_flag = False", "def is_loan_owner(self, farmer_id, loan_id):\n owner = self.get_loan_owner(loan_id)\n if (\"farmer_id\" in owner.keys()) and owner[\"farmer_id\"] == farmer_id:\n return True\n return False", "def _remove_player(self, player, player_party, other_party):\n\n party = vars(self)[player_party][:]\n party.remove(player)\n vars(self)[player_party].remove(player)\n for other in 
vars(self)[other_party]:\n if player in other.prefs:\n other.forget(player)", "def prune_losers(self):\n self.log.debug(\"PRUNE LOSERS\")\n # check to see if people i followed follow me back\n cutoff_time = (datetime.now()\n - timedelta(hours=self.reciprocation_window))\n ingrates = Target.objects.filter(\n hunter=self.user, status=Target.PURGATORY,\n modified__lt=cutoff_time) # They didn't follow back in time\n\n for ingrate in ingrates:\n ingrate.status = Target.INGRATE\n ingrate.save()\n self.log.debug(\" => Unfollowed %s\" % ingrate.hunted.screen_name)\n try:\n self.api.destroy_friendship(ingrate.hunted)\n except Exception, e:\n print e\n return\n finally:\n pass\n #self.contact(ingrate)", "def test_remove_coach_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))", "def remove_owner(urn: str, owner_urn: str) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.remove_owner(owner=_get_owner_urn(owner_urn))\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"remove owners\")\n for mcp in dataproduct_patcher.build():\n print(json.dumps(mcp.to_obj()))\n graph.emit(mcp)", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def demote(name):\r\n try:\r\n if name in man:\r\n man.remove(name)\r\n off.append(name)\r\n off.sort()\r\n else:\r\n print(f\"{name} cannot be demoted from a manager as they are not in the personnel list\")\r\n\r\n except TypeError:\r\n print(\"Error: Call with strings only\")", "def test_01_self_unshare_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n dog.uaccess.unshare_resource_with_user(holes, dog)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertFalse(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_resource_unshare_users(holes)))", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def run(self):\n # Determine if this filter doesn't apply.\n if (self.owner == None \\\n or (self.sense and self.user != self.owner) \\\n or ((not self.sense) and self.user == self.owner)):\n return 0\n\n # Perform the child actions.\n self.context.tokens['Owner'] = self.owner\n return 
super(FilterLockOwner, self).run()", "def fire(name):\r\n try:\r\n if name in man:\r\n man.remove(name)\r\n else:\r\n print(f\"Error: {name} not found in personnel list\")\r\n\r\n except TypeError:\r\n print(\"Error: Call with strings only\")", "async def remove(ctx, pkmn_id: int):\n res = database.remove_from_party(ctx.message.author, pkmn_id)\n if not res:\n ctx.send(\"**Oak**: Make sure you actually have that pokemon or if your party is not full ya scrub.\")\n return await show_party(ctx.message.author)", "def test_remove_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Invalid Requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def has_remove_permissions(self, obj):\n return True", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def check_delete_permission(self):\n if getSecurityManager().checkPermission(\"Delete objects\", self):\n username = getSecurityManager().getUser().getUserName()\n if username == self.getOwner().getId():\n return True\n return False", "def test_kyc_delete_legal_board_member(self):\n pass", "def remove_referrer(self, service_id):\n referrers = [r for r in self.referrers if r['service_id'] != service_id]\n if len(referrers) == len(self.referrers):\n logger.warning(\"%s did not have a reservation for %s\", service_id, self)\n return False\n else:\n self.referrers = referrers\n logger.warning(\"%s removed from referrers of %s\", service_id, self)\n # TODO: set atime, but make volume atime and instance atime both nanoseconds\n return True", "def remove_patient(self, patient_name):\n for i in range(len(self)):\n selected_patient = self._patient_list[i]\n if patient_name == selected_patient.first_name + \" \" + selected_patient.last_name:\n self._patient_list.pop(i)\n self.calculate_avg_cholesterol()\n return True\n return False", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. 
Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def verify_user_existance(self, user):\n for client in self.clients:\n if user == client.get_name():\n return True\n return False", "def unfriend(self, removee):\n remover_friends_list = self # person terminating the friendship\n # Remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n # Remove friend from removee's friend list\n friends_list = FriendList.objects.get(user=removee)\n friends_list.remove_friend(self.user)", "def user_deletable(self):\n source_module_id = getattr(self, 'source_module_id', False)\n if not source_module_id:\n return True\n\n root_module_id = getattr(self, 'root_module_id', False)\n if not root_module_id:\n return True\n\n app = self.get_app()\n parent_module = app.get_module_by_unique_id(root_module_id)\n\n if parent_module.module_type == 'shadow':\n return False\n\n return True", "async def done(self, ctx, member: discord.Member):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n if member.id in lst:\r\n lst.remove(member.id)\r\n await self.config.guild(ctx.guild).neededlist.set(lst)\r\n await self.config.member(member).clear()\r\n await ctx.send(\"Removed member from pending list\")\r\n\r\n else:\r\n await ctx.send(\"Member not in the pending list\")\r\n\r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "def test_that_contractor_is_in_recipients(self):\n\n self.trs.recipient.users.clear()\n self.trs.recipient.save()\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 404)", "def isOwner(id, userId):\n db = core.connect()\n return db[id][\"createdBy\"] == userId", "def test_remove_last_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_organizations=[org.uid])\n user.put()\n\n self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(user),\n )\n\n # not changed in the db\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def testOwnershipAfterCreate(self):\n self.simulateATGUIInteraction(task='create')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def fire(name):\r\n try:\r\n if name in off:\r\n off.remove(name)\r\n else:\r\n print(f\"Error: {name} not found in personnel list\")\r\n\r\n except TypeError:\r\n print(\"Error: Call with strings only\")", "async def on_member_remove(member):\r\n pass", "def test_handle_remove_not_in_team(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user]\n self.db.query.return_value = [team]\n self.gh.has_team_member.return_value = False\n with self.app.app_context():\n 
self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (\"User not in team!\", 200))\n self.gh.has_team_member.assert_called_once_with(\"myuser\", \"githubid\")\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "def _remove(updated_pending_requests):\n remove_member_from_pending_query = Query.room_request(roomname, \"\", updated_pending_requests)\n self.db.execute_query(remove_member_from_pending_query)", "def test_remove_friends_symmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n\n u.friends.add(f)\n f.friends.remove(u)\n self.assertIs(u in f.friends.all(), False)\n self.assertIs(f in u.friends.all(), False)", "def drop_off_task(obs):\n gripper_obs = obs[0][0][2:5]\n object_obs = torch.cat((obs[0][0][5:7], torch.tensor([1.0])))\n if (sum(gripper_obs == object_obs) == 3).item():\n print(f'Dropping the object off now')\n return True\n else:\n print(f'Picking up the object!')\n return False", "def _handleBusOwnerChanged(self, new_owner):\n if new_owner == '':\n logger.warn('No owner anymore for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n raise Exception('LostDhcpSlave')\n else:\n pass # Owner exists", "def remove_person_from_the_station(self, station: TelegramController.Station):\n\n if station.line_number in self.__stations_dict and station.station_number in self.__stations_dict[\n station.line_number]:\n if self.__stations_dict[station.line_number][station.station_number] == 1:\n del self.__stations_dict[station.line_number][station.station_number]\n if len(self.__stations_dict[station.line_number]) == 0:\n del self.__stations_dict[station.line_number]\n elif self.__stations_dict[station.line_number][station.station_number] > 1:\n self.__stations_dict[station.line_number][station.station_number] -= 1\n self.__message_sender.send_line(station.line_number, update_passengers=True)\n else:\n print(\"whoops an error, looks like the current station doesn't exit and there's no person waiting for it.\")", "def complete(self):\n return (self.memberDevices <= len(self.members)) or not self.exists", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_delete_non_owner(self):\n another_user = CustomUser.objects.create(id=134, email='[email protected]', is_active=True)\n another_user.set_password('qwerty12345')\n another_user.save()\n\n self.client.login(email='[email protected]', password='qwerty12345')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 87876})\n\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 403)", "def target_will_be_deleted(cls, ctx, user, target):\n # For every rover feature that was enabled/used by this target being deleted (as passed via\n # the target metadata) and that is listed in any rover_features field in the capability definitions,\n # decrement its uses count to both free up any free uses and keep an accurate count of 
the number of uses.\n for rover_feature in (f for f in target.metadata.iterkeys() if f in capability_module.all_rover_features()):\n if target.rover.can_reuse_feature(rover_feature):\n target.rover.reuse_feature(rover_feature)\n else:\n logger.warn(\"No available capabilities when trying to reuse rover feature [%s][%s]\",\n rover_feature, target.user.capabilities)", "def test_remove_followers(self):\n pass" ]
[ "0.6447291", "0.6402777", "0.637274", "0.61781394", "0.6064277", "0.59293276", "0.58794194", "0.58277196", "0.58173835", "0.5780744", "0.5778175", "0.5695273", "0.5692215", "0.56889635", "0.56681126", "0.5662332", "0.5635315", "0.56060123", "0.5600414", "0.5581912", "0.5572824", "0.5558181", "0.555741", "0.5554508", "0.5551208", "0.5549676", "0.5538474", "0.5528646", "0.55206037", "0.5484841", "0.547482", "0.5462268", "0.5457631", "0.54571044", "0.54478526", "0.5444867", "0.54446566", "0.54420394", "0.54409444", "0.5435328", "0.5426527", "0.54172057", "0.54168695", "0.54104245", "0.5388187", "0.5385031", "0.5379166", "0.5371102", "0.53708696", "0.5365126", "0.5362628", "0.53529793", "0.5352597", "0.5349752", "0.533929", "0.5334408", "0.5334376", "0.5333468", "0.53105766", "0.5279138", "0.5276762", "0.5272932", "0.5271816", "0.527084", "0.5269935", "0.5252177", "0.5251743", "0.52512574", "0.5233443", "0.5228801", "0.5221088", "0.5220006", "0.52154654", "0.52076685", "0.52065694", "0.5205373", "0.51984423", "0.5198417", "0.5192645", "0.5190721", "0.5184148", "0.5182349", "0.5180277", "0.5175932", "0.51724935", "0.51640785", "0.51530975", "0.515198", "0.5150458", "0.51467925", "0.5142", "0.5138596", "0.51373386", "0.5137216", "0.51276106", "0.5125328", "0.5121738", "0.51174307", "0.5114009", "0.5112847" ]
0.5255025
65
Needs an ATOM.atom instance as an argument. Returns the names of the framework atoms bound to that atom.
def get_framework_neighbours(atom, useH=True):
    neighbourlist = []
    for atom2 in atom.partner[:5]:
        #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:
        if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(
                covalence_radius[atom2.element]) + .1:
            if not 'H' == atom2.element or useH:
                neighbourlist.append(atom2)
    return neighbourlist
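A minimal usage sketch follows. The Atom stand-in class, the covalence_radius values and the partner list are illustrative assumptions only (the real ATOM.atom interface and radius table may differ), and it assumes get_framework_neighbours as defined above is in scope.

import numpy as np

# Illustrative covalent radii in Angstrom (approximate values; assumption only).
covalence_radius = {'C': 0.77, 'O': 0.73, 'H': 0.37}

class Atom:
    # Minimal stand-in for an ATOM.atom instance: cartesian position, element
    # symbol and a 'partner' list holding the closest atoms first.
    def __init__(self, name, element, cart):
        self.name = name
        self.element = element
        self.cart = np.array(cart, dtype=float)
        self.partner = []

c1 = Atom('C1', 'C', [0.0, 0.0, 0.0])
o1 = Atom('O1', 'O', [1.23, 0.0, 0.0])   # ~1.23 A, inside the C+O radius sum + 0.1
h1 = Atom('H1', 'H', [0.0, 1.09, 0.0])   # ~1.09 A, inside the C+H radius sum + 0.1
c1.partner = [o1, h1]

print([a.name for a in get_framework_neighbours(c1)])             # ['O1', 'H1']
print([a.name for a in get_framework_neighbours(c1, useH=False)])  # ['O1']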
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_atom_labels(self, full=False):\n import numpy\n\n labels = self.get_attr(\"atom_labels\")\n if full:\n return labels\n return numpy.array(labels)[self._get_equivalent_atom_list()].tolist()", "def getAtomNames(self):\n return self._raw_data['ATOM_NAME']", "def bond_atoms(atom_list):\n pass", "def all_registered_appnames():\n yield from sorted(Registry.monomers.keys())", "def get_pyxb_namespaces():\n return pyxb.namespace.utility.AvailableNamespaces()", "def atomList(self):\n\n\t\tal = []\t\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atom in res.atom:\n\t\t\t\t\tal.append(atom)\n\n\t\treturn al", "def defined_names(self, tree=False):\n if not tree:\n return list(self.bindings.keys())\n else:\n return list(self.bindings.keys()) + (list(self.parent.defined_names(tree=True)) if self.parent else [])", "def xontrib_installed(ns=None):\n installed_xontribs = set()\n xontrib_locations = importlib.util.find_spec(\"xontrib2\").submodule_search_locations\n names = None if not ns or len(ns.names) == 0 else set(ns.names)\n if xontrib_locations:\n for xl in xontrib_locations:\n for x in Path(xl).glob(\"*\"):\n name = x.name.split(\".\")[0]\n if name[0] == \"_\" or (names and name not in names):\n continue\n installed_xontribs.add(name)\n return installed_xontribs", "def getNames(self, resname, atomname):\n rname = None\n aname = None\n if resname in self.map:\n res = self.map[resname]\n if res.hasAtom(atomname):\n atom = res.atoms[atomname]\n aname = atom.name\n rname = atom.resname\n return rname, aname", "def installed_appnames():\n appnames = set()\n for finder in sys.meta_path:\n if hasattr(finder, 'appname'):\n appnames.add(finder.appname)\n return appnames", "def find_ntyp(atoms):\n symbs = atoms.get_chemical_symbols()\n unique_symbols = []\n for s in symbs:\n if not (s in unique_symbols):\n unique_symbols.append(s)\n #\n return len(unique_symbols)", "def atoms(self):\n return self._atoms", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.QualExtension_getSBMLExtensionNamespaces(self, *args)", "def get_bonded_atom(self, name_list):\n current_atom = self\n for name in name_list:\n moveto_atom = None\n for atom in current_atom.iter_bonded_atoms():\n if atom.name == name:\n moveto_atom = atom\n break\n if moveto_atom is None:\n return None\n current_atom = moveto_atom\n return current_atom", "def names(self):\n if not self.extensions:\n self.discover()\n\n names = list(self.builtins.keys())\n names += self.extensions.keys()\n\n return sorted(names)", "def fqns(self):\n return [fqn for fqn in self.runinfos]", "def getAtomIndices( structure, resname ):\n atom_indices_ligand = []\n topology = structure.topology\n for atom in topology.atoms():\n if str(resname) in atom.residue.name:\n atom_indices_ligand.append(atom.index)\n\n return atom_indices_ligand", "def get_input_names(self):\n inputNames = []\n for inVar in self.inputs:\n # inVar is of type InOutVar and the object that it contains is a PyFMI variable\n inputNames.append(inVar.get_object().name)\n return inputNames", "def _get_bindings_list_yang_name(self, bindings_list=None):\n\n yang_name_list = []\n\n for bindings_tuple in bindings_list:\n if self._module_name == bindings_tuple[2]:\n yang_name_list.append(bindings_tuple[0].split('.')[-1].replace('_', '-'))\n \n return yang_name_list", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.FbcExtension_getSBMLExtensionNamespaces(self, *args)", "def getAtoms(self):\n return self.atoms", "def getSBMLExtensionNamespaces(self, 
*args):\n return _libsbml.CompExtension_getSBMLExtensionNamespaces(self, *args)", "def _get_atoms(self):\n atoms = []\n invarioms = []\n\n for molecule in self.values():\n atoms += [atom for atom in molecule.atoms]\n invarioms += [atom for atom in molecule.atoms if atom.invariom_name is not None]\n self.atoms = atoms\n self.invarioms = invarioms", "def names() -> Tuple[str, ...]:\n return plugins.list_all(package_name=__name__)", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.LayoutExtension_getSBMLExtensionNamespaces(self, *args)", "def getBindedNames(self):\n names = []\n for function in self.functions:\n names.append(function.__name__)\n return \", \".join(names)", "def getModuleNames():\n import setup\n names = [e.name[1:] for e in setup.wxpExtensions]\n return names", "def lookup(obj):\n return list(dir(obj))", "def lookup(obj):\n return list(dir(obj))", "def getAtomTypes(self):\n return self._raw_data['AMBER_ATOM_TYPE']", "def get_allref(self):\n return self.__applicationList.keys()", "def get_info(atom):\n return [atom.GetIdx(), atom.GetNeighbors()[0].GetIdx()]", "def get_all():\n temp = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n return [i[1] for i in temp if i[0] != \"Aggregator\"]", "def filter_carbon_atoms(atom_list, rings):\n list_3 = []\n list_2 = []\n list_2n = []\n for atom in atom_list:\n if (check_connected(atom, identify_bonds(atom, atom_list)) == False):\n if (len(identify_bonds(atom, atom_list)) == 3):\n list_3.append(atom)\n elif (len(identify_bonds(atom, atom_list)) == 2):\n list_2.append(atom)\n for neighbour in identify_bonds(atom, atom_list):\n if (len(identify_bonds(neighbour[0], atom_list)) == 2):\n for ring in rings:\n if( (atom in ring) and (neighbour[0] in ring)):\n list_2n.append(atom) \n return list_3, list_2, list_2n", "def list_processor_names():\n return [ep.name for ep in pkg_resources.iter_entry_points(ENTRY_POINT_NAME)]", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def models_with_all_atoms(formula, atoms):\n if formula == True:\n return [model for model in tablize(atoms)]\n\n original_models = [model for model in satisfiable(formula, all_models=True)]\n extra_atoms = atoms - formula.atoms()\n\n if not extra_atoms:\n return original_models\n else:\n models_all_atoms = []\n for model in original_models:\n models_all_atoms += [updated_model for updated_model in tablize(extra_atoms, model)]\n return models_all_atoms", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def iter_atoms(self):\n return iter(self.atom_list)", "def get_all_target_namespaces():\n setup_roots = get_all_setups_roots()\n techanim_ns = [x.split(\":\")[0] for x in setup_roots]\n namespaces = get_all_namespaces()\n filtered_ns = []\n for ns in namespaces:\n if ns in [\"UI\", \"ui\", \"shared\", \"Shared\"] + techanim_ns:\n continue\n filtered_ns.append(ns)\n return filtered_ns", "def getRegisterNames(self):\n pass", "def names(self) -> List:\n ...", "def get_atypes(self):\n self.atypes = []\n self.hybs = []\n #self.zs = []\n for ai in self.m.GetAtoms():\n hybi = str( ai.GetHybridization() )\n self.hybs.append( hybi )\n zi = ai.GetAtomicNum()\n #self.zs.append( zi )\n si = ai.GetSymbol()\n if hybi == 'SP2':\n ar = ai.GetIsAromatic()\n ar_suffix = '_R' if ar else '_2'\n ap = si + ar_suffix # atomic_pattern\n elif hybi == 'SP3':\n if zi == 16 and ai.GetExplicitValence() == 6:\n ap = si + 'o3'\n elif zi in [9, 17, 35, 53]:\n 
ap = si\n else:\n ap = si + '_3'\n elif hybi == 'SP':\n ap = si + '_1'\n elif hybi in ['S', ]: #'UNSPECIFIED']:\n ap = si\n else:\n print((' unknown atom type: `%s`'%hybi))\n raise\n self.atypes.append( ap )", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.SBMLExtension_getSBMLExtensionNamespaces(self, *args)", "def _resolve_atomtypes(topology, typemap):\n atoms = list(topology.atoms())\n for atom_id, atom in typemap.items():\n atomtype = [rule_name for rule_name in \n atom['whitelist'] - atom['blacklist']]\n if len(atomtype) == 1:\n atom['atomtype'] = atomtype[0]\n elif len(atomtype) > 1:\n raise FoyerError(\"Found multiple types for atom {} ({}): {}.\".format(\n atom_id, atoms[atom_id].element.name, atomtype))\n else:\n raise FoyerError(\"Found no types for atom {} ({}).\".format(\n atom_id, atoms[atom_id].element.name))", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.MultiExtension_getSBMLExtensionNamespaces(self, *args)", "def names(self):\n\t\treturn", "def get_input_node_names(self, node_name):\n # (str) -> list\n node = self.get_node(node_name)\n return node.bottoms", "def _get_buffer_names(self, root_module: nn.Module) -> Set[str]:\n\n def module_fn(module: nn.Module, prefix: str, buffer_names: Set[str]):\n # For FSDP modules, only add the entry when considering the\n # contained `FlattenParamsWrapper` to avoid duplication\n if not isinstance(module, FullyShardedDataParallel):\n for buffer_name, _ in module.named_buffers(recurse=False):\n # Clean module wrapper prefixes in case of nested wrapping\n prefixed_buffer_name = clean_tensor_name(prefix + buffer_name)\n buffer_names.add(prefixed_buffer_name)\n\n def return_fn(buffer_names: Set[str], *args):\n return buffer_names\n\n buffer_names: Set[str] = set()\n return _apply_to_modules(\n root_module,\n module_fn,\n return_fn,\n buffer_names,\n )", "def names(self) -> list[str]:", "def names(self):\n return list(item.name for item in self.mechanisms)", "def lookup(obj):\n a = list(dir(obj))\n return a", "def find_atomtypes(topology, forcefield, max_iter=10):\n typemap = {atom.index: {'whitelist': set(), 'blacklist': set(), \n 'atomtype': None} for atom in topology.atoms()}\n\n rules = _load_rules(forcefield, typemap)\n\n # Only consider rules for elements found in topology\n subrules = dict()\n system_elements = {a.element.symbol for a in topology.atoms()}\n for key,val in rules.items():\n atom = val.nodes[0]['atom']\n if len(list(atom.find_data('atom_symbol'))) == 1 and \\\n not list(atom.find_data('not_expression')):\n try:\n element = next(atom.find_data('atom_symbol')).children[0]\n except IndexError:\n try:\n atomic_num = next(atom.find_data('atomic_num')).children[0]\n element = pt.Element[int(atomic_num)]\n except IndexError:\n element = None\n else:\n element = None\n if element is None or element in system_elements:\n subrules[key] = val\n rules = subrules\n\n _iterate_rules(rules, topology, typemap, max_iter=max_iter)\n _resolve_atomtypes(topology, typemap)\n\n return typemap", "def getBuilderNames():", "def listBuilderNames():", "def listBuilderNames():", "def xontrib_context(name):\n spec = find_xontrib(name)\n if spec is None:\n return None\n m = 
importlib.import_module(spec.name)\n pubnames = getattr(m, \"__all__\", None)\n if pubnames is not None:\n ctx = {k: getattr(m, k) for k in pubnames}\n else:\n ctx = {k: getattr(m, k) for k in dir(m) if not k.startswith(\"_\")}\n return ctx", "def find_xontrib(name):\n if name.startswith(\".\"):\n spec = importlib.util.find_spec(name, package=\"xontrib2\")\n else:\n spec = importlib.util.find_spec(\".\" + name, package=\"xontrib2\")\n return spec or importlib.util.find_spec(name)", "def get_all_windows(self):\n success, result = self.manager.c.eval(\n textwrap.dedent(\n \"\"\"\n [win.wid for win in self.core.mapped_windows]\n \"\"\"\n )\n )\n assert success\n return eval(result)", "def __getitem__(self, atom_name):\n return self.atoms_by_name[atom_name]", "def get_names(dep):\n res = [dep.name]\n return res", "def get_all_setups_roots():\n ta_roots = cmds.ls(\"*.{}\".format(CONFIG[\"config_attr\"]), r=True, o=True)\n return ta_roots", "def get_interactions(list_atoms1, list_atoms2, dist):\n beta_carbons1 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms1))\n beta_carbons2 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms2))\n ns = NeighborSearch(beta_carbons1)\n interactions = []\n\n for atom in beta_carbons2:\n interact = ns.search(atom.get_coord(), dist)\n interactions.extend(\n [tuple(sorted([str(atom.get_parent().resname), str(x.get_parent().resname)])) for x in interact])\n return interactions", "def get_atom_info(self):\n return", "def getNumAtoms(self):\n return int(self._getPointerValue('NATOM'))", "def get_afferents_names(self):\n\t\treturn self._afferentsNames", "def find_app(app, symbol_by_name=..., imp=...):\n ...", "def atoms(self):\n return set(self.array_form)", "def app_names(self):\n return self.get_app_names()", "def get_framework_neighbors(atom, useH=True):\n neighborlist = []\n for atom2 in atom.partner[:5]:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighborlist.append(atom2)\n return neighborlist", "def atomList(joints):\n assert len(joints) > 0\n first = joints[0]\n functorList = first[1][:-2] # Second element of row, last two elements of that are joint prob and log prob\n atomList = []\n for (node,_) in functorList:\n atomList.append(node.functor+\"(\"+\",\".join(node.varList)+\")\")\n return atomList", "def getCmdList():\n return [obj for name, obj in inspect.getmembers(sys.modules[__name__]) \n if inspect.isclass(obj) and issubclass(obj, Cmd)][1:]", "def getExtnNodes(self):\n for name in self._names:\n try:\n mod = __import__(name, fromlist=['open'])\n except ImportError:\n raise ImportError(\"import %s error\" % name)\n self._AllExtnNodes = mod.AllXtens", "def getAtomResonances(atom):\n\n resonances = set()\n atomSet = atom.atomSet\n if atomSet:\n for resonanceSet in atomSet.resonanceSets:\n resonances.update(resonanceSet.resonances)\n return resonances", "def imports():\n for name, val in globals().items():\n if isinstance(val, getattr(types, \"ModuleType\")):\n yield val.__name__", "def get_chebi_synonyms(chebi_ent):\n if hasattr(chebi_ent, 'Synonyms'):\n return [entry.data for entry in chebi_ent.Synonyms]\n else:\n return []", "def get_map_anywhere(atom_list):\n anywhere_map = [atom for atom in atom_list if (check_connected(atom, identify_bonds(atom, atom_list)) == False)]\n return anywhere_map", "def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n 
key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1", "def names():\n pass", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def get_atom_features(self, atom):\n period = [0,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3\n ,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4\n ,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5\n ,6,6,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6\n ,7,7,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]\n\n group = [0,1,18,1,2,13,14,15,16,17,18,1,2,13,14,15,16,17,18\n ,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18\n ,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18\n ,1,2,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18\n ,1,2,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18]\n atomic_num = atom.number\n return np.array([period[atomic_num],group[atomic_num]])", "def list_manifest_labels(tag_manifest, prefix_filter=None):\n query = (\n Label.select(Label, MediaType)\n .join(MediaType)\n .switch(Label)\n .join(LabelSourceType)\n .switch(Label)\n .join(TagManifestLabel)\n .where(TagManifestLabel.annotated == tag_manifest)\n )\n\n if prefix_filter is not None:\n query = query.where(prefix_search(Label.key, prefix_filter))\n\n return query", "def getChemCompSysNames(self):\n dataDict = self.__dict__\n result = frozenset(y for x in self.chemComp.namingSystems for y in x.chemCompSysNames if not y.specificChemCompVars).union(self.specificSysNames)\n return result", "def atoms(self, resnum, chain_id, icode=' ', alt=' ', model_num = 0):\n return [atm for atm in self.residue(resnum, chain_id, icode, alt, model_num)]", "def list_class_names(clz, package):\n\n def isclz(obj):\n if inspect.isclass(obj):\n return issubclass(obj, clz) and not obj == clz\n return False\n\n module = importlib.import_module(package)\n\n return [name for name, _ in inspect.getmembers(module, isclz)]", "def activemodes(self):\n\t\tret_active = []\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tret_active.extend( val.active() )\n\t\treturn ret_active", "def get_all_namespaces():\n cmds.namespace(setNamespace=':')\n return cmds.namespaceInfo(listOnlyNamespaces=True, recurse=True)", "def items(self):\n return self.namespace_to_alias.items()" ]
[ "0.5786468", "0.5760434", "0.5465059", "0.54395527", "0.5285232", "0.52612984", "0.52286106", "0.5170458", "0.5095022", "0.5048037", "0.5035679", "0.5034012", "0.49822837", "0.49687123", "0.4942803", "0.49194297", "0.49104977", "0.49047342", "0.4869437", "0.48587328", "0.4852082", "0.4840675", "0.4829039", "0.4822362", "0.4811568", "0.4810619", "0.4808622", "0.4799978", "0.4799978", "0.4795981", "0.4736011", "0.47321567", "0.47297364", "0.47159797", "0.47068104", "0.47023106", "0.47023106", "0.47001618", "0.4699204", "0.4699204", "0.46985132", "0.46883008", "0.4683279", "0.46767035", "0.46759668", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.46587837", "0.46443975", "0.46411455", "0.46392143", "0.4631139", "0.46290624", "0.4622316", "0.46176785", "0.46157855", "0.4614827", "0.46086472", "0.46072483", "0.46072483", "0.4603554", "0.4601164", "0.45996723", "0.45984766", "0.45894808", "0.45860296", "0.45855227", "0.4581334", "0.45790514", "0.45771754", "0.4575187", "0.45743796", "0.45725185", "0.4569602", "0.45647654", "0.45549145", "0.45537135", "0.4548317", "0.45416355", "0.45386574", "0.45328185", "0.45292175", "0.452912", "0.452684", "0.452684", "0.45188373", "0.45180884", "0.45157114", "0.45119992", "0.45082745", "0.45044893", "0.44977933", "0.44952616" ]
0.45080042
97
Returns the rotation matrix equivalent of the given quaternion. This function is used by the get_refined_rotation() function.
def get_rotation_matrix_from_quaternion(q):
    R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3],
                    2 * (q[1] * q[2] - q[0] * q[3]),
                    2 * (q[1] * q[3] + q[0] * q[2])],
                   [2 * (q[2] * q[1] + q[0] * q[3]),
                    q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3],
                    2 * (q[2] * q[3] - q[0] * q[1])],
                   [2 * (q[3] * q[1] - q[0] * q[2]),
                    2 * (q[3] * q[2] + q[0] * q[1]),
                    q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]])
    return R
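For reference, a minimal sketch (assuming numpy and the w-first ordering q = (w, x, y, z) used above) that checks the matrix produced by this formula is a proper rotation for a unit quaternion: R @ R.T = I and det(R) = +1. The sample quaternion value is illustrative only.

import numpy as np

# Sanity check of the quaternion-to-rotation-matrix formula above.
q = np.array([0.5, 0.5, 0.5, 0.5])
q = q / np.linalg.norm(q)          # normalise, as the formula assumes |q| = 1
w, x, y, z = q

R = np.array([
    [w*w + x*x - y*y - z*z, 2*(x*y - w*z),         2*(x*z + w*y)],
    [2*(y*x + w*z),         w*w - x*x + y*y - z*z, 2*(y*z - w*x)],
    [2*(z*x - w*y),         2*(z*y + w*x),         w*w - x*x - y*y + z*z],
])

assert np.allclose(R @ R.T, np.eye(3))        # R is orthogonal
assert np.isclose(np.linalg.det(R), 1.0)      # proper rotation, no reflection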
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quaternion_to_rotation_matrix(quaternion):\n\n q_w, q_x, q_y, q_z = quaternion\n sqw, sqx, sqy, sqz = np.square(quaternion)\n norm = (sqx + sqy + sqz + sqw)\n rotation_matrix = np.zeros((3, 3))\n\n # division of square length if quaternion is not already normalized\n rotation_matrix[0, 0] = (+sqx - sqy - sqz + sqw) / norm\n rotation_matrix[1, 1] = (-sqx + sqy - sqz + sqw) / norm\n rotation_matrix[2, 2] = (-sqx - sqy + sqz + sqw) / norm\n\n tmp1 = q_x * q_y\n tmp2 = q_z * q_w\n rotation_matrix[1, 0] = 2.0 * (tmp1 + tmp2) / norm\n rotation_matrix[0, 1] = 2.0 * (tmp1 - tmp2) / norm\n\n tmp1 = q_x * q_z\n tmp2 = q_y * q_w\n rotation_matrix[2, 0] = 2.0 * (tmp1 - tmp2) / norm\n rotation_matrix[0, 2] = 2.0 * (tmp1 + tmp2) / norm\n tmp1 = q_y * q_z\n tmp2 = q_x * q_w\n rotation_matrix[2, 1] = 2.0 * (tmp1 + tmp2) / norm\n rotation_matrix[1, 2] = 2.0 * (tmp1 - tmp2) / norm\n return rotation_matrix", "def quaternion_matrix(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n n = numpy.dot(q, q)\r\n if n < _EPS:\r\n return numpy.identity(4)\r\n q *= math.sqrt(2.0 / n)\r\n q = numpy.outer(q, q)\r\n return numpy.array([\r\n [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],\r\n [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],\r\n [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],\r\n [ 0.0, 0.0, 0.0, 1.0]])", "def quat_to_rotmat(quat):\n norm_quat = quat\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]\n B = quat.size(0)\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w * x, w * y, w * z\n xy, xz, yz = x * y, x * z, y * z\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat", "def quat_to_rotmat(quat): \n norm_quat = quat\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w*x, w*y, w*z\n xy, xz, yz = x*y, x*z, y*z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat", "def quaternion_matrix(quaternion):\n q = np.array(quaternion, dtype=np.float64, copy=True)\n n = np.dot(q, q)\n if n < _EPS:\n return np.identity(4)\n q *= math.sqrt(2.0 / n)\n q = np.outer(q, q)\n return np.array([\n [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],\n [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],\n [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],\n [ 0.0, 0.0, 0.0, 1.0]])", "def quat_to_rotmat(quat):\n norm_quat = quat\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w * x, w * y, w * z\n xy, xz, yz = x * y, x * z, y * z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,\n 2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,\n 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat", "def quaternion_to_Rot(q: array):\n\n # Create a 
vector from the quaternion parameters (and check dimensions)\n q = array(q).reshape(4)\n\n # Normalize the quaternion\n q = divide(q, sqrt(sum(power(q, 2))))\n\n # Auxiliary matrix\n q_hat = zeros((3, 3))\n q_hat[0, 1] = -q[3]\n q_hat[0, 2] = q[2]\n q_hat[1, 2] = -q[1]\n q_hat[1, 0] = q[3]\n q_hat[2, 0] = -q[2]\n q_hat[2, 1] = q[1]\n\n # Return the rotation matrix\n return eye(3) + 2 * dot(q_hat, q_hat) + 2 * dot(q[0], q_hat)", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def quat2mat(quat):\n norm_quat = torch.cat([quat[:, :1].detach() * 0 + 1, quat], dim=1)\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w * x, w * y, w * z\n xy, xz, yz = x * y, x * z, y * z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,\n 2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,\n 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat", "def quat2mat(quat):\r\n norm_quat = torch.cat([quat[:,:1].detach()*0 + 1, quat], dim=1)\r\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\r\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\r\n\r\n B = quat.size(0)\r\n\r\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\r\n wx, wy, wz = w*x, w*y, w*z\r\n xy, xz, yz = x*y, x*z, y*z\r\n\r\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\r\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\r\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)\r\n return rotMat", "def quat2mat(quat):\n norm_quat = torch.cat([quat[:,:1].detach()*0 + 1, quat], dim=1)\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w*x, w*y, w*z\n xy, xz, yz = x*y, x*z, y*z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)\n return rotMat", "def quaternion_to_matrix(quaternions):\n r, i, j, k = torch.unbind(quaternions, -1)\n two_s = 2.0 / (quaternions * quaternions).sum(-1)\n\n o = torch.stack(\n (\n 1 - two_s * (j * j + k * k),\n two_s * (i * j - k * r),\n two_s * (i * k + j * r),\n two_s * (i * j + k * r),\n 1 - two_s * (i * i + k * k),\n two_s * (j * k - i * r),\n two_s * (i * k - j * r),\n two_s * (j * k + i * r),\n 1 - two_s * (i * i + j * j),\n ),\n -1,\n )\n return o.reshape(quaternions.shape[:-1] + (3, 3))", "def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate", "def quaternion_to_rotation_matrix(q0, q1, q2, q3) -> np:\n\n # First row of the rotation matrix\n r00 = 2 * (q0 * q0 + q1 * q1) - 1\n r01 = 2 * (q1 * q2 - q0 * q3)\n r02 = 2 * (q1 * q3 + q0 * q2)\n\n # Second row of the rotation matrix\n r10 = 2 * (q1 * q2 + q0 * q3)\n r11 = 2 * (q0 * q0 + q2 * q2) - 1\n r12 = 2 * (q2 * q3 - q0 * q1)\n\n # Third row of the rotation matrix\n r20 = 2 * (q1 * q3 - q0 * q2)\n r21 = 2 * (q2 * q3 + q0 * q1)\n r22 = 2 * (q0 * q0 + q3 * q3) - 1\n\n # 3x3 rotation matrix\n rot_matrix = np.array([[r00, r01, r02],\n [r10, r11, r12],\n [r20, r21, 
r22]])\n\n return rot_matrix", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def rotation_mat_to_quat(R, q):\n q[0] = np.sqrt(R[0] + R[4] + R[8]) / 2\n q[1] = (R[7] - R[5]) / (4. * q[0])\n q[2] = (R[2] - R[6]) / (4. * q[0])\n q[3] = (R[3] - R[1]) / (4. * q[0])", "def rotation_matrix_to_quaternion(rotation_matrix):\n trace = np.trace(rotation_matrix)\n\n if trace > 0:\n S = np.sqrt(trace + 1) * 2\n q_w = 0.25 * S\n q_x = (rotation_matrix[2, 1] - rotation_matrix[1, 2]) / S\n q_y = (rotation_matrix[0, 2] - rotation_matrix[2, 0]) / S\n q_z = (rotation_matrix[1, 0] - rotation_matrix[0, 1]) / S\n return np.asarray([q_w, q_x, q_y, q_z])\n\n elif ((rotation_matrix[0, 0] > rotation_matrix[1, 1]) and\n (rotation_matrix[0, 0] > rotation_matrix[2, 2])):\n\n S = np.sqrt(1.0 + rotation_matrix[0, 0] - rotation_matrix[1, 1] -\n rotation_matrix[2, 2]) * 2\n q_w = (rotation_matrix[2, 1] - rotation_matrix[1, 2]) / S\n q_x = 0.25 * S\n q_y = (rotation_matrix[0, 1] + rotation_matrix[1, 0]) / S\n q_z = (rotation_matrix[0, 2] + rotation_matrix[2, 0]) / S\n\n elif rotation_matrix[1, 1] > rotation_matrix[2, 2]:\n\n S = np.sqrt(1.0 + rotation_matrix[1, 1] - rotation_matrix[0, 0] -\n rotation_matrix[2, 2]) * 2\n q_w = (rotation_matrix[0, 2] - rotation_matrix[2, 0]) / S\n q_x = (rotation_matrix[0, 1] + rotation_matrix[1, 0]) / S\n q_y = 0.25 * S\n q_z = (rotation_matrix[1, 2] + rotation_matrix[2, 1]) / S\n\n else:\n S = np.sqrt(1.0 + rotation_matrix[2, 2] - rotation_matrix[0, 0] -\n rotation_matrix[1, 1]) * 2\n q_w = (rotation_matrix[1, 0] - rotation_matrix[0, 1]) / S\n q_x = (rotation_matrix[0, 2] + rotation_matrix[2, 0]) / S\n q_y = (rotation_matrix[1, 2] + rotation_matrix[2, 1]) / S\n q_z = 0.25 * S\n\n if q_w >= 0:\n return np.asarray([q_w, q_x, q_y, q_z])\n else:\n return -1 * np.asarray([q_w, q_x, q_y, q_z])", "def _cubelet_rotation_matrix(self, cubelet_meta_info, qpos_array):\n euler_angles = qpos_array[cubelet_meta_info[\"euler_qpos\"]]\n return rotation.euler2mat(euler_angles)", "def rotation_matrix_to_quaternion(rotation_matrix: np) -> object:\n\n cosine_for_pitch = math.sqrt(rotation_matrix[0][0] ** 2 + rotation_matrix[1][0] ** 2)\n is_singular = cosine_for_pitch < 10 ** -6\n if not is_singular:\n yaw = math.atan2(rotation_matrix[1][0], rotation_matrix[0][0])\n pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)\n roll = math.atan2(rotation_matrix[2][1], rotation_matrix[2][2])\n else:\n yaw = math.atan2(-rotation_matrix[1][2], rotation_matrix[1][1])\n pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)\n roll = 0\n\n e = (yaw, pitch, roll)\n\n return euler_to_quaternion(e)", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = angles[0:3]\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def _get_rotation_matrix(transform):\n # caution: UE4 is using left-hand ortation order\n roll = np.deg2rad(-transform.rotation.roll)\n pitch = np.deg2rad(-transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n 
sr, cr = np.sin(roll), np.cos(roll)\n sp, cp = np.sin(pitch), np.cos(pitch)\n sy, cy = np.sin(yaw), np.cos(yaw)\n rotation_matrix = np.array([[cy * cp, -sy * sr + cy * sp * sr, cy * sp * cr + sy * sr],\n [sy * cp, cy * sp * sr + cy * sr, -cy * sr + sy * sp * cr],\n [-sp, cp * sr, cp * cr]])\n return rotation_matrix", "def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def quat2mat(self,quat):\n\t quat = np.asarray(quat, dtype=np.float64)\n\t assert quat.shape[-1] == 4, \"Invalid shape quat {}\".format(quat)\n\n\t w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]\n\t Nq = np.sum(quat * quat, axis=-1)\n\t s = 2.0 / Nq\n\t X, Y, Z = x * s, y * s, z * s\n\t wX, wY, wZ = w * X, w * Y, w * Z\n\t xX, xY, xZ = x * X, x * Y, x * Z\n\t yY, yZ, zZ = y * Y, y * Z, z * Z\n\n\t mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)\n\t mat[..., 0, 0] = 1.0 - (yY + zZ)\n\t mat[..., 0, 1] = xY - wZ\n\t mat[..., 0, 2] = xZ + wY\n\t mat[..., 1, 0] = xY + wZ\n\t mat[..., 1, 1] = 1.0 - (xX + zZ)\n\t mat[..., 1, 2] = yZ - wX\n\t mat[..., 2, 0] = xZ - wY\n\t mat[..., 2, 1] = yZ + wX\n\t mat[..., 2, 2] = 1.0 - (xX + yY)\n\t return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))", "def quat2mat(self,quat):\n quat = np.asarray(quat, dtype=np.float64)\n assert quat.shape[-1] == 4, \"Invalid shape quat {}\".format(quat)\n\n w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]\n Nq = np.sum(quat * quat, axis=-1)\n s = 2.0 / Nq\n X, Y, Z = x * s, y * s, z * s\n wX, wY, wZ = w * X, w * Y, w * Z\n xX, xY, xZ = x * X, x * Y, x * Z\n yY, yZ, zZ = y * Y, y * Z, z * Z\n\n mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)\n mat[..., 0, 0] = 1.0 - (yY + zZ)\n mat[..., 0, 1] = xY - wZ\n mat[..., 0, 2] = xZ + wY\n mat[..., 1, 0] = xY + wZ\n mat[..., 1, 1] = 1.0 - (xX + zZ)\n mat[..., 1, 2] = yZ - wX\n mat[..., 2, 0] = xZ - wY\n mat[..., 2, 1] = yZ + wX\n mat[..., 2, 2] = 1.0 - (xX + yY)\n return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector 
coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def py_rotation_from_matrix(matrix):\n return np.float32(quat2angle_axis(mat2quat(matrix)))", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def quaternion_to_angle(q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def quat2mat(q):\n #leila: https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/index.htm\n\n sz = quat.get_size(q)\n q0 = quat.getq0(q)\n q1 = quat.getq1(q)\n q2 = quat.getq2(q)\n q3 = quat.getq3(q)\n qt = quat.get_type(q)\n\n g = np.zeros((sz, 3, 3))\n g[:, 0, 0] = np.square(q0) + np.square(q1) - np.square(q2) - np.square(q3)\n g[:, 0, 1] = 2*(q1*q2 - q0*q3)\n g[:, 0, 2] = 2*(q3*q1 + q0*q2)\n g[:, 1, 0] = 2*(q1*q2 + q0*q3)\n g[:, 1, 1] = np.square(q0) - np.square(q1) + np.square(q2) - np.square(q3)\n g[:, 1, 2] = 2*(q2*q3 - q0*q1)\n g[:, 2, 0] = 2*(q3*q1 - q0*q2)\n g[:, 2, 1] = 2*(q2*q3 + q0*q1)\n g[:, 2, 2] = np.square(q0) - np.square(q1) - np.square(q2) + np.square(q3)\n\n if sz == 1:\n g = g.reshape((3, 3))\n if qt == -1:\n g = -g\n else:\n inds1 = np.where(qt == -1)\n g[inds1, :, :] = -g[inds1, :, :]\n\n return g", "def from_quaternion(self, q: np.ndarray) -> np.ndarray:\n if q is None:\n return np.identity(3)\n if q.shape[-1]!=4 or q.ndim>2:\n raise 
ValueError(\"Quaternion must be of the form (4,) or (N, 4)\")\n if q.ndim>1:\n q /= np.linalg.norm(q, axis=1)[:, None] # Normalize\n R = np.zeros((q.shape[0], 3, 3))\n R[:, 0, 0] = 1.0 - 2.0*(q[:, 2]**2 + q[:, 3]**2)\n R[:, 1, 0] = 2.0*(q[:, 1]*q[:, 2]+q[:, 0]*q[:, 3])\n R[:, 2, 0] = 2.0*(q[:, 1]*q[:, 3]-q[:, 0]*q[:, 2])\n R[:, 0, 1] = 2.0*(q[:, 1]*q[:, 2]-q[:, 0]*q[:, 3])\n R[:, 1, 1] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 3]**2)\n R[:, 2, 1] = 2.0*(q[:, 0]*q[:, 1]+q[:, 2]*q[:, 3])\n R[:, 0, 2] = 2.0*(q[:, 1]*q[:, 3]+q[:, 0]*q[:, 2])\n R[:, 1, 2] = 2.0*(q[:, 2]*q[:, 3]-q[:, 0]*q[:, 1])\n R[:, 2, 2] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 2]**2)\n return R\n q /= np.linalg.norm(q)\n return np.array([\n [1.0-2.0*(q[2]**2+q[3]**2), 2.0*(q[1]*q[2]-q[0]*q[3]), 2.0*(q[1]*q[3]+q[0]*q[2])],\n [2.0*(q[1]*q[2]+q[0]*q[3]), 1.0-2.0*(q[1]**2+q[3]**2), 2.0*(q[2]*q[3]-q[0]*q[1])],\n [2.0*(q[1]*q[3]-q[0]*q[2]), 2.0*(q[0]*q[1]+q[2]*q[3]), 1.0-2.0*(q[1]**2+q[2]**2)]])", "def quaternion2rot3d(quat):\n q01 = quat[0] * quat[1]\n q02 = quat[0] * quat[2]\n q03 = quat[0] * quat[3]\n q11 = quat[1] * quat[1]\n q12 = quat[1] * quat[2]\n q13 = quat[1] * quat[3]\n q22 = quat[2] * quat[2]\n q23 = quat[2] * quat[3]\n q33 = quat[3] * quat[3]\n\n # Obtain the rotation matrix\n rotation = np.zeros((3, 3))\n rotation[0, 0] = (1. - 2. * (q22 + q33))\n rotation[0, 1] = 2. * (q12 - q03)\n rotation[0, 2] = 2. * (q13 + q02)\n rotation[1, 0] = 2. * (q12 + q03)\n rotation[1, 1] = (1. - 2. * (q11 + q33))\n rotation[1, 2] = 2. * (q23 - q01)\n rotation[2, 0] = 2. * (q13 - q02)\n rotation[2, 1] = 2. * (q23 + q01)\n rotation[2, 2] = (1. - 2. * (q11 + q22))\n\n return rotation", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, kappa, omega) = self.get_phi_kappa_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.kappa_opposite_rotation_matrix(phi, np.deg2rad(self.alpha), kappa, omega)", "def quaternion_to_angle(q):\n x, y, z, w = q.x, q.y, q.z, q.w\n roll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n return yaw", "def get_rot_as_quat(m_obj):\n mfn_obj = oMa.MFnTransform(m_obj)\n\n rot = mfn_obj.rotation(asQuaternion=True)\n\n return rot", "def _quaternions(self, R):\n # Simple Wikipedia version\n # en.wikipedia.org/wiki/Rotation_matrix#Quaternion\n # For other options see math.stackexchange.com/questions/2074316/calculating-rotation-axis-from-rotation-matrix\n diag = torch.diagonal(R, dim1=-2, dim2=-1)\n Rxx, Ryy, Rzz = diag.unbind(-1)\n magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n Rxx - Ryy - Rzz, \n - Rxx + Ryy - Rzz, \n - Rxx - Ryy + Rzz\n ], -1)))\n _R = lambda i,j: R[:,:,:,i,j]\n signs = torch.sign(torch.stack([\n _R(2,1) - _R(1,2),\n _R(0,2) - _R(2,0),\n _R(1,0) - _R(0,1)\n ], -1))\n xyz = signs * magnitudes\n # The relu enforces a non-negative trace\n w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n Q = torch.cat((xyz, w), -1)\n Q = F.normalize(Q, dim=-1)\n return Q", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def toRot(q):\n R = SX.zeros(3, 3)\n qi = q[0]; qj = q[1]; qk = q[2]; qr = q[3]\n R[0, 0] = 1. - 2. * (qj * qj + qk * qk);\n R[0, 1] = 2. * (qi * qj - qk * qr);\n R[0, 2] = 2. * (qi * qk + qj * qr)\n R[1, 0] = 2. * (qi * qj + qk * qr);\n R[1, 1] = 1. 
- 2. * (qi * qi + qk * qk);\n R[1, 2] = 2. * (qj * qk - qi * qr)\n R[2, 0] = 2. * (qi * qk - qj * qr);\n R[2, 1] = 2. * (qj * qk + qi * qr);\n R[2, 2] = 1. - 2. * (qi * qi + qj * qj)\n\n return R", "def axis2rotmat(axis):\n return quat2rotmat(axis2quat(axis))", "def angle_to_quaternion(angle):\n\treturn Quaternion(*tf.transformations.quaternion_from_euler(0, 0, angle))", "def rotmat_to_quaternion(rotmat):\n r00 = rotmat[0,0]\n r01 = rotmat[0,1]\n r02 = rotmat[0,2]\n r10 = rotmat[1,0]\n r11 = rotmat[1,1]\n r12 = rotmat[1,2]\n r20 = rotmat[2,0]\n r21 = rotmat[2,1]\n r22 = rotmat[2,2]\n\n tr = r00 + r11 + r22\n quat = np.zeros(4)\n if tr > 0:\n S = np.sqrt(tr+1.0) * 2. # S=4*qw\n quat[0] = 0.25 * S\n quat[1] = (r21 - r12) / S\n quat[2] = (r02 - r20) / S\n quat[3] = (r10 - r01) / S\n elif (r00 > r11) and (r00 > r22):\n S = np.sqrt(1.0 + r00 - r11 - r22) * 2. # S=4*qx\n quat[0] = (r21 - r12) / S\n quat[1] = 0.25 * S\n quat[2] = (r01 + r10) / S\n quat[3] = (r02 + r20) / S\n elif r11 > r22:\n S = np.sqrt(1.0 + r11 - r00 - r22) * 2. # S=4*qy\n quat[0] = (r02 - r20) / S\n quat[1] = (r01 + r10) / S\n quat[2] = 0.25 * S\n quat[3] = (r12 + r21) / S\n else:\n S = np.sqrt(1.0 + r22 - r00 - r11) * 2. # S=4*qz\n quat[0] = (r10 - r01) / S\n quat[1] = (r02 + r20) / S\n quat[2] = (r12 + r21) / S\n quat[3] = 0.25 * S\n\n return quat", "def quaternion_inv(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n numpy.negative(q[1:], q[1:])\r\n return q / numpy.dot(q, q)", "def Rot_to_quaternion(r: array):\n\n # Compute the trace of the rotation matrix\n tr = r[0, 0] + r[1, 1] + r[2, 2]\n\n if tr > 0:\n S = sqrt(tr + 1.0) * 2\n qw = 0.25 * S\n qx = (r[2, 1] - r[1, 2]) / S\n qy = (r[0, 2] - r[2, 0]) / S\n qz = (r[1, 0] - r[0, 1]) / S\n elif (r[0, 0] > r[1, 1]) and (r[0, 0] > r[2, 2]):\n S = sqrt(1.0 + r[0, 0] - r[1, 1] - r[2, 2]) * 2\n qw = (r[2, 1] - r[1, 2]) / S\n qx = 0.25 * S\n qy = (r[0, 1] + r[1, 0]) / S\n qz = (r[0, 2] + r[2, 0]) / S\n elif r[1, 1] > r[2, 2]:\n S = sqrt(1.0 + r[1, 1] - r[0, 0] - r[2, 2]) * 2\n qw = (r[0, 2] - r[2, 0]) / S\n qx = (r[0, 1] + r[1, 0]) / S\n qy = 0.25 * S\n qz = (r[1, 2] + r[2, 1]) / S\n else:\n S = sqrt(1.0 + r[2, 2] - r[0, 0] - r[1, 1]) * 2\n qw = (r[1, 0] - r[0, 1]) / S\n qx = (r[0, 2] + r[2, 0]) / S\n qy = (r[1, 2] + r[2, 1]) / S\n qz = 0.25 * S\n\n q = array([qw, qx, qy, qz])\n q = q * sign(qw)\n\n return q", "def quatRightMat(q):\n\ts = q[0]\n\tv = q[1:].reshape(-1,)\n\tR = np.zeros((4, 4))\n\tR[0, 0] = s\n\tR[0, 1:] = -v\n\tR[1:, 0] = v\n\tR[1:, 1:] = s*np.eye(3) - skewMat(v)\n\treturn R", "def rotation_matrix(yaw, pitch) -> TransformationMatrixType:\n return rotation_matrix_yx(math.radians(yaw + 180), math.radians(pitch))", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def rotmat2quat(R):\n rotdiff = R - R.T\n\n r = np.zeros(3)\n r[0] = -rotdiff[1, 2]\n r[1] = rotdiff[0, 2]\n r[2] = -rotdiff[0, 1]\n sintheta = np.linalg.norm(r) / 2\n r0 = np.divide(r, np.linalg.norm(r) + np.finfo(np.float32).eps)\n\n costheta = (np.trace(R) - 1) / 2\n\n theta = np.arctan2(sintheta, costheta)\n\n q = np.zeros(4)\n q[0] = np.cos(theta / 2)\n q[1:] = r0 * np.sin(theta / 2)\n return q", "def angle_to_quaternion(angle):\n return Quaternion(*tf.transformations.quaternion_from_euler(0, 0, angle))", "def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):\n if not torch.is_tensor(rotation_matrix):\n raise 
TypeError(\"Input type is not a torch.Tensor. Got {}\".format(\n type(rotation_matrix)))\n\n if len(rotation_matrix.shape) > 3:\n raise ValueError(\n \"Input size must be a three dimensional tensor. Got {}\".format(\n rotation_matrix.shape))\n if not rotation_matrix.shape[-2:] == (3, 4):\n raise ValueError(\n \"Input size must be a N x 3 x 4 tensor. Got {}\".format(\n rotation_matrix.shape))\n\n rmat_t = torch.transpose(rotation_matrix, 1, 2)\n\n mask_d2 = (rmat_t[:, 2, 2] < eps).float()\n\n mask_d0_d1 = (rmat_t[:, 0, 0] > rmat_t[:, 1, 1]).float()\n mask_d0_nd1 = (rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]).float()\n\n t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]\n q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],\n t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],\n rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)\n t0_rep = t0.repeat(4, 1).t()\n\n t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]\n q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],\n rmat_t[:, 0, 1] + rmat_t[:, 1, 0],\n t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)\n t1_rep = t1.repeat(4, 1).t()\n\n t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]\n q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],\n rmat_t[:, 2, 0] + rmat_t[:, 0, 2],\n rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)\n t2_rep = t2.repeat(4, 1).t()\n\n t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]\n q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],\n rmat_t[:, 2, 0] - rmat_t[:, 0, 2],\n rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)\n t3_rep = t3.repeat(4, 1).t()\n\n mask_c0 = mask_d2 * mask_d0_d1\n mask_c1 = mask_d2 * (1 - mask_d0_d1)\n mask_c2 = (1 - mask_d2) * mask_d0_nd1\n mask_c3 = (1 - mask_d2) * (1 - mask_d0_nd1)\n mask_c0 = mask_c0.view(-1, 1).type_as(q0)\n mask_c1 = mask_c1.view(-1, 1).type_as(q1)\n mask_c2 = mask_c2.view(-1, 1).type_as(q2)\n mask_c3 = mask_c3.view(-1, 1).type_as(q3)\n\n q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3\n q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 + # noqa\n t2_rep * mask_c2 + t3_rep * mask_c3) # noqa\n q *= 0.5\n return q", "def matrix(self):\n return self._rotation", "def quaternion2rot3D(quaternion):\n theta, axis = quaternion2AngleAxis(quaternion)\n return angleAxis2rot3D(axis, theta)", "def quat_to_angle(self, quat):\n\t\trot = PyKDL.Rotation.Quaternion(quat.x, quat.y, quat.z, quat.w)\n\t\treturn rot.GetRPY()[2]", "def point_rotation_by_quaternion(v, q):\r\n r = [0] + v\r\n q_conj = [q[0], -q[1], -q[2], -q[3]]\r\n return quaternion_product(quaternion_product(q, r), q_conj)[1:]", "def test_to_quaternion(self):\r\n R = np.identity(3)\r\n q = to_quaternion(R)\r\n zero_vec = q - np.array([0., 0., 0., 1.])\r\n self.assertAlmostEqual(np.linalg.norm(zero_vec), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n\r\n R = to_rotation(q)\r\n R2 = to_rotation(to_quaternion(R))\r\n zero_matrix = R - R2\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def convert_rotmat2quat(request):\n\n # TODO complete the function to transform a rotation matrix to quaternion\n\n m = np.array(request.R.data).reshape(3,3)\n\n tr = np.trace(m)\n\n theta = np.arccos((tr-1)/2)\n\n response = rotmat2quatResponse()\n\n if theta==0:\n response.q.x = 0\n response.q.y = 0\n response.q.z = 0\n response.q.w = 1\n elif theta == np.pi or theta == -np.pi:\n K = 0.5 *(m +np.eye(3))\n\n sth2=np.sin(theta/2)\n\n response.q.x = np.sqrt(K[0,0])*sth2\n response.q.y = np.sqrt(K[1,1])*sth2\n response.q.z = np.sqrt(K[2,2])*sth2\n response.q.w = 0\n else:\n 
den = 2*np.sin(theta)\n\n r_x = (1/den) * (m[2,1]-m[1,2])\n r_y = (1/den) * (m[0,2]-m[2,0])\n r_z = (1/den) * (m[1,0]-m[0,1])\n\n sth2=np.sin(theta/2)\n cth2=np.cos(theta/2)\n\n response.q.x = r_x*sth2\n response.q.y = r_y*sth2\n response.q.z = r_z*sth2\n response.q.z = cth2\n\n return response", "def invert_quaternion(quaternion):\n norm = np.linalg.norm(quaternion)\n quaternion[1:] = -1.0 * quaternion[1:]\n return quaternion / norm", "def as_homogenous_transformation(self):\n r3 = self.orientation.normalize().unit_quaternion_as_r3_rotation_matrix()\n return matrix.sqr((r3[0],r3[1],r3[2],self.translation[0],\n r3[3],r3[4],r3[5],self.translation[1],\n r3[6],r3[7],r3[8],self.translation[2],\n 0,0,0,1))", "def quatActiveRot(q, v):\n\tv_q = np.zeros((4, 1))\n\tv_q[1:] = v\n\tv_qnew = quatLeftMat(q) @ quatRightMat(q).T @ v_q\n\treturn v_qnew[1:]", "def test_conversions_matrix_quaternion():\n R = np.eye(3)\n a = pr.axis_angle_from_matrix(R)\n assert_array_almost_equal(a, np.array([1, 0, 0, 0]))\n\n random_state = np.random.RandomState(0)\n for _ in range(5):\n q = pr.random_quaternion(random_state)\n R = pr.matrix_from_quaternion(q)\n pr.assert_rotation_matrix(R)\n\n q2 = pr.quaternion_from_matrix(R)\n pr.assert_quaternion_equal(q, q2)\n\n R2 = pr.matrix_from_quaternion(q2)\n assert_array_almost_equal(R, R2)\n pr.assert_rotation_matrix(R2)", "def rpy_from_quaternion(quaternion):\n (yaw, pitch, roll) = quaternion.yaw_pitch_roll\n return (roll, pitch, yaw)", "def quat_rotate(X, q):\n # repeat q along 2nd dim\n ones_x = X[[0], :, :][:, :, [0]] * 0 + 1\n q = torch.unsqueeze(q, 1) * ones_x\n\n q_conj = torch.cat([q[:, :, [0]], -1 * q[:, :, 1:4]], dim=-1)\n X = torch.cat([X[:, :, [0]] * 0, X], dim=-1)\n\n X_rot = hamilton_product(q, hamilton_product(X, q_conj))\n return X_rot[:, :, 1:4]", "def quat_rotate(X, q):\n # repeat q along 2nd dim\n ones_x = X[[0], :, :][:, :, [0]] * 0 + 1\n q = torch.unsqueeze(q, 1) * ones_x\n\n q_conj = torch.cat([q[:, :, [0]], -1 * q[:, :, 1:4]], dim=-1)\n X = torch.cat([X[:, :, [0]] * 0, X], dim=-1)\n\n X_rot = hamilton_product(q, hamilton_product(X, q_conj))\n return X_rot[:, :, 1:4]", "def quat2transform(q):\n x, y, z, w = q\n xx2 = 2 * x * x\n yy2 = 2 * y * y\n zz2 = 2 * z * z\n xy2 = 2 * x * y\n wz2 = 2 * w * z\n zx2 = 2 * z * x\n wy2 = 2 * w * y\n yz2 = 2 * y * z\n wx2 = 2 * w * x\n\n rmat = np.empty((3, 3), float)\n rmat[0,0] = 1. - yy2 - zz2\n rmat[0,1] = xy2 - wz2\n rmat[0,2] = zx2 + wy2\n rmat[1,0] = xy2 + wz2\n rmat[1,1] = 1. - xx2 - zz2\n rmat[1,2] = yz2 - wx2\n rmat[2,0] = zx2 - wy2\n rmat[2,1] = yz2 + wx2\n rmat[2,2] = 1. 
- xx2 - yy2\n\n return rmat", "def quaternion(self, name, q):\n R = self.R(name=name, q=q)\n quat = transformations.unit_vector(\n transformations.quaternion_from_matrix(matrix=R))\n return quat", "def test_matrix_from_quaternion_hamilton():\n q = np.sqrt(0.5) * np.array([1, 0, 0, 1])\n R = pr.matrix_from_quaternion(q)\n assert_array_almost_equal(\n np.array([[0, -1, 0],\n [1, 0, 0],\n [0, 0, 1]]),\n R\n )", "def get_world_rot_as_quat(m_obj):\n plug = get_world_matrix_plug(m_obj, 0)\n matrix_obj = plug.asMObject()\n matrix_data = oMa.MFnMatrixData(matrix_obj)\n matrix = matrix_data.matrix()\n\n trans_matrix = oMa.MTransformationMatrix(matrix)\n rot = trans_matrix.rotation(asQuaternion=True)\n\n return rot", "def quatLeftMat(q):\n\ts = q[0]\n\tv = q[1:].reshape(-1,)\n\tL = np.zeros((4, 4))\n\tL[0, 0] = s\n\tL[0, 1:] = -v\n\tL[1:, 0] = v\n\tL[1:, 1:] = s*np.eye(3) + skewMat(v)\n\treturn L", "def random_rotate():\n u = np.random.uniform(size=3)\n\n # Random quaternion\n q = np.array([np.sqrt(1-u[0])*np.sin(2*np.pi*u[1]),\n np.sqrt(1-u[0])*np.cos(2*np.pi*u[1]),\n np.sqrt(u[0])*np.sin(2*np.pi*u[2]),\n np.sqrt(u[0])*np.cos(2*np.pi*u[2])])\n \n # Convert the quaternion into a rotation matrix \n rotMat = np.array([[q[0]*q[0] + q[1]*q[1] - q[2]*q[2] - q[3]*q[3],\n 2*q[1]*q[2] - 2*q[0]*q[3],\n 2*q[1]*q[3] + 2*q[0]*q[2]],\n [2*q[1]*q[2] + 2*q[0]*q[3],\n q[0]*q[0] - q[1]*q[1] + q[2]*q[2] - q[3]*q[3],\n 2*q[2]*q[3] - 2*q[0]*q[1]],\n [2*q[1]*q[3] - 2*q[0]*q[2],\n 2*q[2]*q[3] + 2*q[0]*q[1],\n q[0]*q[0] - q[1]*q[1] - q[2]*q[2] + q[3]*q[3]]])\n return rotMat", "def quaternion_from_axis_angle(x, y, z, theta):\n if x == y == z == 0:\n return np.array([1, 0, 0, 0])\n axis = np.array([x, y, z])\n axis /= np.linalg.norm(axis)\n return rowan.from_axis_angle(axis, theta)", "def test_quaternion_from_matrix_180():\n a = np.array([1.0, 0.0, 0.0, np.pi])\n q = pr.quaternion_from_axis_angle(a)\n R = pr.matrix_from_axis_angle(a)\n q_from_R = pr.quaternion_from_matrix(R)\n assert_array_almost_equal(q, q_from_R)\n\n a = np.array([0.0, 1.0, 0.0, np.pi])\n q = pr.quaternion_from_axis_angle(a)\n R = pr.matrix_from_axis_angle(a)\n q_from_R = pr.quaternion_from_matrix(R)\n assert_array_almost_equal(q, q_from_R)\n\n a = np.array([0.0, 0.0, 1.0, np.pi])\n q = pr.quaternion_from_axis_angle(a)\n R = pr.matrix_from_axis_angle(a)\n q_from_R = pr.quaternion_from_matrix(R)\n assert_array_almost_equal(q, q_from_R)\n\n R = np.array(\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, -1.0]])\n assert_raises_regexp(\n ValueError, \"Expected rotation matrix\", pr.quaternion_from_matrix, R)\n\n R = np.array(\n [[-1.0, 0.0, 0.0],\n [0.0, 0.00000001, 1.0],\n [0.0, 1.0, -0.00000001]])\n q_from_R = pr.quaternion_from_matrix(R)", "def from_rotation_mat(rot: np.ndarray) -> Quaternion:\n if rot.shape != (3, 3):\n raise TypeError('input rot should be a 3x3 matrix')\n\n t = rot.trace()\n if t > 0:\n t = np.sqrt(t + 1.0)\n w = 0.5 * t\n t = 0.5 / t\n x = (rot[2, 1] - rot[1, 2]) * t\n y = (rot[0, 2] - rot[2, 0]) * t\n z = (rot[1, 0] - rot[0, 1]) * t\n return Quaternion(w, np.array([x, y, z]))\n else:\n i = 0\n if rot[1, 1] > rot[0, 0]:\n i = 1\n if rot[2, 2] > rot[i, i]:\n i = 2\n j = (i + 1) % 3\n k = (j + 1) % 3\n\n data = np.zeros(4) # quaternion item [x, y, z, w]\n t = np.sqrt(rot[i, i] - rot[j, j] - rot[k, k] + 1.0)\n data[i] = 0.5 * t\n t = 0.5 / t\n data[-1] = (rot[k, j] - rot[j, k]) * t # w\n data[j] = (rot[j, i] + rot[i, j]) * t\n data[k] = (rot[k, i] + rot[i, k]) * t\n return Quaternion(data[-1], data[:3])", "def 
transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def rotation_matrix(theta=0, phi=0, psi=0, units='deg'):\n\n rpy = Rpy(theta,units)\n rmx = Rmx(phi, units)\n rpz = Rpz(psi, units)\n\n return np.matmul(rpy, np.matmul(rmx, rpz))", "def get_random_quaternion(self):\n random_angles = self.get_random_vector([0,0,0], [2*np.pi, 2*np.pi, 1])\n return tf.transformations.quaternion_from_euler(random_angles[0],\n random_angles[1],\n 0)", "def angle_to_rotation_matrix(angle: torch.Tensor) -> torch.Tensor:\n ang_rad = deg2rad(angle)\n cos_a: torch.Tensor = torch.cos(ang_rad)\n sin_a: torch.Tensor = torch.sin(ang_rad)\n return torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)", "def quatPassiveRot(q, v):\n\tv_q = np.zeros((4, 1))\n\tv_q[1:] = v\n\tv_qnew = quatLeftMat(q).T @ quatRightMat(q) @ v_q\n\treturn v_qnew[1:]", "def mat2quat(mat, rot_type='proper'):\n #leila: read this: https://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/index.htm\n ax_ang = vrrotmat2vec(mat, rot_type)\n q0 = np.cos(ax_ang[3, :]/2)\n q1 = ax_ang[0, :]*np.sin(ax_ang[3, :]/2)\n q2 = ax_ang[1, :]*np.sin(ax_ang[3, :]/2)\n q3 = ax_ang[2, :]*np.sin(ax_ang[3, :]/2)\n qtype = ax_ang[4, :]\n\n return quat.Quaternion(q0, q1, q2, q3, qtype)", "def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation", "def quaternion_to_RPY(q: array):\n\n roll: float = arctan2(2 * (q[0] * q[1] + q[2] * q[3]), 1 - (2 * (power(q[1], 2) + power(q[2], 2))))\n pitch: float = arcsin(2 * (q[0] * q[2] - q[3] * q[1]))\n yaw: float = arctan2(2 * (q[0] * q[3] + q[1] * q[2]), 1 - (2 * (power(q[2], 2) + power(q[3], 2))))\n\n return roll, pitch, yaw", "def base_orientation_quaternion(self):\n raise NotImplementedError('Not yet implemented!')", "def quaternion_from_rpy(roll, pitch, yaw):\n quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n return pyquaternion.Quaternion(numpy.roll(quaternion, 1))", "def getRotationMatrix(x, y, z, angle):\n # impossible to have a rotational matrix around (0, 0 ,0)\n if x == 0 and y == 0 and z == 0:\n raise Exception(\"Cannot have a rotation matrix around (0, 0, 0)\")\n\n # normalize vector\n vec = MatrixExtended([x, y, z])\n length = np.linalg.norm(vec)\n x /= length\n y /= length\n z /= length\n\n # some shortcuts for readability\n xx = x * x\n yy = y * y\n zz = z * z\n C = math.cos\n S = math.sin\n\n # calculate matrix elements\n e11 = xx + (1 - xx) * C(angle)\n e12 = x * y * (1 - C(angle)) - z * S(angle)\n e13 = x * z * (1 - C(angle)) + y * S(angle)\n e21 = x * y * (1 - C(angle)) + z * S(angle)\n e22 = yy + (1 - yy) * C(angle)\n e23 = y * z * (1 - C(angle)) - x * S(angle)\n e31 = x * z * (1 - C(angle)) - y * S(angle)\n e32 = y * z * (1 - C(angle)) + x * S(angle)\n e33 = zz + (1 - zz) * C(angle)\n\n return MatrixExtended([\n [e11, e12, e13, 0],\n [e21, e22, e23, 0],\n [e31, e32, e33, 0],\n [0, 0, 0, 1]])", "def euler_to_quaternion(euler: tuple) -> object:\n\n (yaw, pitch, roll) = (euler[0], 
euler[1], euler[2])\n qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)\n qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)\n qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n return qx, qy, qz, qw", "def getDejaVuMatrix(self):\n mtx = self.getRotMatrix((4, 4), transpose=None) # from Quaternion\n mtx[3] = self.getTranslation()\n mtx[:3, 3] = mtx[3, :3]\n mtx[3, :3] = [0, 0, 0]\n return mtx", "def _quatm(q1, q0):\n w0, x0, y0, z0 = q0\n w1, x1, y1, z1 = q1\n\n return torch.cuda.FloatTensor([\n -x1*x0 - y1*y0 - z1*z0 + w1*w0,\n x1*w0 + y1*z0 - z1*y0 + w1*x0,\n -x1*z0 + y1*w0 + z1*x0 + w1*y0,\n x1*y0 - y1*x0 + z1*w0 + w1*z0,\n ])", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def quatreal(q):\n a = q[0,0]\n b = q[0,1]\n c = q[0,2]\n d = q[0,3]\n amat = a*np.identity(4)\n bmat = b*np.array([[0,1,0,0],[-1,0,0,0],[0,0,0,-1],[0,0,1,0]])\n cmat = c*np.array([[0,0,1,0],[0,0,0,1],[-1,0,0,0],[0,-1,0,0]])\n dmat = d*np.array([[0,0,0,1],[0,0,-1,0],[0,1,0,0],[-1,0,0,0]])\n return amat+bmat+cmat+dmat", "def get_rot(self) -> WAQuaternion:\n pass", "def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:\n if not torch.is_tensor(quaternion):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(\n type(quaternion)))\n\n if not quaternion.shape[-1] == 4:\n raise ValueError(\"Input must be a tensor of shape Nx4 or 4. Got {}\"\n .format(quaternion.shape))\n # unpack input and compute conversion\n q1: torch.Tensor = quaternion[..., 1]\n q2: torch.Tensor = quaternion[..., 2]\n q3: torch.Tensor = quaternion[..., 3]\n sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3\n\n sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)\n cos_theta: torch.Tensor = quaternion[..., 0]\n two_theta: torch.Tensor = 2.0 * torch.where(\n cos_theta < 0.0,\n torch.atan2(-sin_theta, -cos_theta),\n torch.atan2(sin_theta, cos_theta))\n\n k_pos: torch.Tensor = two_theta / sin_theta\n k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)\n k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)\n\n angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]\n angle_axis[..., 0] += q1 * k\n angle_axis[..., 1] += q2 * k\n angle_axis[..., 2] += q3 * k\n return angle_axis", "def getRotMatrix(self, shape=(4, 4), transpose=None):\n try:\n assert (shape in [(3, 3), (4, 4), (9,), (16,)])\n except:\n raise ValueError('shape must be (3,3), (4,4), (9,) or (16,)')\n\n # get the inverse 4x4 from rotax\n mtx = rotax.rotax(numpy.array([0., 0., 0.], 'f'), self.pure, 2 * numpy.arccos(self.real))\n\n # strip if necessary\n if shape in ((3, 3), (9,)):\n mtx = [x[:3] for x in mtx]\n mtx = mtx[:3]\n\n if not transpose:\n return numpy.reshape(numpy.transpose(mtx), shape)\n else:\n return numpy.reshape(mtx, shape)", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. 
- tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def to_quaternion(self, roll=0.0, pitch=0.0, yaw=0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]", "def _rotation_matrix(theta):\n c, s = np.cos(theta), np.sin(theta)\n return np.array(((c, -s), (s, c)))", "def rotation_matrix(phi):\n return np.asmatrix([\n [np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]\n ])", "def rot2mat(rotation: np.ndarray) -> np.ndarray:\n rotation_radians = ndarray_to_rotation(rotation)\n pitch = np.deg2rad(rotation_radians.pitch)\n roll = np.deg2rad(rotation_radians.roll)\n yaw = np.deg2rad(rotation_radians.yaw)\n return transforms3d.euler.euler2mat(roll, pitch, yaw).T" ]
[ "0.8077829", "0.79751414", "0.7973847", "0.797261", "0.79455817", "0.79306656", "0.79097867", "0.780534", "0.7727341", "0.77201724", "0.77022475", "0.7419111", "0.74067664", "0.7311962", "0.7208562", "0.7142397", "0.71323454", "0.71113116", "0.70910096", "0.7048041", "0.69940317", "0.6971428", "0.6941373", "0.69386965", "0.69329625", "0.6930702", "0.69224066", "0.6919321", "0.6919321", "0.69137985", "0.6898834", "0.6868026", "0.6868026", "0.68509674", "0.6848649", "0.68468344", "0.6846473", "0.68445957", "0.68262446", "0.6817498", "0.6812973", "0.68067306", "0.68035173", "0.6796243", "0.6761111", "0.67498", "0.67055744", "0.6701874", "0.6696724", "0.66649985", "0.66607684", "0.6660748", "0.6653667", "0.66239536", "0.66127306", "0.6604881", "0.6587833", "0.6576355", "0.6575108", "0.6562569", "0.6549369", "0.6541474", "0.652368", "0.6518132", "0.6503336", "0.6499726", "0.6499726", "0.64790064", "0.64687574", "0.6462367", "0.64555436", "0.64419985", "0.6441698", "0.644071", "0.6418779", "0.64143133", "0.64021724", "0.6380904", "0.6353233", "0.635217", "0.6339214", "0.6329648", "0.6315079", "0.6311279", "0.6291337", "0.6285608", "0.6262913", "0.62621385", "0.6261672", "0.62490964", "0.6243126", "0.62248117", "0.6222157", "0.62094337", "0.6192065", "0.6181783", "0.6180523", "0.6165885", "0.61611146", "0.6158212" ]
0.8253612
0
Returns a count of the number of unique metrics currently recorded for apdex, time and value metrics.
def metrics_count(self): return len(self.__stats_table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")", "def metric_data_count(self):\n\n if not self.__settings:\n return 0\n\n return len(self.__stats_table)", "def get_counts(self):\n self._update_counts()\n return self.failures, self.warnings, self.infos", "def get_number_of_measurement(self):\n num_of_meas = 0\n for time in self.mdvtc.keys():\n num_of_meas = num_of_meas + self.mdvtc[time].get_number_of_measurement()\n #\n return num_of_meas", "def device_count():\n apipath = \"/targets/devices\"\n url = SERVER + apipath\n params = {\n 'q': '(deviceType:ASA)',\n 'agg': 'count'}\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"bearer {}\".format(token)}\n response = requests.get(url, verify=False, stream=True, headers=headers, params=params)\n getstatuscode = response.status_code\n getresponse = response.json()\n if getstatuscode == 200:\n return getresponse\n else:\n response.raise_for_status()", "def _fetch_count_metrics_and_clear(self):\n with self._count_rlock:\n count_metrics = self._count_metrics\n self._count_metrics = defaultdict(int)\n\n return count_metrics", "def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)", "def get_total_counts(self):\n ret = {}\n all_loggers_count = 0\n for logger, name_map in self.acc_map.items():\n cur_logger_count = 0\n ret[logger.name] = {}\n for name, status_map in name_map.items():\n cur_name_count = 0\n ret[logger.name][name] = {}\n for status, acc in status_map.items():\n cur_count = acc.total_count\n ret[logger.name][name][status] = cur_count\n cur_name_count += cur_count\n cur_logger_count += cur_count\n all_loggers_count += cur_count\n ret[logger.name][name]['__all__'] = cur_name_count\n ret[logger.name]['__all__'] = cur_logger_count\n ret['__all__'] = all_loggers_count\n return ret", "def get_count():\n _check_init()\n return _pypm.CountDevices()", "def analyze_count(data):\n\n dsct_vk = pd.unique(data['vk'])\n dsct_itemid = pd.unique(data['itemid'])\n\n print 'number of user:', dsct_vk.shape\n print 'number of items:', dsct_itemid.shape\n print 'the number of ratings:', data.shape\n\n print 'unique actions:', pd.unique(data['action'])\n print 'the number of action 0:', np.sum(data['action'] == 0)\n print 'the number of action 1:', np.sum(data['action'] == 1)\n print 'the number of action 2:', np.sum(data['action'] == 2)\n print 'the number of action 3:', np.sum(data['action'] == 3)\n print 'the number of action 4:', np.sum(data['action'] == 4)\n \n time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)\n print 'Max Range:', np.max(time_range_item)\n print 'Mean Range:', np.mean(time_range_item)\n print 'Median Range:', np.median(time_range_item)", "def count(self, key):\n self._metrics[key] += 1", "def observation_count(self):\n if not self.can_update():\n self._handle_error(910, [self.type])\n return self.tc_requests.observation_count(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )", "def get_metrics(self):\n self.logger.debug(\"Fetching metrics.\")\n return self._api_query(\"metrics\")['metrics']", "async def test_nr_of_metrics(self):\n response = await 
self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def count(time):\n \n return len(events(time))", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres", "def test_get_time_summary_stats_counter():\n # This is constructed to test the parsing logic for timestamps, so the number don't\n # add up.\n runtime_profile = \"- ExampleTimeStats: (Avg: 161.554ms ; \" \\\n \"Min: 101.411us ; \" \\\n \"Max: 1h2m3s4ms5us6ns ; \" \\\n \"Number of samples: 6)\"\n summary_stats = get_time_summary_stats_counter(\"ExampleTimeStats\", runtime_profile)\n assert len(summary_stats) == 1\n assert summary_stats[0].sum == 969324000\n assert summary_stats[0].min_value == 101411\n assert summary_stats[0].max_value == 3723004005006\n assert summary_stats[0].total_num_values == 6", "def gives_stats():\n dict_count = {\n \"amenities\": storage.count(Amenity),\n \"cities\": storage.count(City),\n \"places\": storage.count(Place),\n \"reviews\": storage.count(Review),\n \"states\": storage.count(State),\n \"users\": storage.count(User)\n }\n return jsonify(dict_count)", "def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics", "def get_count_overview():\n from app.core.api_views import Api\n api = Api()\n return api.getOverviewCount(\n db_name=LoggingDetails,\n field='success',\n key='logs',\n )", "def metrics(self):\n\n return six.iteritems(self.__stats_table)", "def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)", "def count(timeseries):\n try:\n return timeseries[0].points[0].value.int64_value\n except (IndexError, AttributeError) as exception:\n LOGGER.warning(\"Couldn't find any values in timeseries response\")\n LOGGER.debug(exception)\n return 0 # no events in timeseries", "def get_attendance_counts(attendance):\n count_a = 0\n count_p = 0\n count_d = 0\n for a in attendance:\n if a.ATT_STATUS == 'A':\n count_a+=1\n elif a.ATT_STATUS == 'D':\n count_d+=1\n elif a.ATT_STATUS == 'P':\n count_p+=1\n return (count_p,count_a,count_d)", "def count():", "def count(self):\n return self._reduce_for_stat_function(F.count, only_numeric=False)", "def hits(self) -> Mapping[str, int]:\n if len(self._clock_starts) > 0:\n warnings.warn(\n \"Retrieved hit counts while clocks are still going, \"\n \"incomplete times are not included: \"\n f\"{list(self._clock_starts.keys())}\",\n RuntimeWarning,\n )\n return self._hit_count.copy()", "def get_num_measured_outputs(self):\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n i += 1\n return i", "def numberActivities(self):\n if self.use_dic:\n nb_data = self.dic.keys()\n nb_act = (self.dic[nb_data[0]]).keys()\n return len(nb_data)*len(nb_act)\n else:\n return -1", "def class_callcount(self):\r\n # timing is stored by node, we compute timing by class on demand\r\n rval = {}\r\n for node, count in self.apply_callcount.items():\r\n 
typ = type(node.op)\r\n rval.setdefault(typ, 0)\r\n rval[typ] += count\r\n return rval", "def probe_counts(**kwargs):\n attributes = [\"ping_type\", \"os\", \"app_version\", \"app_build_id\", \"channel\"]\n\n return dict(\n attributes=\",\".join(attributes),\n aggregate_attributes=\"\"\"\n metric,\n metric_type,\n key\n \"\"\",\n aggregate_grouping=\"\"\"\n client_agg_type,\n agg_type\n \"\"\",\n # not boolean\n scalar_metric_types=\"\"\"\n \"counter\",\n \"quantity\",\n \"labeled_counter\",\n \"timespan\"\n \"\"\",\n boolean_metric_types=\"\"\"\n \"boolean\"\n \"\"\",\n **kwargs,\n )", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def get_resource_count(har_json):\n entries = har_json['log']['entries']\n\n resource_type_counts = Counter()\n\n for entry in entries:\n resource = entry['request']['url']\n dirty_resource_type = resource.split('.')[-1]\n resource_type = dirty_resource_type.split('?')[0] # Remove url params\n if len(resource_type) > 4:\n resource_type_counts['other'] += 1\n # print 'Found other resource type: {0}'.format(resource_type)\n else:\n resource_type_counts[resource_type] += 1\n\n return resource_type_counts", "def count_barcodes(metrics_file):\n\n barcodes = pd.read_csv(metrics_file, sep=\"\\t\", header=0, names=[\"barcode\", \"randomer\", \"count\"])\n return Counter(dict(barcodes.groupby(\"barcode\")['count'].sum().iteritems()))", "def count_measurements(database: Database) -> int:\n return int(database.measurements.count_documents(filter={}))", "def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_usage(metric: str, interval_time: int):\n\n count, ignored = check_scale(metric, f'{interval_time}s')\n param = {\n 'start': f'{interval_time}s-ago',\n 'm': f'sum:{metric}' + '{host=*}',\n }\n\n start = time.time()\n resp = urlopen(f'http://{OPENTSDB_HOST}:{OPENTSDB_PORT}/api/query?', param)\n if resp.status == 200:\n _total = json.load(resp)\n else:\n pass\n\n # remove the elements that should be ignored\n valid_source = [i for i in _total if i['tags'] not in ignored]\n\n valid_last_time = []\n for i in valid_source:\n last = sorted(i['dps'].keys())[-1]\n if (start - interval_time) <= int(last) <= (start + interval_time):\n valid_last_time.append(i)\n else:\n pass\n # elements in valid_last_time mean it should be aggregated.\n total = [i['dps'][sorted(i['dps'].keys())[-1]] for i in valid_last_time]\n\n return count, sum(total)", "def datacounts(self):\n return self._properties[\"datacounts\"]", "def timerCount(cmds):\n return int(sum(np.asarray(cmds) == 0x400001)) # numpy version\n #return cmds.count(0x400001) # python list version", "def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics", "def count(self):\n return len([i for i in self.iteritems()])", "def count(self):\n # TODO not implemented yet\n return 0", "def update_count(self):\n count_metrics = self._fetch_count_metrics_and_clear()\n self._logger.info('update_count. 
count_metrics = %s',\n build_metrics_counter_data(count_metrics))", "def count_counts(self):\n count_counts = defaultdict(Counter)\n for token, followers in self._dict.items():\n for f, count in followers.items():\n count_counts[token][count] += 1\n count_counts[token][0] = len(self._dict) - sum(count_counts[token].values())\n return count_counts", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def get_usage_count(equations):\n usage_count = {}\n for eq in equations:\n usage_count.setdefault(eq.lhs, 0)\n for var in eq.rhs.atoms(Variable):\n usage_count.setdefault(var, 0)\n usage_count[var] += 1\n return usage_count", "def count(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n return profData.Count()\n return \"\"", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def stats(self):\n return {\"size\": 0, \"maxsize\": 0, \"hits\": 0, \"miss\": 0}", "def metrics(self) -> list[dict[str, dict[str, float | int]]]:\n return self.performance[\"performances\"]", "def example_six():\n stats = defaultdict(int)\n stats['my_counter'] += 1", "def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)", "def get_count(name, key):\n total = 0\n query = CounterShard.all().filter('name = ', name).filter('reference_key = ', key)\n for counter in query:\n total += counter.count\n \n return total", "def test_data_source_soaps_count_get(self):\n pass", "def _getValueCounts(mapping):\n return Counter({k: len(v) for k, v in viewitems(mapping)})", "def count(self, tokens):\n return self.counts[tokens]", "def stats():\n class_counts = {}\n convert_dict = {\n 'Amenity': 'amenities',\n 'State': 'states',\n 'City': 'cities',\n 'User': 'users',\n 'Place': 'places',\n 'Review': 'reviews'\n }\n\n for _class in convert_dict.keys():\n class_counts[convert_dict[_class]] = storage.count(_class)\n\n return jsonify(class_counts)", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def status_counts(self):\n return self._status_counts", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def get_all_metrics():\n return get_overlap_metrics() + get_distance_metrics() + get_distance_metrics()", "def _count_devices(self):\n number_of_devices = ctypes.c_uint()\n\n if ctypes.windll.user32.GetRawInputDeviceList(\n ctypes.POINTER(ctypes.c_int)(),\n ctypes.byref(number_of_devices),\n ctypes.sizeof(RawInputDeviceList)) == -1:\n warn(\"Call to GetRawInputDeviceList was unsuccessful.\"\n \"We have no idea if a mouse or keyboard is attached.\",\n RuntimeWarning)\n return\n\n devices_found = (RawInputDeviceList * number_of_devices.value)()\n\n if ctypes.windll.user32.GetRawInputDeviceList(\n devices_found,\n ctypes.byref(number_of_devices),\n ctypes.sizeof(RawInputDeviceList)) == -1:\n warn(\"Call to GetRawInputDeviceList was unsuccessful.\"\n \"We have no idea if a mouse or keyboard is attached.\",\n RuntimeWarning)\n return\n\n for device in devices_found:\n if device.dwType == 0:\n self._raw_device_counts['mice'] += 1\n elif 
device.dwType == 1:\n self._raw_device_counts['keyboards'] += 1\n elif device.dwType == 2:\n self._raw_device_counts['otherhid'] += 1\n else:\n self._raw_device_counts['unknown'] += 1", "def class_callcount(self):\n # timing is stored by node, we compute timing by class on demand\n rval = {}\n for (fgraph, node), count in self.apply_callcount.items():\n typ = type(node.op)\n rval.setdefault(typ, 0)\n rval[typ] += count\n return rval", "def get_attribute_counts(self):\n counts = defaultdict(int)\n for attr in self:\n counts[attr.name] += 1\n\n return dict(counts)", "def metrics(self):\n return self.__metrics", "def fetch_counts_for_debug(stdout):\n test_names = TEST_NAMES_DEBUG_APP_PATTERN.findall(stdout)\n test_counts = collections.Counter(test_class for test_class, _ in test_names\n if test_class not in IGNORED_CLASSES)\n\n return test_counts", "def test_counts(pawprint_default_tracker_db_with_table):\n\n tracker = pawprint_default_tracker_db_with_table\n\n # Add a bunch of events\n query = (\n \"\"\"\n INSERT INTO {} (timestamp, user_id, event) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in'),\n ('2016-01-01 12:40', 'bob', 'logged_in'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in'),\n ('2016-01-02 00:00', 'dan', 'logged_in'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in'),\n ('2016-01-05 00:00', 'frank', 'logged_in'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in'),\n ('2016-01-20 00:00', 'hans', 'logged_in'),\n ('2016-02-01 00:00', 'iris', 'logged_in'),\n ('2016-02-01 00:00', 'james', 'logged_in'),\n ('2016-03-01 00:00', 'kelly', 'logged_in'),\n ('2016-03-01 00:00', 'laura', 'logged_in'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in')\n \"\"\"\n ).format(tracker.table)\n\n pd.io.sql.execute(query, tracker.db)\n\n logins_hourly = tracker.count(event=\"logged_in\", resolution=\"hour\")\n logins_daily = tracker.count(event=\"logged_in\")\n logins_weekly = tracker.count(event=\"logged_in\", resolution=\"week\")\n logins_monthly = tracker.count(event=\"logged_in\", resolution=\"month\")\n logins_weekly_left_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", start=datetime(2016, 2, 1)\n )\n logins_weekly_right_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", end=datetime(2016, 2, 1)\n )\n logins_daily_full_range = tracker.count(\n event=\"logged_in\", start=datetime(2016, 1, 15), end=datetime(2016, 2, 15)\n )\n\n # Hourly\n assert len(logins_hourly) == 8\n assert np.all(logins_hourly[\"count\"].values == [2, 1, 2, 1, 1, 1, 2, 2])\n\n # Daily\n assert len(logins_daily) == 7\n assert np.all(logins_daily[\"count\"].values == [3, 2, 1, 1, 1, 2, 2])\n\n # Weekly\n assert len(logins_weekly) == 5\n assert np.all(logins_weekly[\"count\"].values == [5, 2, 1, 2, 2])\n\n # Others\n assert len(logins_monthly) == 3\n assert len(logins_weekly_left_range) == 2 # weeks start on Monday\n assert len(logins_weekly_right_range) == 4 # and not at the start / end dates provided\n assert len(logins_daily_full_range) == 2", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def count(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'count')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def count(self, value): # real signature unknown; restored from __doc__\n return 0", "def test_get_virtual_machine_count_metrics1(self):\n pass", "def hits(self):\n return len(self.successes) + len(self.failures)", "def count(self):\n return self.get_count()", "def 
_calculate_metrics(self):\n metrics = {}\n precision, recall = self.calc_precision_recall()\n metrics[\"precision\"] = precision\n metrics[\"recall\"] = recall\n metrics[\"entropy\"] = self.calc_entropy()\n metrics[\"component_entropy\"] = self.calc_component_entropy()\n metrics[\"num_comps\"] = len(self.get_components())\n metrics[\"num_diagnoses\"] = len(self.diagnoses)\n metrics[\"distinct_diagnoses_scores\"] = len(Counter(list(map(lambda x: x.probability, self.diagnoses))))\n metrics[\"num_tests\"] = len(self.get_tests())\n metrics[\"num_distinct_traces\"] = len(self.get_distinct_traces())\n metrics[\"num_failed_tests\"] = len(self._get_tests_by_error(1))\n metrics[\"num_passed_tests\"] = len(self._get_tests_by_error(0))\n passed_comps = set(self._get_components_by_error(0))\n failed_comps = set(self.get_components_in_failed_tests())\n metrics[\"num_failed_comps\"] = len(failed_comps)\n metrics[\"only_failed_comps\"] = len(failed_comps - passed_comps)\n metrics[\"only_passed_comps\"] = len(passed_comps - failed_comps)\n metrics[\"num_bugs\"] = len(self.get_bugs())\n metrics[\"wasted\"] = self.calc_wasted_components()\n metrics[\"top_k\"] = self.calc_top_k()\n metrics[\"num_comps_in_diagnoses\"] = len(self._get_comps_in_diagnoses())\n metrics[\"bugs_cover_ratio\"] = self._get_bugs_cover_ratio()\n metrics[\"average_trace_size\"] = self._get_average_trace_size()\n metrics[\"average_component_activity\"] = self._get_average_component_activity()\n metrics[\"average_diagnosis_size\"] = self._get_average_diagnosis_size()\n metrics[\"bugs_scores_average\"], metrics[\"bugs_scores_std\"], metrics[\"bugs_scores_entropy\"] = self._get_bugs_scores()\n metrics[\"non_bugs_scores_average\"], metrics[\"non_bugs_scores_std\"], metrics[\"non_bugs_scores_entropy\"] = self._get_non_bugs_scores()\n metrics.update(self.cardinality())\n # metrics[\"ochiai\"] = self.calc_ochiai_values()\n return metrics", "def count_benchmarks():\n return len(setup_storage().fetch_benchmark({}))", "def counts(self) -> dict:\n return Counter(self.sequence)", "def tally(self):\n return self.count", "def size(self) -> Tuple[int, int]:\n count_keys = 0 # store the number of different 'key'.\n count_values = 0 # store the the number of different 'value'.\n for node in self.hashTable:\n count_values = count_values + node.count\n count_keys = count_keys + len(node.keys)\n return count_keys, count_values", "def static_metrics(self) -> dict[str, float | int]:\n return self.performance[\"meta\"]", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def trainCount(\n trainData, \n questionType,\n questionDict,\n questionIdict, \n objDict, \n objIdict,\n numAns):\n count_wa = np.zeros((len(objIdict), numAns))\n count_a = np.zeros((numAns))\n objIds = extractObjId(\n trainData[0], \n questionType, \n questionDict, \n questionIdict)\n for i in range(objIds.shape[0]):\n objId = objIds[i]\n obj = questionIdict[objId - 1]\n ansId = trainData[1][i, 0]\n objId2 = objDict[obj]\n count_wa[objId2, ansId] += 1\n count_a[ansId] += 1\n # Add UNK count\n count_a[-1] += 1\n return count_wa, count_a", "def getUsersBySSID():\n\tstats = {}\n\tms = MobileStation.objects.filter(ssid__isnull=False)\n\tfor ssid in set(MobileStation.objects.values_list('ssid', flat=True)):\n\t\tstats[ssid] = MobileStation.objects.areAssociated().filter(ssid=ssid).count()\n\treturn stats", "def count(self):\n\n raise NotImplementedError", "def counts(self):\n\n if self._counts is not None:\n return self._counts\n else:\n try:\n return self.cps * self.livetime\n 
except TypeError:\n raise SpectrumError(\n 'Unknown livetime; cannot calculate counts from CPS')", "def count(self, tokens):\n return self._count[tuple(tokens)]", "def summarize(self) -> Mapping[str, int]:\n return dict(\n compounds=self.count_compounds(),\n side_effects=self.count_side_effects(),\n indications=self.count_indications(),\n umls=self.count_umls(),\n )", "def metrics(self):\n return self._metrics", "def metrics(self):\n return self._metrics", "def get_all_dataset_counts(\n self,\n ) -> Dict[Tuple[str, int, int], int]:\n res = self._engine.execute(\n select(\n [\n PRODUCT.c.name,\n TIME_OVERVIEW.c.start_day,\n TIME_OVERVIEW.c.period_type,\n TIME_OVERVIEW.c.dataset_count,\n ]\n )\n .select_from(TIME_OVERVIEW.join(PRODUCT))\n .where(TIME_OVERVIEW.c.product_ref == PRODUCT.c.id)\n .order_by(\n PRODUCT.c.name, TIME_OVERVIEW.c.start_day, TIME_OVERVIEW.c.period_type\n )\n )\n\n return {\n (\n r.name,\n *TimePeriodOverview.from_flat_period_representation(\n r.period_type, r.start_day\n )[:2],\n ): r.dataset_count\n for r in res\n }", "def task3a(self):\n browser_count = {}\n for entry in self.records:\n if((entry['visitor_device'] == 'browser') and (entry['event_type'] == 'read')):\n browser = entry['visitor_useragent']\n if (browser in browser_count):\n browser_count[entry['visitor_useragent']] += 1\n else:\n browser_count[entry['visitor_useragent']] = 1\n GUI.show_histo(browser_count, \"vert\", \"Number of Accesses using Browser\", \"Browser Distribution\")", "def get_metrics(self):\n return None" ]
[ "0.6411366", "0.6283042", "0.62296087", "0.6220417", "0.61111647", "0.6047972", "0.60304636", "0.5969979", "0.59513307", "0.5878805", "0.5827968", "0.5753369", "0.5703473", "0.56953144", "0.56933933", "0.56933933", "0.5690032", "0.5668702", "0.56455106", "0.5636696", "0.56353694", "0.5620147", "0.56077564", "0.5602681", "0.5592418", "0.5571603", "0.5568807", "0.55369514", "0.5507133", "0.5506029", "0.5504633", "0.5498267", "0.548768", "0.54729164", "0.54719996", "0.54719996", "0.54719996", "0.54719996", "0.54715645", "0.54708236", "0.54609716", "0.545132", "0.5451227", "0.5448816", "0.54399306", "0.5427897", "0.54258287", "0.5419275", "0.5417845", "0.5417562", "0.5408946", "0.5406547", "0.53984153", "0.5398128", "0.53969675", "0.5390657", "0.53872776", "0.5381089", "0.5375169", "0.53618926", "0.53606606", "0.5358235", "0.53581595", "0.5357596", "0.5357596", "0.5349776", "0.5349084", "0.53400207", "0.5337269", "0.5334993", "0.5334892", "0.53306955", "0.53262585", "0.5323736", "0.53215295", "0.53215295", "0.53215295", "0.5320835", "0.53108346", "0.53062135", "0.5304726", "0.53026265", "0.5302289", "0.5298677", "0.5296915", "0.5294834", "0.5293615", "0.52928203", "0.52926236", "0.52914274", "0.5288801", "0.5280909", "0.5280162", "0.5279869", "0.52763927", "0.5275152", "0.5275152", "0.52729756", "0.52726495", "0.5271937" ]
0.6419223
0
Run the script at given path catching exceptions. This function should only be used internally by Pyto.
def runScriptAtPath(path): sys.argv = [path] for arg in PytoClasses.Python.shared.args: sys.argv.append(str(arg)) def run() -> None: os.system = PytoClasses.Python.shared.system directory = os.path.expanduser(os.path.dirname(path)) sys.path.insert(0, directory) try: global __script__ spec = importlib.util.spec_from_file_location("__main__", path) __script__ = importlib.util.module_from_spec(spec) spec.loader.exec_module(__script__) PytoClasses.Python.shared.values = [item for item in dir(__script__) if not item.startswith("__")] except SystemExit: print("SystemExit") except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() extracts = traceback.extract_tb(sys.exc_info()[2]) count = len(extracts) lineNumber = -1 fileName = path for i, extract in enumerate(extracts): if extract[0] == fileName: lineNumber = extract[1] break count -= 1 if (type(e) == SyntaxError): # The last word in a `SyntaxError` exception is the line number lineNumber = [int(s) for s in (str(e)[:-1]).split() if s.isdigit()][-1] PytoClasses.Python.shared.errorType = exc_type.__name__ PytoClasses.Python.shared.errorReason = str(e) PytoClasses.EditorViewController.visible.showErrorAtLine(lineNumber) print(traceback.format_exc(limit=-count)) sys.path.remove(directory) PytoClasses.ReviewHelper.shared.launches = PytoClasses.ReviewHelper.shared.launches+1 PytoClasses.ReviewHelper.shared.requestReview() PytoClasses.Python.shared.isScriptRunning = False thread = threading.Thread(target=run, args=()) def loop(): while PytoClasses.Python.shared.isScriptRunning: time.sleep(1) ignoredThreads.append(thread) raise Exception("Stopped script!") def runLoop(): try: loop() except: pass thread.start() runLoop() return __script__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runScript(path=None):\n if path:\n exec(compile(open(path, \"rb\").read(), path, 'exec'))", "def do_exec(self, arg):\n self.run_file(arg['path'])", "def _run_file(file_path, globals_):\n script_name = os.path.basename(file_path)\n\n sys.path = (_PATHS.script_paths(script_name) +\n _PATHS.scrub_path(script_name, sys.path))\n\n fix_google_path()\n\n execfile(_PATHS.script_file(script_name), globals_)", "async def _run_script(self, path: Path) -> None:\n with open(path, 'r') as f:\n self.conn.executemany(f.read())", "def run_file(file_path, globals_, script_dir=SCRIPT_DIR):\n fix_sys_path()\n script_name = os.path.basename(file_path)\n script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)\n script_path = os.path.join(script_dir, script_name)\n print script_path\n execfile(script_path, globals_)", "def do_run_script(self, arg):\n try:\n with open(os.path.join(os.getcwd(), arg), 'r') as fin:\n script = fin.readlines()\n for line in script:\n self.onecmd(line)\n except (FileNotFoundError) as exc:\n print(exc)", "def run_call(path: Path) -> None:\n if not (path / \"__main__.py\").exists():\n return\n try:\n subprocess.check_call([sys.executable, path.as_posix()], stdout=subprocess.DEVNULL)\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f\"Path {path} cannot be imported: {e}\") from None", "def script_test(path):\n log.info(\" ... EXECUTING {}\".format(str(path)))\n\n cmd = [sys.executable, str(path)]\n cp = subprocess.run(cmd, stderr=subprocess.PIPE)\n if cp.returncode:\n log.info(\" ... FAILED\")\n log.info(\" ___ TRACEBACK\")\n log.info(cp.stderr.decode(\"utf-8\") + \"\\n\\n\")\n return False\n else:\n log.info(\" ... PASSED\")\n return True", "def run(path):\n # https://github.com/friendlycode/grparks/issues/20\n print(\"TODO: modify file here\")\n print(path)", "def run_script (script, *l) :\n if not os.path.exists (script) :\n raise PQHException (\"file %s not found\" % script)\n py = get_interpreter_path ()\n cmd = \"%s %s\" % (py, script)\n if len (l) > 0 :\n cmd += \" \" + \" \".join ( [str (x) for x in l])\n out,err = run_cmd (cmd)\n return out,err", "def open_script(script_path):\n pass", "def execute(file_path):\n os.startfile(file_path)", "def runScript(self, script):\n data = FilePath(__file__).parent().child('data')\n sample_file = data.child('1.input.ofx')\n\n args = (script, [sample_file.path])\n log.msg('executing %r' % (args,))\n out, err, rc = yield utils.getProcessOutputAndValue(*args, env=None)\n log.msg('rc: %r' % (rc,))\n log.msg('out: %r' % (out,))\n log.msg('err: %r' % (err,))\n if rc != 0:\n self.fail(\"Failed: %s\\n\\n%s\" % (out, err))", "def run_script(self):\n pass", "def PyHiew_ExecuteScript(script, g, strip_path = False):\r\n PY_COMPILE_ERR = None\r\n try:\r\n execfile(script, g)\r\n except Exception, e:\r\n PY_COMPILE_ERR = str(e) + \"\\n\" + traceback.format_exc()\r\n PY_COMPILE_ERR = PY_COMPILE_ERR.replace(\r\n script[:-len(os.path.basename(script))],\r\n '')\r\n if PYHIEW_SHOW_EXEC_ERRORS:\r\n MessageBox(PY_COMPILE_ERR)\r\n\r\n return PY_COMPILE_ERR", "def run_script(self, params, config_no):\n raise NotImplementedError()", "def run(self, script, *args, **kwargs):\n return self._run('run', script, *args, **kwargs)", "def exec_script(self, script):\n filename = os.path.join(self.script_dir, script + \".sh\")\n # http://docs.python.org/library/os.html#os.X_OK\n if os.access(filename, os.X_OK):\n with open(filename):\n subprocess.call(filename)\n self.vibrate(0.1)", "def RunCmdFile(self, path):\n if not self.sim42interp 
== self.shell.interp:\n self.UseCommandInterface(True)\n self.shell.run(\"read \" + path, prompt=0, verbose=0)", "def run_setup_script(self, script_path):\n try:\n f = open(script_path, 'r')\n setup_script = f.read()\n # print(setup_script)\n c = self.conn.cursor()\n c.executescript(setup_script)\n except (Error, IOError) as e:\n print('[Datanase] Error:')\n print(e)", "def run_script(self, pathname, caller=None):\n self.msg(2, \"run_script\", pathname)\n\n pathname = os.path.realpath(pathname)\n m = self.findNode(pathname)\n if m is not None:\n return m\n\n if sys.version_info[0] != 2:\n with open(pathname, 'rb') as fp:\n encoding = util.guess_encoding(fp)\n\n with open(pathname, _READ_MODE, encoding=encoding) as fp:\n contents = fp.read() + '\\n'\n if contents.startswith(BOM):\n # Ignore BOM at start of input\n contents = contents[1:]\n\n else:\n with open(pathname, _READ_MODE) as fp:\n contents = fp.read() + '\\n'\n\n co_ast = compile(contents, pathname, 'exec', ast.PyCF_ONLY_AST, True)\n co = compile(co_ast, pathname, 'exec', 0, True)\n m = self.createNode(Script, pathname)\n self._updateReference(caller, m, None)\n self._scan_code(m, co, co_ast)\n m.code = co\n if self.replace_paths:\n m.code = self._replace_paths_in_code(m.code)\n return m", "def exec_file(self, path):\n assert os.path.isabs(path)\n\n source = None\n\n try:\n with open(path, 'rt') as fd:\n source = fd.read()\n except Exception as e:\n raise SandboxLoadError(self._context.source_stack,\n sys.exc_info()[2], read_error=path)\n\n self.exec_source(source, path)", "def execute(self, code, environment = dict()):\r\n if not self.config.get('scripting', 'enable') and type(code) == str:\r\n self.send(code, log = False)\r\n else:\r\n if type(code) == str:\r\n c = compile(code, 'errors.log', 'exec')\r\n else:\r\n c = code\r\n eval(c, self.getEnvironment(environment))", "def call_script(self, script):\n filename, callable = script.rsplit(':', 1)\n filename = os.path.abspath(filename)\n module = imp.load_source('script', filename)\n script = getattr(module, callable.strip())\n\n try:\n script(self.options, self.buildout, self.augmented_environment())\n except TypeError:\n # BBB: Support hook scripts that do not take the environment as\n # the third parameter\n script(self.options, self.buildout)", "def exec_file(path: str, global_vars: Dict[str, Any]) -> None:\n with open(path) as file:\n exec(compile(file.read(), path, \"exec\"), global_vars) # pylint: disable=exec-used", "def run(filename):\n try:\n with open(filename) as f:\n interp.runcode(f.read())\n except IOError as e:\n self.perror(e)", "def run_workdir(self, path):\n pass", "def run(path):\n config = conf.get_yaml_field(gl.configFile)\n exe_con = config['ENABLE_EXECUTION']\n exe_num = config['EXECUTION_NUM']\n rerun = config['ENABLE_RERUN']\n reruns_nums = config['RERUN_NUM']\n repeat = config['ENABLE_REPEAT']\n repeat_num = config['REPEAT_NUM']\n exec_mode = config['ENABLE_EXEC_MODE']\n debug_mode = config['ENABLE_DEBUG_MODE']\n last_failed = config['ENABLE_LAST_FAILED']\n failed_first = config['ENABLE_FAILED_FIRST']\n\n # custom function\n RunTestCase.copy_custom_function()\n\n # failed first\n failed_first_args = (' --ff ' if failed_first else '') if not last_failed else ''\n\n # last failed\n last_failed_args = (' --lf ' if last_failed else '') if not failed_first else ''\n\n # Enable repeat case.\n repeat_args = ' --count={} '.format(repeat_num) if repeat else ''\n\n # Enable CPU concurrency\n py_args = ' -n {} '.format(exe_num) if exe_con else ''\n\n # Enable failed 
retry\n reruns_args = ' --reruns {} '.format(reruns_nums) if rerun else ''\n\n # debug mode print debug info.\n debug = '' if debug_mode else '--tb=no'\n\n \"\"\"\n Load the pytest framework,\n which must be written here or DDT will be loaded first.\n from httptesting.case import test_load_case\n \"\"\"\n case_path = gl.loadcasePath\n # Output mode console or report.\n if exec_mode:\n cmd = 'cd {} && py.test -q -s {} {} {} {}'.format(\n case_path, reruns_args, 'test_load_case.py',\n repeat_args, debug\n )\n else:\n cmd = 'cd {} && py.test {} {} {} {} {} {} --html={} {} --self-contained-html'.format(\n case_path,\n py_args,\n reruns_args,\n last_failed_args,\n failed_first_args,\n 'test_load_case.py',\n repeat_args,\n path,\n debug\n )\n try:\n os.system(cmd)\n except (KeyboardInterrupt, SystemExit):\n print('已终止执行.')", "def main():\n if os.path.isdir(path):\n for filename in os.listdir(path):\n if filename.endswith('.asm'):\n execute_asm_file(path + '/' + filename, filename)\n else:\n execute_asm_file(path, path[path.rfind(\"/\") + 1:])", "def run_script(func):\n try:\n sys.exit(func(sys.argv[1:], STDIN, sys.stdout))\n except KeyboardInterrupt:\n logger.error(\"Interrupted\")\n sys.exit(EXIT_ERROR)", "def runScript(self, script):\n c = self\n game = self.game\n app = self.game.app\n shell = self.shell\n sprite = self.sprite\n s = shell\n self = self.env\n exec(open(\"script/\" + script).read())", "def exec_python_script(self, filepath=False, script_txt=False):\n if script_txt is False and type(filepath) is str:\n with open(filepath, 'r') as file_:\n script_txt = file_.read()\n \n elif type(script_txt) is str and filepath is False:\n filepath = \"inline-script\"\n \n else:\n SystemError(\"'exec_python_script' function used incorrectly!\"\n +\" Choose either script_txt or filepath\")\n\n # Declare all the variables in the global scope so the user can use them\n _vars = {var_name: getattr(self, var_name) for var_name in self.variables}\n\n\n # Run the script in a try loop\n try:\n exec(script_txt, _vars)\n except Exception as e:\n err_msg = repr(e)\n if hasattr(e, 'txt'):\n err_msg = \"Error in your python code.\\n\\n\"+f\"Script: {filepath}\" + \"\\n\"\n if hasattr(e, \"lineno\"):\n err_msg += f\"Bad Line: {e.text}\" + \"\\n\" + f\"Line Num: {e.lineno}\"\n err_msg += \"\\nError Msg: \" + f\"{e.msg}\"\n\n ltxt = script_txt.split(\"\\n\")\n if hasattr(e, \"lineno\"):\n ltxt[e.lineno-1] += \" <------- BAD LINE\"\n err_msg += \"\\n\\n\\n\\n\\n\\nPython Script:\\n\" + '\\n'.join(ltxt)\n\n self.print_error(err_msg)\n\n for var_name in _vars:\n setattr(self, var_name, _vars[var_name])\n if var_name not in self.variables: self.variables.append(var_name)", "def run(self):\n try:\n self.runCommand()\n except TortugaException as ex:\n print(ex.getErrorMessage())\n raise SystemExit(ex.getErrorCode())\n except SystemExit:\n raise\n except Exception as ex:\n print(str(ex))\n raise SystemExit(-1)", "def runScript(*args, **kwargs):\n env = os.environ.copy()\n env['PYTHONPATH'] = os.pathsep.join(sys.path)\n return chromium_utils.RunCommand(*args, env=env, **kwargs)", "def run(path, cmd):\n logging.info('Processing %s', path)\n logging.debug('Running: %s', ' '.join(cmd))\n subprocess.call(cmd)", "def run_file(self, fpath):\n with open(fpath, \"r\", encoding=\"utf-8\") as fin:\n return self.run_commands(fin.read())", "def _do_run(self, path, args):\n try:\n self.router.route(path, args)\n except TypeError, e:\n # To catch the follow errors\n # TypeError: xxxx got an unexpected keyword argument 'k'\n # 
TypeError: 'print_my_good() takes at least 1 argument (0 given)'\n print \"run job %s with arg < %s > error:\" % (path, \", \".join(args))\n print \"%s\" % e", "def test_script_integrity(capsys):\n script = os.path.abspath(\"examples/scikitlearn-iris/main.py\")\n\n return_code = subprocess.call([\"python\", script, \"0.1\"])\n\n assert return_code != 2, \"The example script does not exists.\"\n assert return_code != 1, \"The example script did not terminates its execution.\"\n assert (\n return_code == 0 and not capsys.readouterr().err\n ), \"The example script encountered an error during its execution.\"", "def run(self):\r\n try:\r\n file_path = os.path.join(self.temp, \"debugtalk.py\")\r\n loader.FileLoader.dump_python_file(file_path, self.__code)\r\n self.resp = decode(subprocess.check_output([EXEC, file_path], stderr=subprocess.STDOUT, timeout=60))\r\n\r\n except subprocess.CalledProcessError as e:\r\n self.resp = decode(e.output)\r\n\r\n except subprocess.TimeoutExpired:\r\n self.resp = 'RunnerTimeOut'\r\n\r\n shutil.rmtree(self.temp)", "def run_execute_file(file_path, globals=None, locals=None):\n if globals is None:\n globals = {}\n globals.update({\n \"__file__\": file_path,\n \"__name__\": \"__main__\",\n })\n with open(file_path, 'rb') as file:\n exec(compile(file.read(), file_path, 'exec'), globals, locals)", "def run_mypy(path: Path) -> None:\n try:\n output = subprocess.check_output(\n [sys.executable, \"-m\", \"mypy\", path.as_posix()],\n stderr=subprocess.STDOUT,\n encoding=\"utf8\",\n )\n except subprocess.CalledProcessError as e:\n output = e.output\n errors = []\n for message in output.splitlines():\n if not message or message.startswith(\"Found\"):\n continue\n if any(imsg in message for imsg in IGNORE_MYPY_ERRORS):\n continue\n errors.append(message)\n\n if errors:\n raise SnapshotMismatchError(\"\\n\".join(errors)) from None", "def run_as_script(scenario_path=None):\n import cea.globalvar\n gv = cea.globalvar.GlobalVariables()\n\n if scenario_path is None:\n scenario_path = gv.scenario_reference\n\n locator = cea.inputlocator.InputLocator(scenario_path=scenario_path)\n weather_file = locator.get_default_weather()\n moo_optimization(locator=locator, weather_file= weather_file, gv=gv)\n\n print 'test_optimization_main() succeeded'", "def run_python_script(package=None, module=None, args=[], p_args=[]):\n assert module is not None\n assert isinstance(args, (tuple, list)) and isinstance(p_args, (tuple, list))\n path = python_script_exists(package, module)\n run_program(sys.executable, p_args + [path] + args)", "def run_file(self, user_input):\n # Extract the important information\n self.path, self.name = self.extractor.extract_program_information(user_input)\n\n # Determine what language the program is\n program_type = self.determine_program_type(path, name)\n\n # If the file is python, run it the specific way\n # @TODO: Make it work without shell=True\n if program_type == \"python\":\n subprocess.Popen(\"python \" + self.path + self.name, shell=True)", "def execute(self, args=\"\"):\r\n return super(PythonScript, self).execute(_EXECUTABLE, args)", "def __try_exec_line(self, line: Text) -> None:\n try:\n exec(line, self.vars)\n except Exception as err:\n print(f'Issue during execution of setup: {err}')\n print(f'Line was: {line}')\n return # TODO: does this stop the process??", "def run(self):\n # Transform paths in absolute paths since we'll change the working directory\n input_files = {local + os.path.splitext(path)[1]: os.path.abspath(path)\n for local, path in 
listitems(self._file_paths) if 'moli' in local}\n output_files = {local + os.path.splitext(path)[1]: os.path.abspath(path)\n for local, path in listitems(self._file_paths) if 'molo' in local}\n\n # Resolve all the names in the script\n local_files = {local: local + os.path.splitext(path)[1]\n for local, path in listitems(self._file_paths)}\n script = self._script.format(**local_files) + 'quit\\n'\n\n with mdtraj.utils.enter_temp_directory():\n # Copy input files\n for local_file, file_path in listitems(input_files):\n shutil.copy(file_path, local_file)\n\n # Save script and run tleap\n with open('leap.in', 'w') as f:\n f.write(script)\n leap_output = subprocess.check_output(['tleap', '-f', 'leap.in']).decode()\n\n # Save leap.log in directory of first output file\n if len(output_files) > 0:\n #Get first output path in Py 3.X way that is also thread-safe\n for val in listvalues(output_files):\n first_output_path = val\n break\n first_output_name = os.path.basename(first_output_path).split('.')[0]\n first_output_dir = os.path.dirname(first_output_path)\n log_path = os.path.join(first_output_dir, first_output_name + '.leap.log')\n shutil.copy('leap.log', log_path)\n\n # Copy back output files. If something goes wrong, some files may not exist\n error_msg = ''\n try:\n for local_file, file_path in listitems(output_files):\n shutil.copy(local_file, file_path)\n except IOError:\n error_msg = \"Could not create one of the system files.\"\n\n # Look for errors in log that don't raise CalledProcessError\n error_patterns = ['Argument #\\d+ is type \\S+ must be of type: \\S+']\n for pattern in error_patterns:\n m = re.search(pattern, leap_output)\n if m is not None:\n error_msg = m.group(0)\n break\n\n if error_msg != '':\n raise RuntimeError(error_msg + ' Check log file {}'.format(log_path))\n\n # Check for and return warnings\n return re.findall('WARNING: (.+)', leap_output)", "def open_program(path):\r\n os.startfile(path)", "def test_works(self):\n script = FilePath(__file__).parent().parent().parent() \\\n .child('scripts').child('parsefin').path\n\n return self.runScript(script)", "def _execute(script, prefix=None, path=None):\n path = tempfile.gettempdir() if path is None else path\n result = 1\n try:\n fh = tempfile.NamedTemporaryFile('w', delete=False)\n fh.write(script)\n fh.close()\n print('Executing script below with cwd=%s\\n{{{\\n%s\\n}}}\\n' %\n (path, script))\n try:\n os.chmod(fh.name, stat.S_IRWXU)\n env = os.environ.copy()\n if prefix is not None:\n env['COLCON_BUNDLE_INSTALL_PREFIX'] = prefix\n result = subprocess.run(\n fh.name, cwd=path, env=env, stdout=PIPE, stderr=PIPE,\n universal_newlines=True)\n if result.stdout is not None:\n logger.debug('stdout output: \\n' + result.stdout)\n if result.stderr is not None:\n logger.warn('stderr output: \\n' + result.stderr)\n except OSError as ex:\n print('Execution failed with OSError: %s' % ex)\n finally:\n if os.path.exists(fh.name):\n os.remove(fh.name)\n logger.info('Return code was: %s' % result)\n return result.returncode == 0", "def pyscript(fp, **context):\n try:\n exec fp in context\n except SystemExit:\n pass\n return context['response']", "def test_script(self) -> None:\n main()", "def compile_run(\n path,\n host,\n params={}\n ):\n\n compiled_path = MyCLI.compile(path)\n MyCLI.run(compiled_path, host, params)", "def run_script(input_file, run_dir, script_name, interpreter='python'):\n from paver.runtime import sh\n from paver.path import path\n docdir = path(input_file).dirname()\n output_text = sh('cd 
%(docdir)s/%(run_dir)s;%(interpreter)s %(script_name)s 2>&1' % vars(),\n capture=True)\n response = '\\n::\\n\\n\\t$ %(interpreter)s %(script_name)s\\n\\t' % vars()\n response += '\\n\\t'.join(output_text.splitlines())\n while not response.endswith('\\n\\n'):\n response += '\\n'\n return response", "def main() -> None:\n try:\n run()\n except errors.BaseError as e:\n sys.stderr.write(f'{str(e)}\\n')\n sys.exit(e.code)", "def run_script(script_path, cwd='.'):\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n try:\n proc = subprocess.Popen(script_command, shell=run_thru_shell, cwd=cwd) # nosec\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n f'Hook script failed (exit status: {exit_status})'\n )\n except OSError as err:\n if err.errno == errno.ENOEXEC:\n raise FailedHookException(\n 'Hook script failed, might be an empty file or missing a shebang'\n ) from err\n raise FailedHookException(f'Hook script failed (error: {err})') from err", "def execute(self):\n teardown_verbosity = self._vars.PEX_TEARDOWN_VERBOSE\n try:\n with self.patch_sys():\n working_set = self._activate()\n TRACER.log('PYTHONPATH contains:')\n for element in sys.path:\n TRACER.log(' %c %s' % (' ' if os.path.exists(element) else '*', element))\n TRACER.log(' * - paths that do not exist or will be imported via zipimport')\n with self.patch_pkg_resources(working_set):\n self._wrap_coverage(self._wrap_profiling, self._execute)\n except Exception:\n # Allow the current sys.excepthook to handle this app exception before we tear things down in\n # finally, then reraise so that the exit status is reflected correctly.\n sys.excepthook(*sys.exc_info())\n raise\n except SystemExit as se:\n # Print a SystemExit error message, avoiding a traceback in python3.\n # This must happen here, as sys.stderr is about to be torn down\n if not isinstance(se.code, int) and se.code is not None:\n print(se.code, file=sys.stderr)\n raise\n finally:\n # squash all exceptions on interpreter teardown -- the primary type here are\n # atexit handlers failing to run because of things such as:\n # http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit\n if not teardown_verbosity:\n sys.stderr.flush()\n sys.stderr = DevNull()\n sys.excepthook = lambda *a, **kw: None", "def main():\n arg0 = sys.argv[0]\n if not os.path.isfile(arg0):\n sys.exit(\"sys.argv[0] is not a path to a file: \\\"\" + str(arg0) + \"\\\". Exiting now.\")\n absolute_path_to_file = os.path.realpath(arg0) # realpath follows symlinks, which is what we want in this case.\n absolute_path_to_src = os.path.dirname(absolute_path_to_file)\n (absolute_path_to_repo, src_dirname) = os.path.split(absolute_path_to_src)\n if src_dirname != \"src\":\n sys.exit(\"The driver script should be located in directory \\\"src\\\". It is instead in \\\"\" + src_dirname + \"\\\". 
Exiting now.\")\n os.chdir(absolute_path_to_repo)", "def run_script(self, script_name, script_args=None, node_paths=None):\n # TODO: consider add a pants.util function to manipulate command line.\n package_manager_args = self._get_run_script_args()\n package_manager_args.append(script_name)\n if script_args:\n package_manager_args.append('--')\n package_manager_args.extend(script_args)\n return self.run_command(args=package_manager_args, node_paths=node_paths)", "def test_script(self):\n path = Template().get_script()\n self.assertTrue(os.path.exists(path))", "def Non_VASP_Script(my_project):\n\n WORKFLOWS = my_project['Workflow']\n Workflow_Params = WORKFLOWS['Steps'][2]\n Workflow_name = Workflow_Params['NAME']\n job_dir = my_project['NAME'] + Workflow_Params['NAME']\n chkpt = job_dir + '.json'\n prev_filter = Workflow_Params['Continue']['Filter']\n prev_chkpt = Workflow_Params['Continue']['Source']\n Script = Workflow_Params['Script']\n executable = Script['Executable']\n non_arg_inputs = Script['NonArgInput']\n arg_inputs = Script['ArgInput']\n\n rerun_paths = continue_job_inputs(chkpt_files= prev_chkpt,\\\n user_filters=prev_filter)\n\n # Run the script now at the rerun_paths\n for r in rerun_paths:\n if inputs:\n shutil.copy(inputs, r)\n os.chdir(r)\n print ('Running {0} in {1}'.format(executable, r))\n script_output = sp.run([executable]+ arg_inputs, stdout=sp.PIPE).stdout.decode('utf-8')\n \n\n return None", "def run_script(self, filename=None, silent=False, set_focus=False):\r\n if filename is None:\r\n self.shell.restore_stds()\r\n filename = QFileDialog.getOpenFileName(self,\r\n self.tr(\"Run Python script\"), os.getcwdu(),\r\n self.tr(\"Python scripts\")+\" (*.py ; *.pyw)\")\r\n self.shell.redirect_stds()\r\n if filename:\r\n filename = unicode(filename)\r\n os.chdir( os.path.dirname(filename) )\r\n filename = os.path.basename(filename)\r\n self.emit(SIGNAL(\"refresh()\"))\r\n else:\r\n return\r\n command = \"execfile(%s)\" % repr(osp.abspath(filename))\r\n if set_focus:\r\n self.shell.setFocus()\r\n if self.dockwidget and not self.ismaximized:\r\n self.dockwidget.setVisible(True)\r\n self.dockwidget.raise_()\r\n if silent:\r\n self.shell.write(command+'\\n')\r\n self.shell.run_command(command)\r\n else:\r\n self.shell.write(command)", "def exec_from_inputfile(args):\n args.path = os.path.abspath(args.path)\n if not check(args.path, 'e'):\n clean_up(args.debug, args.folder, args.action, 1)\n\n logger.info(\"You are using the inputfile. All parameters other than folder, API key and debug will be ignored\")\n try:\n startargs = readconfig(args.path)\n makeconfig(*startargs[:13], date=args.today, folder=args.folder)\n\n r = Run('n', args.folder, args.debug)\n r.start()\n\n except TypeError:\n logger.critical(\"Wrong data format. 
Check the documentation\")\n clean_up(args.debug, args.folder, args.action, 1)", "def run_script(extension_invocation_info):\n acm.RunModuleWithParameters(__name__, acm.GetDefaultContext())", "def run_as_script(scenario_path=None):\n import cea.globalvar\n import cea.inputlocator as inputlocator\n\n gv = cea.globalvar.GlobalVariables()\n\n if scenario_path is None:\n scenario_path = gv.scenario_reference\n\n locator = inputlocator.InputLocator(scenario_path=scenario_path)\n total_demand = pd.read_csv(locator.get_total_demand())\n building_names = pd.read_csv(locator.get_total_demand())['Name']\n\n substation_main(locator, total_demand, total_demand['Name'], gv, False)\n\n print 'substation_main() succeeded'", "def test_exit_on_missing_file(self):\n with self.assertRaises(SystemExit):\n pyint = Interpreter()\n pyint.run(file=MISSING_FILE)", "def test_execute_and_import():\n code = dedent('''\n import os\n print os.path\n ''')\n results = ExecuteCode.execute_code(code)\n\n assert results != None\n assert results != ''", "def run_script(script_file: str, config_file: str, **kwargs: Any) -> None:\n # Add config path and current working directory to sys.path to correctly load the configuration\n script_filepath = Path(script_file)\n config_filepath = Path(config_file)\n sys.path.insert(0, script_filepath.resolve().parent.as_posix())\n sys.path.insert(0, config_filepath.resolve().parent.as_posix())\n sys.path.insert(0, os.getcwd())\n\n module = load_module(script_filepath)\n _check_script(module)\n\n run_fn = module.__dict__[\"run\"]\n\n # Lazy setup configuration\n config = ConfigObject(config_filepath, script_filepath=script_filepath)\n\n run_fn(config, **kwargs)", "def run(self, filePath = None):\n\n\t\t\tfileName = self._getFilePath(filePath = filePath)\n\t\t\ttry:\n\t\t\t\tos.startfile(fileName)\n\t\t\texcept AttributeError:\n\t\t\t\tsubprocess.call(['open', fileName])", "def invoke(self):\n self.exitCode = self.script()", "def RunScript(self):\r\n if (not os.path.isfile(self.configDict[\"xsiPath\"])):\r\n print \"XSI does not exist\"\r\n return True\r\n \r\n print (\"start running \" + os.path.basename(self.__script.name))\r\n returnValue = self.RunApplication(self.configDict[\"xsiPath\"] + \r\n \" -script \\\"\" + self.__script.name + \"\\\" > NUL 2>>&1\", \r\n self.__workingDir)\r\n \r\n if (returnValue == 0):\r\n print \"finished running \" + os.path.basename(self.__script.name)\r\n else:\r\n print \"crashed running \" + os.path.basename(self.__script.name)\r\n # since may not be able to do anything with the generated files\r\n return False \r\n \r\n # XSI generates unicode logs -- convert to UTF-8\r\n (encoder, decoder, reader, writer) = codecs.lookup(\"utf-16-le\")\r\n for logfile in self.__logFiles:\r\n logInMemory = \"\"\r\n log = reader(open(logfile))\r\n line = log.readline()\r\n while (line):\r\n logInMemory = logInMemory + line\r\n line = log.readline()\r\n log.close()\r\n logInMemory.encode(\"utf-8\")\r\n log = open(logfile, \"w\")\r\n log.write(logInMemory)\r\n log.close()\r\n \r\n for logfile in self.__importLogFiles:\r\n logInMemory = \"\"\r\n log = open(logfile)\r\n line = log.readline()\r\n while (line):\r\n warningFind = line.find(\"WARNING\")\r\n ctfPathFind = line.find(FXsi.__REPLACE_PATH)\r\n if ((warningFind != -1) and (ctfPathFind != -1)):\r\n line = line.replace(\"WARNING\", \"CTF_OVERRIDE\")\r\n logInMemory = logInMemory + line\r\n line = log.readline()\r\n log.close()\r\n log = open(logfile, \"w\")\r\n log.write(logInMemory)\r\n log.close()\r\n \r\n # since XSI 
has to save in its own project tree, need to relocate files\r\n for projectPath, testProcedurePath in self.__pathMap:\r\n for entry in os.listdir(projectPath):\r\n shutil.move(os.path.join(projectPath, entry), \r\n os.path.join(testProcedurePath, entry))\r\n \r\n return True", "def sub_process(path, student_name, course_name, block_id) :\n\t\n\tcommand = ['python', '../lib/python2.7/site-packages/eyeGaze.py', path, student_name, course_name, block_id]\n\tprocess_call = subprocess.call(command)", "def run(self):\n\n pwd = self.chdir()\n if pwd is None: return -1\n res = mkstuff.run_cmd(self.bindir + '/' + self.func + ' ' + self.args)\n os.chdir(pwd)\n return res", "def run_script(input_file, script_name, interpreter='python'):\r\n from paver.easy import sh\r\n from paver.path import path\r\n rundir = path(input_file).dirname()\r\n output_text = sh('cd %(rundir)s && %(interpreter)s %(script_name)s 2>&1' % vars(), capture=True)\r\n response = '\\n::\\n\\n\\t$ %(interpreter)s %(script_name)s\\n\\t' % vars()\r\n response += '\\n\\t'.join(output_text.splitlines())\r\n while not response.endswith('\\n\\n'):\r\n response += '\\n'\r\n return response", "def run_file(filename, logfile=None, execdir=None):\n if not runpy_available: #pragma:nocover\n raise pyutilib.common.ConfigurationError(\"Cannot apply the run_file() function because runpy is not available\") \n #\n # Open logfile\n #\n if not logfile is None:\n sys.stderr.flush()\n sys.stdout.flush()\n save_stdout = sys.stdout\n save_stderr = sys.stderr\n OUTPUT=open(logfile,\"w\")\n sys.stdout=OUTPUT\n sys.stderr=OUTPUT\n #\n # Add the file directory to the system path\n #\n if '/' in filename:\n tmp= \"/\".join((filename).split(\"/\")[:-1])\n tmp_import = (filename).split(\"/\")[-1]\n sys.path.append(tmp)\n elif '\\\\' in filename:\n tmp = \"\\\\\".join((filename).split(\"\\\\\")[:-1])\n tmp_import = (filename).split(\"\\\\\")[-1]\n sys.path.append(tmp)\n else:\n tmp_import = filename\n name = \".\".join((tmp_import).split(\".\")[:-1])\n #\n # Run the module\n #\n try:\n if not execdir is None:\n tmp=os.getcwd()\n os.chdir(execdir)\n tmp_path = sys.path\n sys.path = [execdir] + sys.path\n runpy.run_module(name,None,\"__main__\")\n if not execdir is None:\n os.chdir(tmp)\n sys.path = tmp_path\n except Exception: #pragma:nocover\n if not logfile is None:\n OUTPUT.close()\n sys.stdout = save_stdout\n sys.stderr = save_stderr\n raise\n #\n # Close logfile\n #\n if not logfile is None:\n OUTPUT.close()\n sys.stdout = save_stdout\n sys.stderr = save_stderr", "def runfile(self, s):\n return self.shell.ex(load_wrap(s, attach=False))", "def RunScript(code):\n with ScriptContext() as script_module:\n try:\n exec code in script_module.__dict__\n except:\n # Get exception output as close to exec as possible.\n # We don't take the first entry in the traceback because it just contains\n # \"exec\". 
Everything after that is the submitted code.\n try:\n etype, evalue, tb = sys.exc_info()\n traceback.print_exception(etype,\n evalue,\n tb.tb_next, # one frame up\n file=sys.stderr)\n finally:\n del tb # break circular references when using exc_info\n\n return sys.stdout.getvalue(), sys.stderr.getvalue()", "def main(args):\n\n # Get additional args for the script\n if len(args) > 1:\n scriptargs = args[1:]\n else:\n scriptargs = None\n\n scriptpath = find_script(args[0])\n\n if not scriptpath:\n print('\\nCannot find that script!: {}'.format(args[0]))\n return 1\n\n # Check for extra input needed.\n forgotargs = check_input(scriptpath, scriptargs)\n if forgotargs:\n if not scriptargs:\n scriptargs = forgotargs\n else:\n scriptargs.extend(forgotargs)\n # Shell script..\n shellret = shell_script(scriptpath, scriptargs)\n if shellret:\n print('wrun: script returned non-zero!: {}'.format(shellret))\n\n return shellret", "def scriptChecker(filename):\n if not os.path.exists(filename):\n print 'ERROR: %s does not exist' % filename\n import errno\n return errno.ENOENT\n\n # The script-checker program is called directly. If we call the code\n # from this python interpreter, any changes to an observing script will\n # not be noticed.\n #\n # This is due to the way python works: a second import statement of the\n # same module does nothing!\n import subprocess\n script = helpers.getCarmaBuildPath() + '/scripts/script-checker'\n cmd = [script, filename]\n ret = subprocess.call(cmd)\n if ret != 0:\n print 'ERROR: script-checker returned status code:', ret", "def execute_script(self, action, *args):\n self.host.cmd(('./%s' + len(args) * ' %s') % (action, *args))", "def _run_exact_solution(solution_dir, test=False):\n\n sys.path.append(solution_dir)\n # add to allow util.py import from day's directory\n sys.path.append(os.path.dirname(solution_dir))\n import solution\n\n if test:\n try:\n solution.run\n except AttributeError:\n solution_filepath = os.path.join(solution_dir, _SOLUTION_FILENAME)\n msg = \"The problem solution {0} does not contain a run() function!\"\n raise EnvironmentError(msg.format(solution_filepath))\n\n solution.test()\n\n # if we hit this, no exceptions, so success\n return \"Success!\"\n else:\n input_val = get_input_for_problem(solution_dir)\n return solution.run(input_val)", "def __call__(self, basepath: str, scriptpath: str) -> Process:\n ...", "def _run_python(self, pyscript, py_version='python'):\n return self.client_remote.run(args=[py_version, '-c', pyscript],\n wait=False)", "def _run_simulator(self):\n os.chdir(self.test_cases_path)\n\n simulator_config_filename = self.simulator_config_filename\n script, options = runner.parse_commands(simulator_config_filename)\n\n if sys.platform.startswith('win'):\n subprocess.call([script] + options, shell=True)\n else:\n subprocess.call([script] + options)\n\n os.chdir(self.this_file_path)", "def import_code(code_path: Optional[Union[Path, str]]) -> None:\n if code_path is not None:\n if not Path(code_path).exists():\n msg.fail(\"Path to Python code not found\", code_path, exits=1)\n try:\n import_file(\"python_code\", code_path)\n except Exception as e:\n msg.fail(f\"Couldn't load Python code: {code_path}\", e, exits=1)", "def run_import(path: Path) -> None:\n if not (path / \"__main__.py\").exists():\n return\n try:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"--no-input\", path.parent.as_posix()],\n stdout=subprocess.DEVNULL,\n )\n if (path / \"__main__.py\").exists():\n subprocess.check_call(\n 
[sys.executable, \"-c\", f\"import {path.name}\"],\n stdout=subprocess.DEVNULL,\n )\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"uninstall\", \"--no-input\", \"-y\", path.name],\n stdout=subprocess.DEVNULL,\n )\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f\"Path {path} cannot be imported: {e}\") from None", "def run_module(self, path):\n\n module = self.import_module(path)\n result = None\n\n if module:\n try:\n result = module.run()\n except AttributeError:\n self.error('Error Running Module: Missing run() method.')\n except Exception:\n e = sys.exc_info()[1]\n traceback = sys.exc_info()[2]\n self.warning('Exeption caught in module: {0} line: {1}'.format(\n e,\n traceback.tb_lineno))\n self.calls.append({path: result})\n state.save_hook_call(path, result)\n return result", "def run_script(self, script, env=None, return_output=False):\n command = [\"/bin/sh\", \"-e\"]\n command.append(script)\n\n return self.run(command, env, return_output)", "def _run_script(fullname):\n name = posixpath.basename(fullname)\n if name[-3:] == '.py':\n name = name[:-3] # strip .py extension\n\n modname = [string.join(fullname.split('/')[0:-1],'/')]\n trylist = ((name, None), (name+'.py', None),\n (name, modname), (name+'.py', modname))\n\n # look for the module in standard locations, load it if you\n # find it, otherwise return 1\n for fname, path in trylist:\n try:\n if path:\n fp, pathname, description = imp.find_module(fname, path)\n else:\n fp, pathname, description = imp.find_module(fname)\n except ImportError:\n fp = None\n if fp:\n sys.argv[0] = pathname\n try:\n mod = imp.load_module('__main__', fp, pathname, description)\n finally:\n fp.close()\n return 1\n return 0", "def run(self):\n try:\n self._run()\n except Exception as err:\n # TODO: Do Task Failure to run exception handling\n pass", "def run(context, path=\"\"):\n common.success(f\"Tests {path} running \")\n return start.run_python(\n context,\n f\"-m pytest {path}\"\n )", "def run_file(self, value=None):\n self.save_file()\n self.p = Popen(\"./Project/myfile.py\", stdout=PIPE, stderr=PIPE)\n output, errors = self.p.communicate()\n self.my_output.delete(\"1.0\", END)\n self.my_output.insert(\"1.0\", output)\n if errors != \"\":\n print_to_log(errors)\n self.my_output.configure(fg=\"red\")\n else:\n self.my_output.configure(fg=\"white\")\n self.my_output.insert(\"1.0\", errors)", "def runscript(self, path):\n self.rpc.call(MsfRpcMethod.SessionMeterpreterScript, [self.sid, path])\n return self.read()", "def ExecuteScript(script):\n os.system(\"%s > /dev/null 2>&1\" % script)", "def do_pyscript(self, arg, opts=None):\n if not arg:\n self.perror(\"pyscript command requires at least 1 argument ...\", traceback_war=False)\n self.do_help('pyscript')\n return\n\n if not USE_ARG_LIST:\n arg = shlex.split(arg, posix=POSIX_SHLEX)\n\n # Get the absolute path of the script\n script_path = os.path.expanduser(arg[0])\n\n # Save current command line arguments\n orig_args = sys.argv\n\n # Overwrite sys.argv to allow the script to take command line arguments\n sys.argv = [script_path]\n sys.argv.extend(arg[1:])\n\n # Run the script - use repr formatting to escape things which need to be escaped to prevent issues on Windows\n self.do_py(\"run({!r})\".format(script_path))\n\n # Restore command line arguments to original state\n sys.argv = orig_args", "def run_gcode_file(self, path, **kwargs):\r\n return self._arm.run_gcode_file(path, **kwargs)", "def _run_script(fullname):\n name = posixpath.basename(fullname)\n 
if name[-3:] == '.py':\n name = name[:-3] # strip .py extension\n\n modname = [string.join(fullname.split('/')[0:-1],'/')]\n trylist = ((name, None), (name+'.py', None),\n (name, modname), (name+'.py', modname))\n\n # look for the modulate in standard locations, load it if you\n # find it, otherwise return 1\n for fname, path in trylist:\n try:\n if path:\n fp, pathname, description = imp.find_module(fname, path)\n else:\n fp, pathname, description = imp.find_module(fname)\n except ImportError:\n fp = None\n if fp:\n sys.argv[0] = pathname\n try:\n mod = imp.load_module('__main__', fp, pathname, description)\n finally:\n fp.close()\n return 1\n return 0", "def run(self):\n try:\n self.parse_args(None)\n self.execute_command()\n except FileExistsException, e:\n print \"Can't copy file as destination already exists.\"\n print \"Exiting...\"\n except Exception, e:\n print \"Exception occured: %s\\nExiting...\" % e", "def run_script_with_context(script_path, cwd, context):\n _, extension = os.path.splitext(script_path)\n\n with open(script_path, encoding='utf-8') as file:\n contents = file.read()\n\n with tempfile.NamedTemporaryFile(delete=False, mode='wb', suffix=extension) as temp:\n env = StrictEnvironment(context=context, keep_trailing_newline=True)\n template = env.from_string(contents)\n output = template.render(**context)\n temp.write(output.encode('utf-8'))\n\n run_script(temp.name, cwd)", "def run_script():\n # pylint: disable=unsupported-assignment-operation\n script_source.data['script'] = [inp_script.value]" ]
[ "0.72653073", "0.6912623", "0.68354243", "0.67718136", "0.67703193", "0.65939707", "0.6413898", "0.63070154", "0.61949843", "0.61937946", "0.6164765", "0.6151639", "0.60480416", "0.60265625", "0.60191596", "0.6006073", "0.5994495", "0.59873414", "0.5959248", "0.58620226", "0.5859723", "0.58492535", "0.5796345", "0.5794666", "0.57743543", "0.5767361", "0.57512784", "0.572017", "0.5686311", "0.5680942", "0.56612295", "0.56507105", "0.5648736", "0.5636002", "0.56233597", "0.56224227", "0.5614047", "0.5586191", "0.55745715", "0.5572075", "0.55717576", "0.5550373", "0.55480444", "0.5539774", "0.5538784", "0.5536208", "0.55325466", "0.55189264", "0.55176485", "0.5513116", "0.55130965", "0.5509538", "0.55078566", "0.5503161", "0.55003715", "0.5484156", "0.5455", "0.5452174", "0.5443917", "0.54387534", "0.54315877", "0.54080844", "0.5406283", "0.53957415", "0.5389169", "0.5388882", "0.5382392", "0.5372897", "0.53700256", "0.5366226", "0.5356897", "0.53565407", "0.53424084", "0.53405493", "0.53375256", "0.53316176", "0.5329205", "0.5328407", "0.53227645", "0.5321698", "0.5307784", "0.5304206", "0.52983683", "0.52953184", "0.52936727", "0.52872354", "0.52862215", "0.5277801", "0.52733916", "0.5263753", "0.5263567", "0.52472067", "0.5246504", "0.5244077", "0.5238478", "0.52296436", "0.5228479", "0.5224145", "0.5209982", "0.52029574" ]
0.7851395
0
Fast translation, rotation & scale in 2D using np.einsum in case input is not a single point
def fast_TRS_2d(input, transform_matrix, input_is_point=False):
    if input_is_point:
        return np.delete(np.dot(transform_matrix, np.insert(input, 2, 1)), 2)
    else:
        return np.delete(np.einsum('jk,ik->ij', transform_matrix, np.insert(input, 2, 1, axis=1)), 2, 1)
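For reference, the einsum pattern 'jk,ik->ij' applies the 3x3 homogeneous transform to every row of the (N, 3) point array in one call, i.e. it computes (transform_matrix @ points_h.T).T without the explicit transposes. A minimal usage sketch follows; it assumes numpy is imported as np, and make_trs_matrix is an illustrative helper (not part of the retrieved function) that builds a standard scale-rotate-translate matrix:

import numpy as np

# Illustrative helper (assumption): 3x3 homogeneous matrix that scales by s,
# rotates by angle_rad, then translates by (tx, ty).
def make_trs_matrix(tx, ty, angle_rad, s):
    c, si = np.cos(angle_rad), np.sin(angle_rad)
    return np.array([[s * c, -s * si, tx],
                     [s * si,  s * c, ty],
                     [0.0,     0.0,   1.0]])

M = make_trs_matrix(tx=2.0, ty=-1.0, angle_rad=np.pi / 2, s=2.0)
pts = np.array([[1.0, 0.0], [0.0, 1.0]])   # (N, 2) batch of points

print(fast_TRS_2d(pts, M))                                        # ~ [[2. 1.], [0. -1.]]
print(fast_TRS_2d(np.array([1.0, 0.0]), M, input_is_point=True))  # ~ [2. 1.]

# The batched einsum path is equivalent to a plain matmul on homogeneous rows:
pts_h = np.insert(pts, 2, 1, axis=1)                              # (N, 3)
assert np.allclose(np.einsum('jk,ik->ij', M, pts_h), (M @ pts_h.T).T)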
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transAffine2D( iScale=(1, 1), iTrans=(0, 0), iRot=0, iShear=(0, 0) ): \n iRot = iRot * np.pi / 180\n oMatScale = np.matrix( ((iScale[0],0,0),(0,iScale[1],0),(0,0,1)) )\n oMatTrans = np.matrix( ((1,0,iTrans[0]),(0,1,iTrans[1]),(0,0,1)) )\n oMatRot = np.matrix( ((np.cos(iRot),-np.sin(iRot),0),\\\n (np.sin(iRot),np.cos(iRot),0),(0,0,1)) )\n oMatShear = np.matrix( ((1,iShear[0],0),(iShear[1],1,0),(0,0,1)) )\n # ustvari izhodno matriko\n oMat2D = oMatTrans * oMatShear * oMatRot * oMatScale\n return oMat2D", "def affine_transform(trans_mat, p0):\r\n n_data, n_dim = np.shape(p0)\r\n p0 = np.hstack((p0, np.ones((n_data, 1))))\r\n #return np.transpose(np.dot(np.transpose(trans_mat), np.transpose(p0)))\r\n return np.dot(p0, trans_mat)", "def apply_direction_scale( vectors, direction, scale ):\n \"\"\"\n scaling is defined as:\n \n [p'][1 + (k - 1)n.x^2, (k - 1)n.x n.y^2, (k - 1)n.x n.z ]\n S(n,k) = [q'][(k - 1)n.x n.y, 1 + (k - 1)n.y, (k - 1)n.y n.z ]\n [r'][(k - 1)n.x n.z, (k - 1)n.y n.z, 1 + (k - 1)n.z^2 ]\n \n where:\n v' is the resulting vector after scaling\n v is the vector to scale\n n is the direction of the scaling\n n.x is the x component of n\n n.y is the y component of n\n n.z is the z component of n\n k is the scaling factor\n \"\"\"\n scaleMinus1 = scale - 1\n matrix = numpy.array(\n [\n # m1\n [\n # m11 = 1 + (k - 1)n.x^2\n 1 + scaleMinus1 * (direction[ 0 ]**2),\n # m12 = (k - 1)n.x n.y^2\n scaleMinus1 * direction[ 0 ] * direction[ 1 ]**2,\n # m13 = (k - 1)n.x n.z\n scaleMinus1 * direction[ 0 ] * direction[ 2 ]\n ],\n # m2\n [\n # m21 = (k - 1)n.x n.y\n scaleMinus1 * direction[ 0 ] * direction[ 1 ],\n # m22 = 1 + (k - 1)n.y\n 1 + scaleMinus1 * direction[ 1 ],\n # m23 = (k - 1)n.y n.z\n scaleMinus1 * direction[ 1 ] * direction[ 2 ]\n ],\n # m3\n [\n # m31 = (k - 1)n.x n.z\n scaleMinus1 * direction[ 0 ] * direction[ 2 ],\n # m32 = (k - 1)n.y n.z\n scaleMinus1 * direction[ 1 ] * direction[ 2 ],\n # m33 = 1 + (k - 1)n.z^2\n 1 + scaleMinus1 * direction[ 2 ]**2\n ]\n ],\n dtype = numpy.float\n )\n \n return numpy.dot( vectors, matrix )", "def estimate_affine_matrix_3d_to_2d(X, x):\n assert x.shape[0] == X.shape[0]\n assert x.shape[0] >= 4\n X = X.T # (3, n)\n x = x.T # (2, n)\n n = x.shape[1]\n\n ###---- 1. normalization\n ## 2d points\n mean = np.mean(x, 1) # (2, )\n x = x - np.tile(mean[:, np.newaxis], [1, n]) # (2, n)\n average_norm = np.mean(np.sqrt(np.sum(x ** 2, 0)))\n scale = np.sqrt(2) / average_norm\n x = scale * x\n\n # T = [[scale, 0, -mean * scale], \n # [ 0, scale, -mean * scale], \n # [ 0, 0, 1 ]]\n T = np.zeros((3, 3), dtype=np.float32)\n T[0, 0] = T[1, 1] = scale\n T[:2, 2] = -mean * scale\n T[2, 2] = 1\n\n ## 3d points\n X_homo = np.vstack((X, np.ones((1, n)))) # (4, n)\n mean = np.mean(X, 1) # (3, )\n X = X - np.tile(mean[:, np.newaxis], [1, n]) # (3, n)\n m = X_homo[: 3, :] - X\n average_norm = np.mean(np.sqrt(np.sum(X ** 2, 0)))\n scale = np.sqrt(3) / average_norm\n X = scale * X\n\n U = np.zeros((4, 4), dtype=np.float32)\n U[0, 0] = U[1, 1] = U[2, 2] = scale\n U[: 3, 3] = -mean * scale\n U[3, 3] = 1\n\n ###---- 2. equations\n A = np.zeros((n * 2, 8), dtype=np.float32)\n X_homo = np.vstack((X, np.ones((1, n)))).T\n A[: n, : 4] = X_homo\n A[n: , 4: ] = X_homo\n b = np.reshape(x, [-1, 1]) # (2n, 1)\n\n ###---- 3.solution\n p_8 = np.linalg.pinv(A).dot(b) # (8, 2n) x (2n, 1) -> (8, 1)\n p = np.zeros((3, 4), dtype=np.float32)\n p[0, :] = p_8[:4, 0]\n p[1, :] = p_8[4:, 0]\n p[-1, -1] = 1\n\n ###---- 4. 
denormalization\n P_Affine = np.linalg.inv(T).dot(p.dot(U))\n return P_Affine", "def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M", "def get_affine_matrix2d(\n translations: torch.Tensor,\n center: torch.Tensor,\n scale: torch.Tensor,\n angle: torch.Tensor,\n sx: Optional[torch.Tensor] = None,\n sy: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n transform: torch.Tensor = get_rotation_matrix2d(center, -angle, scale)\n transform[..., 2] += translations # tx/ty\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography(transform)\n\n if any(s is not None for s in [sx, sy]):\n shear_mat = get_shear_matrix2d(center, sx, sy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def AffineTransform( from_pts, to_pts ):\n \n # check that there are match points\n if len(from_pts) != len(to_pts) or len(to_pts)<1:\n print \"from_pts and to_pts must be of same size.\"\n return False\n\n # check the dimensions\n dim = len(from_pts[0]) # num of dimensions\n if len(from_pts) < dim:\n print \"Too few points => under-determined system.\"\n return False\n elif len(from_pts) > dim + 1:\n print \"Too many points => over-determined system.\"\n return False\n\n \n #segregate the x and y coordinages\n from_pts_x, from_pts_y = zip(*from_pts)\n to_pts_x, to_pts_y = zip(*to_pts)\n \n #create the Matricies for processing\n I = np.matrix([from_pts_x, from_pts_y, [1,1,1]])\n P = np.matrix([to_pts_x, to_pts_y])\n \n #Calculate the 2D affine transform matrix (A)\n A = P * linalg.pinv(I) \n\n # Make a result object\n class Transformation:\n \"\"\"Result object that represents the transformation\n from affine fitter.\"\"\"\n\n def To_Str(self):\n res = \"\"\n for j in range(dim):\n str1 = \"x%d' = \" % j\n for i in range(dim):\n str1 +=\"x%d * %f + \" % (i, A[i][j+dim+1])\n str1 += \"%f\" % A[dim][j+dim+1]\n res += str1 + \"\\n\"\n return res\n\n def Transform(self, pt_x, pt_y):\n pt_vector = np.matrix([[pt_x], [pt_y], [1]])\n transformed_pt = A * pt_vector\n return map(itemgetter(0), transformed_pt.tolist())\n return Transformation()", "def get_affine_matrix2d(\n translations: Tensor,\n center: Tensor,\n scale: Tensor,\n angle: Tensor,\n sx: Tensor | None = None,\n sy: Tensor | None = None,\n) -> Tensor:\n transform: Tensor = get_rotation_matrix2d(center, -angle, scale)\n transform[..., 2] += translations # tx/ty\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography(transform)\n\n if any(s is not None for s in [sx, sy]):\n shear_mat = get_shear_matrix2d(center, sx, sy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def contract(tensor):\n temp = np.einsum('ikma, jlan', tensor, tensor)\n M = np.zeros((tensor.shape[0]**2, tensor.shape[1]**2, tensor.shape[2], tensor.shape[3]))\n for i,j,k,l,m,n in it.product(*[range(x) for x in temp.shape]):\n M[i + tensor.shape[0]*j, k + tensor.shape[1]*l, m, n] = temp[i,j,k,l,m,n]\n return M", "def transformAffine(self, coords):\n coordsshape = coords.shape\n dims = coordsshape[0] + 1\n coords = coords.reshape((len(coords), -1))\n coords = np.concatenate((coords, np.ones((1, len(coords[0])))), 0)\n affine = np.eye(dims)\n # now transform first to center:\n meanvec = np.mean(coords, 1)\n center = np.eye(dims)\n center[:-1, -1] = -meanvec[:-1]\n 
affine = np.matmul(center, affine)\n\n if np.sum(self.shift):\n affine[:-1, -1] += (self.deformrandomstate.rand(dims - 1) - 0.5) * np.float32(self.shift)\n if np.max(self.scaling) > 1:\n scales = np.ones(dims)\n # scales[:-1] = (self.deformrandomstate.rand(dims-1)-0.5)*(self.scaling-1.0/self.scaling)+(self.scaling+1/self.scaling)/2\n scales[:-1] = self.scaling ** (self.deformrandomstate.rand(dims - 1) * 2 - 1)\n scales = np.diag(scales)\n # print(scales)\n affine = np.matmul(scales, affine)\n if np.sum(self.rotation):\n affine = self._rotate(affine)\n # move back to location:\n center[:-1, -1] = -center[:-1, -1]\n affine = np.matmul(center, affine)\n # now appyl to coords:\n coords = np.matmul(affine, coords)\n coords = coords[:-1]\n coords = coords.reshape(coordsshape)\n return coords", "def transform(fn):\n def _(vec, dt):\n return np.einsum(\n 'ji,i,ki,k...->j...',\n evecs, fn(evals, dt), evecs, vec, optimize=True)\n\n return _", "def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):\n\n\n\n v0 = np.array(v0, dtype=np.float64, copy=True)\n v1 = np.array(v1, dtype=np.float64, copy=True)\n\n ndims = v0.shape[0]\n if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:\n print(ndims < 2)\n print(v0.shape[1] < ndims)\n print(v0.shape != v1.shape)\n\n print(ndims)\n\n raise ValueError(\"input arrays are of wrong shape or type\")\n\n # move centroids to origin\n t0 = -np.mean(v0, axis=1)\n M0 = np.identity(ndims+1)\n M0[:ndims, ndims] = t0\n v0 += t0.reshape(ndims, 1)\n t1 = -np.mean(v1, axis=1)\n M1 = np.identity(ndims+1)\n M1[:ndims, ndims] = t1\n v1 += t1.reshape(ndims, 1)\n\n if shear:\n # Affine transformation\n A = np.concatenate((v0, v1), axis=0)\n u, s, vh = np.linalg.svd(A.T)\n vh = vh[:ndims].T\n B = vh[:ndims]\n C = vh[ndims:2*ndims]\n t = np.dot(C, np.linalg.pinv(B))\n t = np.concatenate((t, np.zeros((ndims, 1))), axis=1)\n M = np.vstack((t, ((0.0,)*ndims) + (1.0,)))\n elif usesvd or ndims != 3:\n # Rigid transformation via SVD of covariance matrix\n u, s, vh = np.linalg.svd(np.dot(v1, v0.T))\n # rotation matrix from SVD orthonormal bases\n R = np.dot(u, vh)\n if np.linalg.det(R) < 0.0:\n # R does not constitute right handed system\n R -= np.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)\n s[-1] *= -1.0\n # homogeneous transformation matrix\n M = np.identity(ndims+1)\n M[:ndims, :ndims] = R\n else:\n # Rigid transformation matrix via quaternion\n # compute symmetric matrix N\n xx, yy, zz = np.sum(v0 * v1, axis=1)\n xy, yz, zx = np.sum(v0 * np.roll(v1, -1, axis=0), axis=1)\n xz, yx, zy = np.sum(v0 * np.roll(v1, -2, axis=0), axis=1)\n N = [[xx+yy+zz, 0.0, 0.0, 0.0],\n [yz-zy, xx-yy-zz, 0.0, 0.0],\n [zx-xz, xy+yx, yy-xx-zz, 0.0],\n [xy-yx, zx+xz, yz+zy, zz-xx-yy]]\n # quaternion: eigenvector corresponding to most positive eigenvalue\n w, V = np.linalg.eigh(N)\n q = V[:, np.argmax(w)]\n q /= vector_norm(q) # unit quaternion\n # homogeneous transformation matrix\n M = quaternion_matrix(q)\n\n if scale and not shear:\n # Affine transformation; scale is ratio of RMS deviations from centroid\n v0 *= v0\n v1 *= v1\n M[:ndims, :ndims] *= math.sqrt(np.sum(v1) / np.sum(v0))\n\n # move centroids back\n M = np.dot(np.linalg.inv(M1), np.dot(M, M0))\n M /= M[ndims, ndims]\n return M", "def affine_2Dtransform(img, t_mat, height, width, h_offset=0, w_offset=0, nh_flag=False, nw_flag=False):\n # transform matrix must be validated\n if(np.shape(t_mat) != (2, 2)):\n return img\n\n # implementing matrix multiplication to a default map of source data in order to apply transform\n # 
and to achieve coordination/location of transformed matrix according to source data(data map)\n coord_map = transform_calcualtion(\n height, width, t_mat, h_offset, w_offset, nh_flag, nw_flag)\n\n # transformed image data construction\n t_img = np.full((height+h_offset, width+w_offset, 3), 255, dtype='uint8')\n\n # applying new map to image inorder to complete the transform\n try:\n for i in range(height):\n for j in range(width):\n [i_new_coord, j_new_coord] = coord_map[i, j, :]\n # unhandled bound-jumpout\n t_img[i_new_coord, j_new_coord, :] = img[i, j, :]\n except:\n print(\"not enough offset/negative coordination pushed\")\n return img\n return t_img", "def complex_mul2d(a, b):\n op = partial(torch.einsum, \"bixy,ioxy->boxy\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def compose_transform(T1, T2):\n aux_vec = np.array([0, 0, 1]).reshape(1, 3)\n\n T1 = np.concatenate((T1, aux_vec), axis=0)\n T2 = np.concatenate((T2, aux_vec), axis=0)\n\n T1_inv = np.linalg.inv(T1)\n T = T1_inv@T2\n\n return T[0:2]", "def apply_transformation_np(source, transformation):\n source_homog = np.ones((source.shape[0], 4))\n source_homog[:, :-1] = source\n # source_homog = np.hstack(\n # (source, np.ones(source.shape[0], 1))\n # )\n\n source_transformed = np.matmul(transformation, source_homog.T).T[:, :-1]\n return source_transformed", "def affine_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n M1 = mat2(0)\r\n M2 = mat2(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n M1 += p_adj.transpose_multiply(p_adj)*w[i]\r\n M2 += p_adj.transpose_multiply(q_adj)*w[i]\r\n M1 = M1.inverse()\r\n M = M1*M2\r\n M = M.transpose()\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. 
The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def get_affine_transform(center, scale, rot, output_size, shift=(0.0, 0.0), inv=False):\n assert len(center) == 2\n assert len(scale) == 2\n assert len(output_size) == 2\n assert len(shift) == 2\n scale_tmp = scale * 200.0\n shift = np.array(shift)\n src_w = scale_tmp[0]\n dst_w = output_size[0]\n dst_h = output_size[1]\n rot_rad = np.pi * rot / 180\n src_dir = rotate_point([0.0, src_w * -0.5], rot_rad)\n dst_dir = np.array([0.0, dst_w * -0.5])\n src = np.zeros((3, 2), dtype=np.float32)\n src[0, :] = center + scale_tmp * shift\n src[1, :] = center + src_dir + scale_tmp * shift\n src[2, :] = _get_3rd_point(src[0, :], src[1, :])\n dst = np.zeros((3, 2), dtype=np.float32)\n dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir\n dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])\n if inv:\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n else:\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n return trans", "def rigid_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A_fac = mat2([v.x - p_wgt.x, v.y - p_wgt.y, v.y - p_wgt.y, p_wgt.x - v.x])\r\n v_out = vec2(0, 0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A = mat2([p_adj.x, p_adj.y, p_adj.y, -p_adj.x])*A_fac*w[i]\r\n A = A.transpose()\r\n v_out += A*q_adj\r\n r = math.sqrt(v_out.dot(v_out))\r\n v_out /= r\r\n v_sub = v - p_wgt\r\n r = math.sqrt(v_sub.dot(v_sub))\r\n v_out *= r\r\n v_out += q_wgt\r\n return v_out", "def _e_2d_(p, a):\r\n diff = a - p[np.newaxis, :]\r\n return np.einsum('ij,ij->i', diff, diff)", "def affineTransform(x,output_dim):\n w=tf.get_variable(\"w\", [x.get_shape()[1], output_dim])\n b=tf.get_variable(\"b\", [output_dim], initializer=tf.constant_initializer(0.0))\n return tf.matmul(x,w)+b", "def transform_calcualtion(height, width, t_mat, h_offset, w_offset, nh_flag, nw_flag):\n # default coordination/location of transformed matrix according to source data(data map)\n coord_map = default_coord(height, width, h_offset, 
w_offset)\n\n for i in range(height):\n for j in range(width):\n # base calculations\n result = [(t_mat[0][0])*(coord_map[i, j, 0])+int((t_mat[0][1])*(coord_map[i, j, 1])),\n (t_mat[1][0])*(coord_map[i, j, 0])+(t_mat[1][1])*(coord_map[i, j, 1])]\n # since all coordinations must not be negative\n # if happened also apply a translation by offset\n coord_map[i, j, :] = [(result[0], result[0]+h_offset)[nh_flag],\n (result[1], result[1]+w_offset)[nw_flag]]\n return coord_map", "def translate(self, x=0, y=0, z=0):\n\t\ttranslation = np.identity(4)\n\t\ttranslation[0, 3] += x\n\t\ttranslation[1, 3] += y\n\t\ttranslation[2, 3] += z\n\t\t\n\t\tself.matrix = np.matmul(self.matrix, translation)", "def scale_and_translate_points(points):\n x = points[0]\n y = points[1]\n center = points.mean(axis=1) # mean of each row\n cx = x - center[0] # center the points\n cy = y - center[1]\n dist = np.sqrt(np.power(cx, 2) + np.power(cy, 2))\n scale = np.sqrt(2) / dist.mean()\n norm3d = np.array([\n [scale, 0, -scale * center[0]],\n [0, scale, -scale * center[1]],\n [0, 0, 1]\n ])\n\n return np.dot(norm3d, points), norm3d", "def project(A):\n return A.T @ np.linalg.pinv(A @ A.T) @ A", "def apply_affine_transform(x, M):\n is1d = len(x.shape) == 1\n if is1d:\n x = np.expand_dims(x, axis=0)\n\n x_hom = np.concatenate(\n [x, np.ones((x.shape[0], 1), dtype=x.dtype)], axis=-1\n )\n x_out = x_hom @ M.T\n if is1d:\n x_out = np.squeeze(x_out, axis=0)\n return x_out", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def affine_mult(affine, coordinates):\n return np.dot(coordinates, affine[:3, :3].T) + affine[:3, -1]", "def magma_sgemv(trans, m, n, alpha, dA, ldda, dx, incx, beta,\n dy, incy, queue):\n\n _libmagma.magma_sgemv(trans, m, n, alpha, int(dA), ldda, dx, incx,\n beta, int(dy), incy, queue)", "def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2,:2] = rotation\n H[:2, 2] = translation\n return H", "def trans_matrix_inv(m:numpy.ndarray):\n was2d = False\n if m.shape[1] == 3:\n was2d = True\n m = numpy.asarray([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, m[0,0], m[0,1], m[0,2]],\n [0.0, m[1,0], m[1,1], m[1,2]],\n [0.0, 0.0, 0.0, 1.0]], numpy.float64)\n trans = m[0:3,3]\n rotate = numpy.zeros(3, numpy.float64)\n r = m[0:3,0:3]\n rc = numpy.linalg.cholesky(numpy.matmul(r.T, r)).T\n scale = numpy.diagonal(rc)\n if numpy.linalg.det(r) < 0.0:\n scale[0] *= -1.0\n rcd = rc * numpy.eye(3, dtype=numpy.float64)\n rc = numpy.linalg.solve(rcd, rc)\n shear = numpy.asarray([rc[0,1], rc[0,2], rc[1,2]], numpy.float64)\n r0 = trans_matrix({'rotate': rotate, 'scale': scale, 'shear': 
shear})[0:3,0:3]\n r0 = numpy.linalg.solve(numpy.linalg.inv(r), numpy.linalg.inv(r0))\n rotate[1] = numpy.arcsin(_frone(r0[0,2]))\n if numpy.abs((numpy.abs(rotate[1]) - (numpy.pi / 2.0))) < 1.0e-6:\n rotate[0] = 0.0\n rotate[2] = numpy.arctan2(-_frone(r0[1,0]), _frone(-r0[2,0] / r0[0,2]))\n else:\n rc = numpy.cos(rotate[1])\n rotate[0] = numpy.arctan2(_frone(r0[1,2] / rc), _frone(r0[2,2] / rc))\n rotate[2] = numpy.arctan2(_frone(r0[0,1] / rc), _frone(r0[0,0] / rc))\n if was2d:\n trans = trans[1:]\n rotate = rotate[0:1]\n scale = scale[1:]\n shear = shear[2:3]\n return (trans, rotate, scale, shear)", "def complex_mul1d(a, b):\n op = partial(torch.einsum, \"bix,iox->box\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def batch_affine_warp2d(imgs, theta):\n n_batch = tf.shape(imgs)[0]\n xlen = tf.shape(imgs)[1]\n ylen = tf.shape(imgs)[2]\n theta = tf.reshape(theta, [-1, 2, 3])\n matrix = tf.slice(theta, [0, 0, 0], [-1, -1, 2])\n t = tf.slice(theta, [0, 0, 2], [-1, -1, -1])\n\n grids = batch_mgrid(n_batch, xlen, ylen)\n coords = tf.reshape(grids, [n_batch, 2, -1])\n\n T_g = tf.batch_matmul(matrix, coords) + t\n T_g = tf.reshape(T_g, [n_batch, 2, xlen, ylen])\n output = batch_warp2d(imgs, T_g)\n return output", "def get_translation_matrix2d(translations: Tensor) -> Tensor:\n transform: Tensor = eye_like(3, translations)[:, :2, :]\n transform[..., 2] += translations # tx/ty\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography(transform)\n\n return transform_h", "def transformation_2d(vertices, kernels=KERNELS):\n\t# calculate the transpose matrix of vertices\n\ttranspose = vertices.transpose()\n\t# insert a row of ones in the transpose matrix's end, then insert the result in 'matrices' list\n\tkernels.append(np.append(transpose, [np.ones(len(transpose[0]))], axis=0))\n\t# multiply matrices into 'kernels' list,\n\t# remove the last row (of ones) and calculate the transpose matrix of the result\n\tfinal_transformation_result = np.delete(np.linalg.multi_dot(kernels), 2, 0).transpose()\n\tKERNELS.clear()\n\treturn final_transformation_result", "def gen_affine_map(Ab, img_sz, dim=3):\n Ab = Ab.view(Ab.shape[0], dim+1, dim)\n phi = gen_identity_map(img_sz).to(Ab.device)\n phi_cp = phi.view(dim, -1)\n affine_map = torch.matmul(Ab[:, :dim, :], phi_cp)\n affine_map = Ab[:, dim, :].contiguous().view(-1, dim, 1) + affine_map\n affine_map = affine_map.view([Ab.shape[0]] + list(phi.shape))\n return affine_map", "def bs_densmatrix_transform(input_matrix, t, r):\n size = len(input_matrix)\n output_matrix = np.zeros((size*2,) * 4, dtype=complex)\n\n for p1 in range(size):\n for p2 in range(size):\n for p1_ in range(size):\n for p2_ in range(size):\n\n for n in range(p1 + 1):\n for k in range(p2 + 1):\n for n_ in range(p1_ + 1):\n for k_ in range(p2_ + 1):\n d1 = p1 - n + k\n d2 = n + p2 - k\n d1_ = p1_ - n_ + k_\n d2_ = n_ + p2_ - k_\n\n coeff1 = t**(p1 - n + p2 - k) * (1j*r)**(n + k) * sqrt(factorial(d1) * factorial(d2) * factorial(p1) * factorial(p2)) / (factorial(n) * factorial(p1 - n) * factorial(k) * factorial(p2 - k))\n coeff2 = t**(p1_ - n_ + p2_ - k_) * (-1j*r)**(n_ + k_) * sqrt(factorial(d1_) * factorial(d2_) * factorial(p1_) * factorial(p2_)) / (factorial(n_) * factorial(p1_ - n_) * factorial(k_) * factorial(p2_ - k_))\n output_matrix[d1, d2, d1_, d2_] = output_matrix[d1, d2, d1_, d2_] + input_matrix[p1, p2, p1_, p2_] * coeff1 * coeff2\n\n return output_matrix", "def 
affine_trans(self):\n h, w, _ = self.img.shape\n\n \"\"\"\n pts1 = np.float32(\n [\n [randint(0, rows), randint(0, cols)],\n [randint(0, rows), randint(0, cols)],\n [randint(0, rows), randint(0, cols)],\n ]\n )\n pts2 = np.float32(\n [\n [randint(0, rows), randint(0, cols)],\n [randint(0, rows), randint(0, cols)],\n [randint(0, rows), randint(0, cols)],\n ]\n )\n \"\"\"\n\n pts1 = np.float32([[50, 50], [200, 50], [50, 200]])\n pts2 = np.float32([[10, 100], [200, 50], [100, 250]])\n\n M = cv2.getAffineTransform(pts1, pts2)\n\n self.img = cv2.warpAffine(self.img, M, (w, h))\n\n self.edits.append(\"affine\")\n return self", "def transformation_matrix(self, s1, s2, s3, t1, t2, t3):\n\n s1 = np.array(s1)\n s2 = np.array(s2)\n s3 = np.array(s3)\n t1 = np.array(t1)\n t2 = np.array(t2)\n t3 = np.array(t3)\n\n Q = np.array(\n [\n [t2[0] - t1[0], t2[1] - t1[1], t2[2] - t1[2]],\n [t3[0] - t1[0], t3[1] - t1[1], t3[2] - t1[2]],\n ]\n )\n\n P = np.array([[s2[0] - s1[0], s2[1] - s1[1]], [s3[0] - s1[0], s3[1] - s1[1]]])\n\n try:\n # Invert the P matrix\n Pinv = inv(P)\n\n # Build the dot product\n T = np.dot(Pinv, Q)\n\n # Offset\n V0 = np.subtract(t2, np.transpose(s2[0:2]).dot(T))\n except Exception as e:\n self.log.error(\"An error occured during the transformation.\", exc_info=True)\n return -1, -1\n\n return T, V0", "def _proj(u,v):\n return (np.einsum('i...,i...->...',u,v)/np.einsum('i...,i...->...',u,u))*u", "def _transform(\n self, x: \"torch.Tensor\", y: Optional[\"torch.Tensor\"], **kwargs\n ) -> Tuple[\"torch.Tensor\", Optional[\"torch.Tensor\"]]:\n import torch\n import torchvision.transforms.functional as F\n\n img_size = x.shape[:2]\n\n angle = float(\n torch.empty(1)\n .uniform_(float(self.degree_range[0]), float(self.degree_range[1]))\n .item()\n )\n\n max_dx = float(self.translate[0] * img_size[1])\n max_dy = float(self.translate[1] * img_size[0])\n tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))\n ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))\n translations = (tx, ty)\n\n scale = float(torch.empty(1).uniform_(self.scale[0], self.scale[1]).item())\n\n # x needs to have channel first\n x = x.permute(2, 0, 1)\n x = F.affine(\n img=x, angle=angle, translate=translations, scale=scale, shear=(0.0, 0.0)\n )\n x = x.permute(1, 2, 0)\n\n return torch.clamp(x, min=self.clip_values[0], max=self.clip_values[1]), y", "def get_projective_transform(center: Tensor, angles: Tensor, scales: Tensor) -> Tensor:\n if not (len(center.shape) == 2 and center.shape[-1] == 3):\n raise AssertionError(center.shape)\n if not (len(angles.shape) == 2 and angles.shape[-1] == 3):\n raise AssertionError(angles.shape)\n if center.device != angles.device:\n raise AssertionError(center.device, angles.device)\n if center.dtype != angles.dtype:\n raise AssertionError(center.dtype, angles.dtype)\n\n # create rotation matrix\n axis_angle_rad: Tensor = deg2rad(angles)\n rmat: Tensor = axis_angle_to_rotation_matrix(axis_angle_rad) # Bx3x3\n scaling_matrix: Tensor = eye_like(3, rmat)\n scaling_matrix = scaling_matrix * scales.unsqueeze(dim=1)\n rmat = rmat @ scaling_matrix.to(rmat)\n\n # define matrix to move forth and back to origin\n from_origin_mat = eye_like(4, rmat, shared_memory=False) # Bx4x4\n from_origin_mat[..., :3, -1] += center\n\n to_origin_mat = from_origin_mat.clone()\n to_origin_mat = _torch_inverse_cast(from_origin_mat)\n\n # append translation with zeros\n proj_mat = projection_from_Rt(rmat, torch.zeros_like(center)[..., None]) # Bx3x4\n\n # chain 4x4 transforms\n proj_mat = 
convert_affinematrix_to_homography3d(proj_mat) # Bx4x4\n proj_mat = from_origin_mat @ proj_mat @ to_origin_mat\n\n return proj_mat[..., :3, :] # Bx3x4", "def test_transform_2d(transform, alpha = 1):\r\n points = 20*[None]\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n points[i] = vec2(x, y)\r\n tr_x = random.randrange(-40, 41)\r\n tr_y = random.randrange(-40, 41)\r\n mapping = [(p, vec2(p.x + tr_x, p.y + tr_y)) for p in points]\r\n print(\"Translation\")\r\n print(\"Input\".ljust(20), \"Translation\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_translate = vec2(x + tr_x, y + tr_y)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_translate.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()\r\n th = 2*math.pi*random.random()\r\n mapping = [(p, vec2(p.x*math.cos(th) - p.y*math.sin(th), p.x*math.sin(th) + p.y*math.cos(th))) for p in points]\r\n print(\"Rotation\")\r\n print(\"Input\".ljust(20), \"Rotation\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_rotate = vec2(x*math.cos(th) - y*math.sin(th), x*math.sin(th) + y*math.cos(th))\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_rotate.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()\r\n k = math.exp(2*random.random() - 1)\r\n mapping = [(p, vec2(k*p.x, k*p.y)) for p in points]\r\n print(\"Uniform scaling\")\r\n print(\"Input\".ljust(20), \"Scaling\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_scale = vec2(k*x, k*y)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_scale.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()\r\n k_x = math.exp(2*random.random() - 1)\r\n k_y = 3*random.random() + 1\r\n if (k_x >= k_y + math.exp(-1)): k_y = k_x - k_y\r\n else: k_y = k_x + k_y\r\n mapping = [(p, vec2(k_x*p.x, k_y*p.y)) for p in points]\r\n print(\"Non-uniform scaling\")\r\n print(\"Input\".ljust(20), \"Scaling\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_scale = vec2(k_x*x, k_y*y)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_scale.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()", "def similarity_transformation(rot, mat):\n return np.dot(rot, np.dot(mat, np.linalg.inv(rot)))", "def similarity_transformation(rot, mat):\n return np.dot(rot, np.dot(mat, np.linalg.inv(rot)))", "def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2, :2] = rotation\n H[:2, 2] = translation\n return H", "def estimate_rigid_transform(points1, points2, translation_only=False):\n 
centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2, :2] = rotation\n H[:2, 2] = translation\n return H", "def similarity_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n mu = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n mu += w[i]*(p_adj.dot(p_adj))\r\n A_fac = mat2([v.x - p_wgt.x, v.y - p_wgt.y, v.y - p_wgt.y, p_wgt.x - v.x])\r\n v_out = vec2(0, 0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A = mat2([p_adj.x, p_adj.y, p_adj.y, -p_adj.x])*A_fac*w[i]\r\n A = A.transpose()\r\n v_out += A*q_adj/mu\r\n v_out += q_wgt\r\n return v_out", "def apply_transformation(self, points):\n assert (points.shape[0] == 3)\n n = points.shape[1]\n points_ = np.vstack((points, np.ones((1, n))))\n points_trans_ = np.matmul(self.pose_mat, points_)\n points_transformed = np.true_divide(points_trans_[:3, :], points_trans_[[-1], :])\n return points_transformed", "def transforms_multiply(t0s, t1s):\r\n \r\n return ut.matrix_multiply(t0s, t1s)", "def _dot_product_attention_inner_relative(x, y, z, transpose):\n batch_size, heads, length, _ = x.size()\n\n # xy_matmul is [batch_size, heads, length, length or depth]\n xy_matmul = torch.matmul(x, y if not transpose else y.transpose(-2, -1))\n # x_t is [length, batch_size, heads, length or depth]\n x_t = x.permute(2, 0, 1, 3)\n # x_t_r is [length, batch_size * heads, length or depth]\n x_t_r = x_t.view(length, batch_size * heads, -1)\n # x_tz_matmul is [length, batch_size * heads, length or depth]\n x_tz_matmul = torch.matmul(x_t_r, z if not transpose else z.transpose(-2, -1))\n # x_tz_matmul_r is [length, batch_size, heads, length or depth]\n x_tz_matmul_r = x_tz_matmul.view(length, batch_size, heads, -1)\n # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]\n x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)\n\n return xy_matmul + x_tz_matmul_r_t", "def getAffineTransform(self, coord1, coord2):\n # generate coord1 into A\n mat_A = np.zeros((2*coord1.shape[0], 6))\n coord1 = np.hstack([coord1, np.ones((coord1.shape[0], 1))])\n for i in range(coord1.shape[0]):\n row = coord1[i,:]\n row_block = block_diag(row, row)\n assert(row_block.shape == (2,6))\n mat_A[2*i:2*i+2, :] = row_block\n \n # generate coord2 into b\n vec_b = coord2.reshape(-1,1)\n\n # solve the least square\n pseudo_inv = np.linalg.inv(np.matmul(mat_A.T, mat_A))\n pseudo_inv = np.matmul(pseudo_inv, mat_A.T)\n affine_mat = np.matmul(pseudo_inv, vec_b)\n assert(affine_mat.shape == (6,1))\n \n return affine_mat.reshape(2,-1)", "def affine_to_shift(affine_matrix, volshape, shift_center=True, indexing='ij'):\n\n if isinstance(volshape, (tf.compat.v1.Dimension, tf.TensorShape)):\n volshape = volshape.as_list()\n \n if affine_matrix.dtype != 'float32':\n affine_matrix = 
tf.cast(affine_matrix, 'float32')\n\n nb_dims = len(volshape)\n\n if len(affine_matrix.shape) == 1:\n if len(affine_matrix) != (nb_dims * (nb_dims + 1)):\n raise ValueError('transform is supposed a vector of len ndims * (ndims + 1).'\n 'Got len %d' % len(affine_matrix))\n\n affine_matrix = tf.reshape(affine_matrix, [nb_dims, nb_dims + 1])\n\n if not (affine_matrix.shape[0] in [nb_dims, nb_dims + 1] and affine_matrix.shape[1] == (nb_dims + 1)):\n shape1 = '(%d x %d)' % (nb_dims + 1, nb_dims + 1)\n shape2 = '(%d x %s)' % (nb_dims, nb_dims + 1)\n true_shape = str(affine_matrix.shape)\n raise Exception('Affine shape should match %s or %s, but got: %s' % (shape1, shape2, true_shape))\n\n # list of volume ndgrid\n # N-long list, each entry of shape volshape\n mesh = ne.utils.volshape_to_meshgrid(volshape, indexing=indexing) \n mesh = [tf.cast(f, 'float32') for f in mesh]\n \n if shift_center:\n mesh = [mesh[f] - (volshape[f]-1)/2 for f in range(len(volshape))]\n\n # add an all-ones entry and transform into a large matrix\n flat_mesh = [ne.utils.flatten(f) for f in mesh]\n flat_mesh.append(tf.ones(flat_mesh[0].shape, dtype='float32'))\n mesh_matrix = tf.transpose(tf.stack(flat_mesh, axis=1)) # 4 x nb_voxels\n\n # compute locations\n loc_matrix = tf.matmul(affine_matrix, mesh_matrix) # N+1 x nb_voxels\n loc_matrix = tf.transpose(loc_matrix[:nb_dims, :]) # nb_voxels x N\n loc = tf.reshape(loc_matrix, list(volshape) + [nb_dims]) # *volshape x N\n # loc = [loc[..., f] for f in range(nb_dims)] # N-long list, each entry of shape volshape\n\n # get shifts and return\n return loc - tf.stack(mesh, axis=nb_dims)", "def affine_transform(x, output_dim, name=None):\n\n w = tf.get_variable(name + \"_w\", [x.get_shape()[1], output_dim], initializer=tf.truncated_normal_initializer(stddev=0.02))\n b = tf.get_variable(name + \"_b\", [output_dim], initializer=tf.constant_initializer(0.0))\n\n return tf.matmul(x, w) + b", "def img_map_transforms(ts):\n # XXX TODO: unchecked textures give error of variable referenced before assignment XXX\n # POV-Ray \"scale\" is not a number of repetitions factor, but ,its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # scale is 0.5,0.5 in blender and 0,0 in POV\n # Strange that the translation factor for scale is not the same as for\n # translate.\n # TODO: verify both matches with other blender renderers / internal in previous versions.\n image_map_transforms = \"\"\n image_map_transforms = \"scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % (\n ts.scale[0],\n ts.scale[1],\n ts.scale[2],\n ts.offset[0],\n ts.offset[1],\n ts.offset[2],\n )\n # image_map_transforms = (\" translate <-0.5,-0.5,0.0> scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % \\\n # ( 1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # (0.5 / ts.scale.x) + ts.offset.x,\n # (0.5 / ts.scale.y) + ts.offset.y,\n # ts.offset.z))\n # image_map_transforms = (\n # \"translate <-0.5,-0.5,0> \"\n # \"scale <-1,-1,1> * <%.4g,%.4g,%.4g> \"\n # \"translate <0.5,0.5,0> + <%.4g,%.4g,%.4g>\" % \\\n # (1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # ts.offset.x,\n # ts.offset.y,\n # ts.offset.z)\n # )\n return image_map_transforms", "def make_translations(dataset, labels):\n offset = 10\n translations = [\n (0, offset),\n (0, -offset),\n (offset, 0),\n (-offset, 0),\n (-offset, -offset),\n (-offset, offset),\n (offset, -offset),\n (offset, offset)\n ]\n\n was_flattened = (len(dataset[0].shape) == 1)\n augmented_dataset = []\n 
augmented_labels = []\n \n for image, label in zip(dataset, labels):\n if was_flattened:\n image = unflatten(image)\n \n height = image.shape[0]\n width = image.shape[1]\n \n for t_x, t_y in translations:\n new_image = np.zeros(image.shape)\n t_mat = np.array([[1,0,t_x],[0,1,t_y],[0,0,1]])\n\n for x in range(0, width):\n for y in range(0, height):\n old_coords = np.array([[x],[y],[1]])\n new_coords = t_mat.dot(old_coords) # translation here\n\n if new_coords[0] > 0 and new_coords[0] < width and new_coords[1] > 0 and new_coords[1] < height:\n new_image[new_coords[1], new_coords[0]] = image[y, x]\n \n if was_flattened:\n new_image.flatten()\n augmented_dataset.append(new_image)\n augmented_labels.append(label)\n\n return (augmented_dataset, augmented_labels)", "def matmul(x, y):\n return np.matmul(x, y)", "def TransformPoint(transform, x, y, z):\n result = np.matmul(transform, np.array([x, y, z, 1.]))\n return result[0], result[1], result[2]", "def get_perspective_transform(src, dst):\n if not isinstance(src, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(src)))\n\n if not isinstance(dst, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(dst)))\n\n if not src.shape[-2:] == (4, 2):\n raise ValueError(\"Inputs must be a Bx4x2 tensor. Got {}\".format(src.shape))\n\n if not src.shape == dst.shape:\n raise ValueError(\"Inputs must have the same shape. Got {}\".format(dst.shape))\n\n if not (src.shape[0] == dst.shape[0]):\n raise ValueError(\n \"Inputs must have same batch size dimension. Expect {} but got {}\".format(src.shape, dst.shape)\n )\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n p = []\n for i in [0, 1, 2, 3]:\n p.append(_build_perspective_param(src[:, i], dst[:, i], 'x'))\n p.append(_build_perspective_param(src[:, i], dst[:, i], 'y'))\n\n # A is Bx8x8\n A = torch.stack(p, dim=1)\n\n # b is a Bx8x1\n b = torch.stack(\n [\n dst[:, 0:1, 0],\n dst[:, 0:1, 1],\n dst[:, 1:2, 0],\n dst[:, 1:2, 1],\n dst[:, 2:3, 0],\n dst[:, 2:3, 1],\n dst[:, 3:4, 0],\n dst[:, 3:4, 1],\n ],\n dim=1,\n )\n\n # solve the system Ax = b\n X, LU = _torch_solve_cast(b, A)\n\n # create variable to return\n batch_size = src.shape[0]\n M = torch.ones(batch_size, 9, device=src.device, dtype=src.dtype)\n M[..., :8] = torch.squeeze(X, dim=-1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def convert_matmul(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[0]), g.get_node(op.input(\"Y\")[0])]\n a_shape = infer_shape(inputs[0])\n b_shape = infer_shape(inputs[1])\n if op.has_attr(\"trans_x\"):\n # for matmul_v2\n trans_x = op.attr(\"trans_x\")\n trans_y = op.attr(\"trans_y\")\n else:\n # for matmul\n trans_x = op.attr(\"transpose_X\")\n trans_y = op.attr(\"transpose_Y\")\n if trans_x:\n perm = list(range(len(a_shape)))\n perm[-2] = len(a_shape) - 1\n perm[-1] = len(a_shape) - 2\n inputs[0] = _op.transpose(inputs[0], axes=perm)\n if trans_y:\n perm = list(range(len(b_shape)))\n perm[-2] = len(b_shape) - 1\n perm[-1] = len(b_shape) - 2\n inputs[1] = _op.transpose(inputs[1], axes=perm)\n\n # This implemention almost keeps same with ONNX\n # Need to check input shape as batch matmul must be supported.\n a_shape = shape_of(inputs[0], dtype=\"int32\")\n a_rank = infer_shape(a_shape)[0]\n b_shape = shape_of(inputs[1], dtype=\"int32\")\n b_rank = infer_shape(b_shape)[0]\n # When performing a batch matmul, we need to properly 
handle N-dim shapes.\n if a_rank > 2 or b_rank > 2:\n\n def flatten_to_nd(x, x_shape, nd=3):\n ndims = infer_shape(x_shape)[0]\n if ndims == nd:\n return x\n newshape = _op.concatenate(\n [\n _expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),\n _op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),\n ],\n 0,\n )\n out = _op.reshape(x, fold_constant(newshape))\n return out\n\n b_type = infer_type(inputs[1])\n # Convert to dense if the second matrix is 2d and non-dynamic\n if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):\n a = flatten_to_nd(inputs[0], a_shape, 2)\n b = _op.transpose(inputs[1])\n output = _op.nn.dense(a, b)\n else:\n # Convert a and b into 3 dimensional tensors.\n a = flatten_to_nd(inputs[0], a_shape, 3)\n b = flatten_to_nd(inputs[1], b_shape, 3)\n # Transpose matrix dimensions of b.\n b = _op.transpose(b, [0, 2, 1])\n # Perform a batch matmul.\n output = _op.nn.batch_matmul(a, b)\n # Determine the output batch dimension.\n if a_rank > b_rank:\n out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])\n elif a_rank < b_rank:\n out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])\n # If its unclear how broadcasting should be applied, the output\n # shape is determined by choosing the maximum value from each input.\n else:\n out_batch = _op.concatenate(\n [\n _op.maximum(\n _op.strided_slice(a_shape, [i], [i + 1]),\n _op.strided_slice(b_shape, [i], [i + 1]),\n )\n for i in range(a_rank - 2)\n ],\n 0,\n )\n # Reshape output to original dimensions.\n final_shape = _op.concatenate(\n [\n out_batch,\n _op.strided_slice(\n a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]\n ),\n _op.strided_slice(\n b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]\n ),\n ],\n 0,\n )\n out = _op.reshape(output, fold_constant(final_shape))\n else:\n if b_rank == 1:\n inputs[1] = _op.expand_dims(inputs[1], 1, 1)\n # Otherwise a simple dense op will get the job done.\n input_1_t = _op.transpose(inputs[1], axes=(1, 0))\n out = _op.nn.dense(inputs[0], input_1_t)\n if b_rank == 1:\n out = _op.squeeze(out, axis=[-1])\n if op.has_attr(\"alpha\"):\n alpha = op.attr(\"alpha\")\n if not np.isclose(alpha, 1.0):\n out = out * _expr.const(alpha).astype(\"float32\")\n g.add_node(op.output(\"Out\")[0], out)", "def small_transf(x, y, a, xc, yc, ac, iref):\n x += xc - x[iref]\n y += yc - y[iref]\n x, y = rotate_xy(x, y, x[iref], y[iref], ac - a[iref])\n a += ac - a[iref]\n return x, y, a", "def affine(params, x):\n return np.dot(params['w'], x) + params['b']", "def apply_transform_matrix(self, img: np.ndarray, transform_matrix):\n h, w = img.shape[0], img.shape[1]\n transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)\n img = np.rollaxis(img, 2, 0)\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n\n channel_images = [scipy.ndimage.interpolation.affine_transform(\n x_channel,\n final_affine_matrix,\n final_offset,\n order=1,\n mode=self.fill_mode,\n cval=self.cval) for x_channel in img]\n img = np.stack(channel_images, axis=0)\n img = np.rollaxis(img, 0, 2 + 1)\n # img = apply_affine_transform(img, transform_matrix, channel_axis=2, fill_mode=self.fill_mode, cval=self.cval) # apply_transform\n return img", "def rescale(A, d1, d2):\n \n A[0, 1] = A[0, 1] * (d2 / d1)\n A[1, 0] = A[1, 0] * (d1 / d2)\n \n return A", "def einsum(ops, *args):\n\n if len(args) != 2:\n raise ValueError(\"Currently only two operands are supported\")\n\n inops, outops = ops.split('->')\n inops = inops.split(',')\n\n # 
All indices that are in input AND in output are multiplies\n multiplies = sorted(list(set(inops[0]) & set(inops[1]) & set(outops)))\n # All indices that are in input BUT NOT in output are sum contractions\n sums = sorted(list((set(inops[0]) & set(inops[1])) - set(outops)))\n\n # Map sums and indices to axis integers\n multiplies = [[inop.find(x) for x in multiplies] for inop in inops]\n sums = [[inop.find(x) for x in sums] for inop in inops]\n\n # Find output axes in input axes for final transpose\n # Values very likely lie outside of output tensor shape, so\n # just map them values to their rank (index in ordered list)\n transpose = [''.join(inops).find(x) for x in outops]\n transpose = scipy.stats.rankdata(transpose).astype(int) - 1\n\n return tensordot2(*args, sum=sums, multiply=multiplies).transpose(transpose)", "def compose(scales, offsets, rotations, origin=None):\n\n preRotate = np.eye(4)\n postRotate = np.eye(4)\n\n rotations = np.array(rotations)\n\n if rotations.shape == (3,):\n rotations = axisAnglesToRotMat(*rotations)\n\n if origin is not None:\n preRotate[ 0, 3] = -origin[0]\n preRotate[ 1, 3] = -origin[1]\n preRotate[ 2, 3] = -origin[2]\n postRotate[0, 3] = origin[0]\n postRotate[1, 3] = origin[1]\n postRotate[2, 3] = origin[2]\n\n scale = np.eye(4, dtype=np.float64)\n offset = np.eye(4, dtype=np.float64)\n rotate = np.eye(4, dtype=np.float64)\n\n scale[ 0, 0] = scales[ 0]\n scale[ 1, 1] = scales[ 1]\n scale[ 2, 2] = scales[ 2]\n offset[ 0, 3] = offsets[0]\n offset[ 1, 3] = offsets[1]\n offset[ 2, 3] = offsets[2]\n\n rotate[:3, :3] = rotations\n\n return concat(offset, postRotate, rotate, preRotate, scale)", "def augment_translate(image, mask, trans_range):\n\n tr_x = trans_range * np.random.uniform() - trans_range / 2\n tr_y = trans_range * np.random.uniform() - trans_range / 2\n trans_M = np.array([[1, 0, tr_x], [0, 1, tr_y]], dtype=np.float32)\n width, height, _ = image.shape\n image = cv2.warpAffine(image, trans_M, (width, height))\n if mask is not None:\n mask = cv2.warpAffine(mask, trans_M, (width, height))\n return image, mask", "def estimate_translation_np(S, joints_2d, joints_conf, focal_length=5000, img_size=224):\n\n num_joints = S.shape[0]\n # focal length\n f = np.array([focal_length, focal_length])\n # optical center\n center = np.array([img_size / 2., img_size / 2.])\n\n # transformations\n Z = np.reshape(np.tile(S[:, 2], (2, 1)).T, -1)\n XY = np.reshape(S[:, 0:2], -1)\n O = np.tile(center, num_joints)\n F = np.tile(f, num_joints)\n weight2 = np.reshape(np.tile(np.sqrt(joints_conf), (2, 1)).T, -1)\n\n # least squares\n Q = np.array([F * np.tile(np.array([1, 0]), num_joints), F * np.tile(np.array([0, 1]), num_joints),\n O - np.reshape(joints_2d, -1)]).T\n c = (np.reshape(joints_2d, -1) - O) * Z - F * XY\n\n # weighted least squares\n W = np.diagflat(weight2)\n Q = np.dot(W, Q)\n c = np.dot(W, c)\n\n # square matrix\n A = np.dot(Q.T, Q)\n b = np.dot(Q.T, c)\n\n # solution\n trans = np.linalg.solve(A, b)\n\n return trans", "def scale_rotate_translate_coords(\n coords: np.array,\n m: np.array\n):\n return np.dot(\n m, np.vstack((coords.transpose(), np.ones(len(coords))))\n ).transpose()[:, :2]", "def translate(img, shift):\n\tgray = grayscale(img)\n\ttmp = img.copy()\n\trows, cols = gray.shape\n\tM = np.float32([[1, 0, shift[0]], [0, 1, shift[1]]]) # Translation Matrix\n\tdst = cv2.warpAffine(tmp, M, (cols, rows))\n\treturn dst", "def fit(self,data):\n A = data[:,0:self.dim]\n B = data[:,self.dim:]\n\n #Procrustean: CODE TO SOLVE FOR OPTIMAL (by least squares) 
EUCLIDEAN TRANSFORM (currently allows reflections):\n bReflection = True\n\n muA = np.mean(A,axis=0)\n muB = np.mean(B,axis=0)\n A0 = A - np.tile(muA,[A.shape[0],1])\n B0 = B - np.tile(muB,[B.shape[0],1])\n\n ssqA = np.sum(A0**2)\n ssqB = np.sum(B0**2)\n normA = np.sqrt(ssqA)\n normB = np.sqrt(ssqB)\n\n A0 = A0 / normA\n B0 = B0 / normB\n\n X = np.dot(A0.T,B0)\n [L, D, M] = np.linalg.svd(X)\n rotation = np.dot(M.T,L.T)\n\n traceTA = sum(D)\n if self.bScale:\n scale = traceTA * normA / normB\n else:\n scale = 1\n translation = muA - np.dot(scale*muB,rotation); \n\n self.scale = scale\n self.rot = rotation\n self.trans = translation\n return scale,rotation,translation\n #A - (np.dot((scale * B),rotation) + translation)", "def transform_space(normal_map, rotmat):\n rotmat = np.array(rotmat)\n orig_shape = normal_map.shape\n normal = normal_map.reshape(-1, 3).T # 3-by-N\n\n normal_trans = rotmat.dot(normal)\n\n normal_map_trans = normal_trans.T.reshape(orig_shape)\n return normal_map_trans", "def translate(geom, xoff=0.0, yoff=0.0, zoff=0.0):\n matrix = (1.0, 0.0, 0.0,\n 0.0, 1.0, 0.0,\n 0.0, 0.0, 1.0,\n xoff, yoff, zoff)\n return affine_transform(geom, matrix)", "def L1Uv2(A, d):\n n = shape(A)[0]\n for k in range(1,n):\n km = array([0, k - d]).max() # First index of r we need to update\n for r in range(km, k - 1):\n A[k, r] /= A[r, r]\n uk = array([k, r + d + 1]).min() # last index not included\n A[k, (r + 1):uk] -= A[r, (r + 1):uk]*A[k, r]\n A[k, k - 1] /= A[k - 1,k - 1] \n for r in range(km, k):\n uk = array([k + 1, r + d + 1]).min() # last index not included\n A[(r + 1):uk, k] -= A[(r + 1):uk, r]*A[r, k]", "def mca_transformer(transform_data):\n M, dims, index, v0v1 = transform_data\n def transform(dfp):\n # dims, index, v0v1\n P = np.zeros((len(dfp), dims), dtype=float)\n print(\"transforming\")\n for i, (_, row) in (enumerate(dfp.iterrows())):\n ivec = np.zeros(M)\n for col, val in zip(row.index, row):\n if (col, val) in index:\n ivec[index[col, val]] = 1\n proj = ivec.dot(v0v1)\n assert(all(proj.imag == 0))\n P[i,:] = proj.real\n return P\n return transform", "def transform_image(image, transform, mapping, alpha = 1, incr_x = 10, incr_y = 10):\r\n background = [255, 255, 255, 0]\r\n width, height = image.size\r\n image_in = np.array(image.convert(\"RGBA\"))\r\n image_out = [[background[:] for j in range(width)] for i in range(height)]\r\n transform_row = []\r\n for i in range(0, width + incr_x, incr_x):\r\n transform_row.append(transform(vec2(i, 0), mapping, alpha))\r\n for i in range(incr_y, height + incr_y, incr_y):\r\n p_ur = transform_row[0]\r\n p_lr = transform_row[0] = transform(vec2(0, i), mapping, alpha)\r\n for j in range(incr_x, width + incr_x, incr_x):\r\n p_ul = p_ur\r\n p_ll = p_lr\r\n p_ur = transform_row[j//incr_x]\r\n p_lr = transform_row[j//incr_x] = transform(vec2(j, i), mapping, alpha)\r\n a = p_ur - p_ul\r\n b = p_ll - p_ul\r\n det = a.x*b.y - a.y*b.x\r\n if (det != 0.0):\r\n for p in triangle(p_ul, p_ur, p_ll, width, height):\r\n c = p - p_ul\r\n rx = (b.y*c.x - b.x*c.y)/det\r\n ry = (a.x*c.y - a.y*c.x)/det\r\n image_out[p.y][p.x] = image_in[min(height - 1, max(0, round(i + (ry - 1)*incr_y)))][min(width - 1, max(0, round(j + (rx - 1)*incr_x)))]\r\n a = p_lr - p_ll\r\n b = p_lr - p_ur\r\n det = a.x*b.y - a.y*b.x\r\n if (det != 0.0):\r\n p_ulr = p_ur + p_ll - p_lr\r\n for p in triangle(p_ur, p_ll, p_lr, width, height):\r\n c = p - p_ulr\r\n rx = (b.y*c.x - b.x*c.y)/det\r\n ry = (a.x*c.y - a.y*c.x)/det\r\n image_out[p.y][p.x] = image_in[min(height - 1, max(0, round(i 
+ (ry - 1)*incr_y)))][min(width - 1, max(0, round(j + (rx - 1)*incr_x)))]\r\n image_out = Image.fromarray(np.uint8(image_out))\r\n return image_out", "def compose_transforms(*transforms):\n from functools import reduce\n\n for transform in transforms:\n vg.shape.check(locals(), \"transform\", (4, 4))\n\n if len(transforms) == 0:\n return np.eye(4)\n\n return reduce(np.dot, reversed(transforms))", "def transform(pt, center, scale, res, invert=0, rot=0):\n t = get_transform_pose(center, scale, res, rot=rot)\n if invert:\n t = np.linalg.inv(t)\n new_pt = np.pad(pt,((0,0),(0,1)),mode = 'constant', constant_values = 1).T\n new_pt = np.dot(t, new_pt)\n return new_pt", "def apply(self,v):\n return np.tensordot(self._transform, v, axes=([1],[0])) \\\n + self._translation", "def TransformVector(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_TransformVector(self, *args)", "def _inv22_vectorized(M):\n assert (M.ndim == 3)\n assert (M.shape[-2:] == (2, 2))\n M_inv = np.empty_like(M)\n delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n return M_inv", "def solve_rigid_transform(X, Y, debug=True):\n assert X.shape[0] == Y.shape[0] >= 3\n assert X.shape[1] == Y.shape[1] == 3\n A = X.T # (3,N)\n B = Y.T # (3,N)\n\n # Look for Inge Soderkvist's solution online if confused.\n meanA = np.mean(A, axis=1, keepdims=True)\n meanB = np.mean(B, axis=1, keepdims=True)\n A = A - meanA\n B = B - meanB\n covariance = B.dot(A.T)\n U, sigma, VH = np.linalg.svd(covariance) # VH = V.T, i.e. numpy transposes it for us.\n\n V = VH.T\n D = np.eye(3)\n D[2,2] = np.linalg.det( U.dot(V.T) )\n R = U.dot(D).dot(V.T)\n t = meanB - R.dot(meanA)\n RB_matrix = np.concatenate((R, t), axis=1)\n\n #################\n # SANITY CHECKS #\n #################\n\n print(\"\\nBegin debug prints for rigid transformation from A to B:\")\n print(\"meanA:\\n{}\\nmeanB:\\n{}\".format(meanA, meanB))\n print(\"Rotation R:\\n{}\\nand R^TR (should be identity):\\n{}\".format(R, (R.T).dot(R)))\n print(\"translation t:\\n{}\".format(t))\n print(\"RB_matrix:\\n{}\".format(RB_matrix))\n\n # Get residual to inspect quality of solution. Use homogeneous coordinates for A.\n # Also, recall that we're dealing with (3,N) matrices, not (N,3).\n # In addition, we don't want to zero-mean for real applications.\n A = X.T # (3,N)\n B = Y.T # (3,N)\n\n ones_vec = np.ones((1, A.shape[1]))\n A_h = np.concatenate((A, ones_vec), axis=0)\n B_pred = RB_matrix.dot(A_h)\n assert B_pred.shape == B.shape\n\n # Careful! 
Use raw_errors for the RF, but it will depend on pred-targ or targ-pred.\n raw_errors = B_pred - B # Use pred-targ, of shape (3,N)\n l2_per_example = np.sum((B-B_pred)*(B-B_pred), axis=0)\n frobenius_loss = np.mean(l2_per_example)\n\n if debug:\n print(\"\\nInput, A.T:\\n{}\".format(A.T))\n print(\"Target, B.T:\\n{}\".format(B.T))\n print(\"Predicted points:\\n{}\".format(B_pred.T))\n print(\"Raw errors, B-B_pred:\\n{}\".format((B-B_pred).T))\n print(\"Mean abs error per dim: {}\".format( (np.mean(np.abs(B-B_pred), axis=1))) )\n print(\"Residual (L2) for each:\\n{}\".format(l2_per_example.T))\n print(\"loss on data: {}\".format(frobenius_loss))\n print(\"End of debug prints for rigid transformation.\\n\")\n\n assert RB_matrix.shape == (3,4)\n return RB_matrix", "def get_rotation_matrix2d(center: Tensor, angle: Tensor, scale: Tensor) -> Tensor:\n if not isinstance(center, Tensor):\n raise TypeError(f\"Input center type is not a Tensor. Got {type(center)}\")\n\n if not isinstance(angle, Tensor):\n raise TypeError(f\"Input angle type is not a Tensor. Got {type(angle)}\")\n\n if not isinstance(scale, Tensor):\n raise TypeError(f\"Input scale type is not a Tensor. Got {type(scale)}\")\n\n if not (len(center.shape) == 2 and center.shape[1] == 2):\n raise ValueError(f\"Input center must be a Bx2 tensor. Got {center.shape}\")\n\n if not len(angle.shape) == 1:\n raise ValueError(f\"Input angle must be a B tensor. Got {angle.shape}\")\n\n if not (len(scale.shape) == 2 and scale.shape[1] == 2):\n raise ValueError(f\"Input scale must be a Bx2 tensor. Got {scale.shape}\")\n\n if not (center.shape[0] == angle.shape[0] == scale.shape[0]):\n raise ValueError(\n \"Inputs must have same batch size dimension. Got center {}, angle {} and scale {}\".format(\n center.shape, angle.shape, scale.shape\n )\n )\n\n if not (center.device == angle.device == scale.device) or not (center.dtype == angle.dtype == scale.dtype):\n raise ValueError(\n \"Inputs must have same device Got center ({}, {}), angle ({}, {}) and scale ({}, {})\".format(\n center.device, center.dtype, angle.device, angle.dtype, scale.device, scale.dtype\n )\n )\n\n shift_m = eye_like(3, center)\n shift_m[:, :2, 2] = center\n\n shift_m_inv = eye_like(3, center)\n shift_m_inv[:, :2, 2] = -center\n\n scale_m = eye_like(3, center)\n scale_m[:, 0, 0] *= scale[:, 0]\n scale_m[:, 1, 1] *= scale[:, 1]\n\n rotat_m = eye_like(3, center)\n rotat_m[:, :2, :2] = angle_to_rotation_matrix(angle)\n\n affine_m = shift_m @ rotat_m @ scale_m @ shift_m_inv\n return affine_m[:, :2, :] # Bx2x3", "def transform2h(self, x, y, m):\n A = torch.matmul(m, torch.stack([x, y, torch.ones(len(x))]))\n xt = A[0, :] / A[2, :]\n yt = A[1, :] / A[2, :]\n return xt, yt", "def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out", "def translation_3D(img, trans_x, trans_y, trans_z, cval=0.):\n \n if trans_x > 0:\n img[trans_x:,...] = img[:-trans_x,...] \n img[:trans_x,...] = cval\n elif trans_x < 0:\n img[:trans_x,...] = img[-trans_x:,...] \n img[trans_x:,...] 
= cval\n \n if trans_y > 0:\n img[:,trans_y:,:,:] = img[:,:-trans_y,:,:] \n img[:,:trans_y,:,:] = cval\n elif trans_y < 0:\n img[:,:trans_y,:,:] = img[:,-trans_y:,:,:] \n img[:,trans_y:,:,:] = cval\n \n if trans_z > 0:\n img[...,trans_z:,:] = img[...,:-trans_z,:] \n img[...,:trans_z,:] = cval\n elif trans_z < 0:\n img[...,:trans_z,:] = img[...,-trans_z:,:] \n img[...,trans_z:,:,:] = cval\n \n return img", "def Translate(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Translate(*args, **kwargs)", "def two_bs2x4_transform(t1, r1, t2, r2, input_state):\n size = len(input_state)\n output_state = np.zeros((size,) * 4, dtype=complex)\n for m in range(size):\n for n in range(size):\n\n for k in range(m + 1):\n for l in range(n + 1):\n # channels indexes\n ind1 = k\n ind2 = m - k\n ind3 = l\n ind4 = n - l\n coeff = input_state[m, n] * t1**(m - k) * (1j*r1)**k * t2**(n - l) * (1j*r2)**l * factorial(m) * factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[ind1, ind2, ind3, ind4] = output_state[ind1, ind2, ind3, ind4] + coeff\n\n return output_state", "def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)", "def o2transform(self, x, w):\n\n o2t = lambda x, w: K.dot(w, K.dot(x, K.transpose(w)))\n return tf.map_fn(o2t, [x, w])", "def batch_vTAv(A: np.ndarray, v: np.ndarray) -> np.ndarray:\n\n \"\"\" Faster than\n Av = np.matmul(A, v[...,:,None]) # [B, X, 1]\n return np.matmul(v[...,None,:], Av).squeeze((-2, -1)) # [B]\n \"\"\"\n\n return np.einsum(\"...k,...kl,...l->...\", v, A, v)", "def transform(tvec1, rvec1, tvec2, rvec2):\n op = localToGlobal(np.squeeze(tvec2), np.squeeze(rvec2))\n tvec3 = []\n for tvec in tvec1:\n #tvec = tvec.squeeze()\n tvec3.append(np.matmul(op, tvec))\n tvec3 = np.array(tvec3)\n return tvec3", "def estimate_translation(points1, points2):\n xs = points1[:,0]\n ys = points1[:,1]\n x2s = points2[:,0]\n y2s = points2[:,1]\n N = len(xs)\n \n # build b\n b = np.empty((N+N, 1))\n b[::2,0] = xs\n b[1::2,0] = ys\n \n # build A\n A = np.empty((N+N, 3))\n A[::2, 0] = x2s\n A[1::2,0] = y2s\n A[::2, 1] = np.ones(N)\n A[1::2, 1] = np.zeros(N)\n A[::2, 2] = np.zeros(N)\n A[1::2, 2] = np.ones(N)\n \n A = np.linalg.lstsq(A, b)[0][:,0]\n M = [[1, 0, A[1]],\n [0, 1, A[2]]]\n return (A[0], A[1], A[2], np.array(M))", "def translate(dx,dy,Mat):\r\n # MT is the Translation (3 X 3) Matrix\r\n MT=[[1,0,dx],[0,1,dy],[0,0,1]]\r\n Translated= Multiply(MT,Mat)\r\n # Translated[0][0] is the updated x coordinate\r\n # Translated[1][0] is the updated y coordinate\r\n return Translated[0][0],Translated[1][0],Translated[2][0]", "def transl(x, y, z):\n displace_vector = [[x],\n [y],\n [z]]\n return np.matrix(displace_vector)", "def _apply_transformations(structure, rotations, translations):\n # Additional first dimension for 'structure.repeat()'\n assembly_coord = np.zeros((len(rotations),) + structure.coord.shape)\n\n # Apply corresponding transformation for each copy in the assembly\n for i, (rotation, translation) in enumerate(zip(rotations, translations)):\n coord = structure.coord\n # Rotate\n coord = matrix_rotate(coord, rotation)\n # Translate\n coord += translation\n assembly_coord[i] = coord\n\n return repeat(structure, assembly_coord)", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def get_4x4_transform(scale_x, scale_y, trans_x, trans_y, trans_z):\r\n transform = [[scale_x, 0.0, 0.0, trans_x],\r\n [0.0, scale_y, 0.0, trans_y],\r\n [0.0, 0.0, 1.0, trans_z],\r\n 
[0.0, 0.0, 0.0, 1.0]]\r\n return transform", "def rigid_transform_3d(xs,ys):\n assert xs.shape == ys.shape\n assert xs.shape[0] == 3, 'The points must be of dimmensionality 3'\n\n # find centroids and H\n x_centroid = np.mean(xs, axis=1)[:, np.newaxis]\n y_centroid = np.mean(ys, axis=1)[:, np.newaxis]\n \n H = (xs - x_centroid)@(ys - y_centroid).T\n\n # find rotation\n U, S, Vt = np.linalg.svd(H)\n rotation = [email protected]\n\n # handling reflection\n if np.linalg.det(rotation) < 0:\n Vt[2, :] *= -1\n rotation = np.dot(Vt.T, U.T)\n \n # find translation\n translation = y_centroid - rotation@x_centroid\n \n return translation, rotation" ]
[ "0.65614295", "0.634234", "0.61342865", "0.61295545", "0.61271703", "0.59918904", "0.5945617", "0.5937629", "0.5931673", "0.59223235", "0.5891538", "0.58714736", "0.5866033", "0.58649784", "0.58559966", "0.5835464", "0.5825817", "0.58026475", "0.5768168", "0.575795", "0.5744753", "0.5701919", "0.56939435", "0.56847537", "0.5679792", "0.5620446", "0.5610276", "0.55997044", "0.55902857", "0.55864346", "0.55767435", "0.55688345", "0.5565349", "0.55588764", "0.55565774", "0.55440617", "0.5542827", "0.55395", "0.5529177", "0.55218875", "0.55043364", "0.5492529", "0.54862", "0.5481359", "0.5471187", "0.5471187", "0.5470529", "0.5470529", "0.5469175", "0.5460456", "0.54527116", "0.54435337", "0.5439173", "0.5433005", "0.54272115", "0.54128236", "0.54107213", "0.540762", "0.54030246", "0.53978175", "0.5395745", "0.53893244", "0.53821987", "0.53804195", "0.5371596", "0.53561425", "0.53515184", "0.5351028", "0.5347509", "0.5338666", "0.5337644", "0.53340507", "0.53336066", "0.5332591", "0.5330443", "0.53278005", "0.532304", "0.53145146", "0.5314213", "0.5310391", "0.53097415", "0.53078336", "0.5304553", "0.5302978", "0.53003705", "0.5297984", "0.52976555", "0.52917415", "0.52877676", "0.5285267", "0.5285039", "0.5282912", "0.52769226", "0.5272112", "0.5267057", "0.5262855", "0.52626705", "0.52594995", "0.5254268", "0.5253577" ]
0.5372871
64
Filter multiple iterables at once, selecting values at index i such that func(iterables[0][i], iterables[1][i], ...) is True
def sync_filter(func, *iterables): return tuple(zip(*tuple(i for i in zip(*iterables) if func(*i)))) or ((),) * len(iterables)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))", "def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result", "def filter(function, iterable):\n\n if function is bool:\n return [x for x in iterable if x]\n\n return [x for x in iterable if function(x)]", "def ft_filter(function_to_apply, list_of_inputs):\n if not callable(function_to_apply):\n exit(\"First param should be a Function\")\n try:\n object_iter = iter(list_of_inputs)\n except TypeError:\n exit(\"Second Argument must be iterable\")\n lst = []\n for item in list_of_inputs:\n if function_to_apply(item) == True: \n lst.append(item)\n return lst", "def custom_filter(function, iterable):\n map_list = []\n\n for i in iterable:\n if function(i):\n map_list.append(i)\n\n return map_list", "def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n raise NotImplementedError", "def custom_filter(some_func, iterator_list):\n\n local_iterator = from_input_to_list(iterator_list)\n func_map = [some_func(i) for i in local_iterator]\n true_list = [j for j in func_map if j > 100] # here we can hardcode any condition\n\n return true_list", "def filter_(f: Callable[[A], Maybe[bool]], iterable: Iterable[A]\n ) -> Maybe[Iterable[A]]:\n return cast(Maybe[Iterable[A]], filter_m_(Just, f, iterable))", "def filter_n(function, iterable, **kwargs) -> iter:\n n_pass, n_fail = 0, 0\n\n for item in iterable:\n if function(item, **kwargs):\n yield item\n n_pass += 1\n else:\n n_fail += 1\n\n LOGGER.info(\"Filter %s: output %s rows (dropped %s rows)\", function.__name__, n_pass, n_fail)", "def filter(iterable, filter_func):\n for item in iterable:\n item = filter_func(item)\n if item is not None:\n yield item", "def my_filter(function,lst):\n return list(x for x in lst if function(x))", "def every(lst, fn):\n return reduce(lambda acc, elem: acc and fn(elem), lst, True)", "def filter_compose(*fns: T.Callable[[T.Any], bool]):\n def composite(x):\n for f in fns:\n if not f(x):\n return False\n return True\n\n return composite", "def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in iter(self) if fn(entry))", "def filter_generic(mt_list, func):\r\n return [mt for mt in mt_list if func(mt)]", "def filter(iterable, predicate):\n\n for x in iterable:\n if predicate(x):\n yield x", "def filter(iteratee, seq):\n return _filter(fnc.iteratee(iteratee), seq)", "def __map_and_filter(_input: MutableSequence[T],\n _map: Callable[[T], Any] = lambda x: x,\n _filter: Callable[[T], bool] = lambda x: True) -> MutableSequence[Any]:\n\n return [_map(x) for x in _input if _filter(x)]", "def filter_and(filters):\n def filt(item):\n for f in filters:\n if not f(item):\n return False\n return True\n return filt", "def split_cond(f, iterable):\n split_point = [i for i, e in enumerate(iterable) if f(e)]\n split_point += [len(iterable)]\n return [iterable[i:j] for i, j in zip(split_point[:-1], split_point[1:])]", "def filter_or(filters):\n def filt(item):\n for f in filters:\n if f(item):\n return True\n return False\n return filt", "def ifilter_c(func):\n return functools.partial(ifilter, func)", "def partition(iterable : Iterable[T], predicate : Callable[[T], bool]) -> Tuple[Iterable[T], Iterable[T]]:\n\n iter1, iter2 = tee(iterable)\n return filterfalse(predicate, iter1), filter(predicate, iter2)", "def filter(x):\r\n # Is `x` a container we can iterate on?\r\n iter_on = None\r\n 
if isinstance(x, list) or isinstance(x, tuple):\r\n iter_on = x\r\n elif isinstance(x, dict):\r\n iter_on = x.iteritems()\r\n if iter_on is not None:\r\n return all(filter(y) for y in iter_on)\r\n else:\r\n return (isinstance(x, theano.Variable) or\r\n isinstance(x, theano.scan_module.until))", "def filtered(self, keys, lst=None, func=\"all\"):\n lst = self if lst is None else lst\n if len(lst) == 0:\n raise ValueError(\"No rows in list\")\n return [row for row in self.filter(keys, lst, func=func)]", "def multimap(funcs, iterable):\n\n for f in funcs:\n iterable = map(f, iterable)\n\n return iterable", "def filterRows(function, rows):\n return [y for y in rows if function(y)]", "def cofilter(function, iterator):\n results = []\n\n def checkFilter(notfiltered, item):\n if notfiltered == True:\n results.append(item)\n\n def dofilter(item):\n d = maybeDeferred(function, item)\n d.addCallback(checkFilter, item)\n return d\n\n d = _CoFunCaller(resultCollector=dofilter).coiterate(iterator)\n d.addCallback(lambda _: results)\n return d", "def filter(self, func=bool, *args, **kwargs):\n return self.apply(func, *args, **kwargs).apply(bool) == True", "def filter(self, func: Callable[[Tuple[keyType, valueType]], Tuple[keyType, valueType]]) -> List[Tuple[keyType, valueType]]:\n result = []\n it = self.__iter__()\n while True:\n try:\n key, value = next(it)\n pair = (key, value)\n tmp = func(pair)\n if not (tmp is None):\n result.append(tmp)\n except StopIteration:\n break\n return result", "def any_values(*values):\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield any(v)", "def array_filter(item, func):\n return filter(func, item)", "def ANY(*R):\n return lambda l, i: any(r(l, i) for r in R)", "def test_filter_mixed_function(self):\n for none_type in (False, True):\n for all_type in (False, True):\n for any_type in (False, True, None):\n result = none_type is False and all_type is True \\\n and (any_type is None or any_type is True)\n self._test_filter(none_type, all_type, any_type, result)", "def filter_collection(collection, filter_tuples):\n\n for filter_tuple in filter_tuples:\n collection = collection[collection[filter_tuple[0]] == filter_tuple[1]]\n\n return collection", "def simple_filter(f, l):\n # a list comprehension with an 'if' clause goes the job nicely\n return [ item for item in l if f(item) ]", "def closure(attributes: Set[A],\n fds: List[FunctionalDependency]) -> Set[A]:\n for i, (left, right) in enumerate(fds):\n if left.issubset(attributes):\n return closure(attributes.union(right), fds[:i] + fds[i + 1:])\n return attributes", "def filtered(self, func):\n return PSetList(list(filter(func, self.sets)))", "def filter(self, func):\r\n\r\n d = self.data\r\n f = []\r\n for i in d:\r\n if func(i):\r\n f.append(i)\r\n return Records(f)", "def zip_strict(*iterables) -> Iterator[Tuple[Any, ...]]:\n for values in itertools.zip_longest(*iterables, fillvalue=_NO_VALUE):\n if any(value is _NO_VALUE for value in values):\n msg = f'all iterables must have the same length'\n raise ValueError(msg)\n yield values", "def filter(self, func):\n self._sets.filter(key=func)", "def every(predicate: Predicate[_O]) -> Predicate[Iterable]:\n\n def compare(iterable: Iterable, /) -> bool:\n return all(predicate(item) for item in iterable)\n\n return compare", "def filter_func(interface):\n return (\n all(getattr(interface, key) for key in args) and\n all(getattr(\n interface, key) == val for key, val in kwargs.items())\n )", "def split(func, iterable):\n falsy, truthy = [], []\n for e 
in iterable:\n if func(e):\n truthy.append(e)\n else:\n falsy.append(e)\n return tuple(falsy), tuple(truthy)", "def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a | c, b & d\n yield ((ab | cd) & 2) | ((ab & cd) & 1)", "def union(iterable, *iterables):\n return iter(it.chain(iterable, *iterables))", "def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a & c, b | d\n yield ((ab & cd) & 2) | ((ab | cd) & 1)", "def partition(pred, iterable):\n stream = list(iterable)\n matched = list(itertools.takewhile(pred, stream))\n unmatched = list(itertools.dropwhile(pred, stream))\n return matched, unmatched", "def all(seq, pred=None):\n for elem in itertoos.ifilterfalse(pred, seq):\n return False\n return True", "def any_yields(functions, value):\n return any(f(value) for f in functions)", "def filter(self, func: Callable[[T], bool]) -> 'List[T]':\n return [v for v in self.array if func(v)]", "def combination2_with_pruning(items: Sequence[U], condition: Callable[[U, U], bool]) -> Iterator[Tuple[U, U]]:\n for i in range(len(items) - 1):\n item1 = items[i]\n if not condition(item1, item1):\n break\n for j in range(i + 1, len(items)):\n item2 = items[j]\n if not condition(item1, item2):\n break\n yield item1, item2", "def Filter(\r\n data: Iterable,\r\n filter_fct: Callable,\r\n return_none: bool = True,\r\n lazy: bool = True,\r\n workers: int = 1,\r\n buffer_len: int = 3,\r\n *arg: List,\r\n **kwargs: Dict\r\n) -> Union[FilterAbstract, DataAbstract, np.ndarray, List]:\r\n if lazy:\r\n return FilterAbstract(data, filter_fct, *arg, return_none=return_none, **kwargs)\r\n else:\r\n # ToDo: replace by a list and np equivalent\r\n tmp = DataAbstract(\r\n FilterAbstract(data, filter_fct, *arg, return_none=True, **kwargs),\r\n output_datatype=\"list\",\r\n workers=workers,\r\n buffer_len=buffer_len,\r\n )[:]\r\n if return_none:\r\n return tmp\r\n else:\r\n return DataAbstract(\r\n SelectAbstract(tmp, lambda x, k: x[k] is not None),\r\n workers=workers,\r\n buffer_len=buffer_len,\r\n )[:]", "def apply_funs(x, funs) :\n res = True\n for f in funs :\n res = f(x)\n if not res :\n break\n return res", "def filterfalse(iterable, predicate):\n for x in iterable:\n if not predicate(x):\n yield x", "def multi_apply(func, *args, **kwargs):\n\n pfunc = partial(func, **kwargs) if kwargs else func\n map_results = map(pfunc, *args)\n return tuple(map(list, zip(*map_results)))", "def simple_filter_2(f, l):\n # alternative implementation: the same as above, but without comprehension.\n filtered_l = []\n for item in l:\n if f(item):\n filtered_l.append(item)\n return filtered_l\n # I think the list comprehension is not only shorter, but also more\n # readable.", "def filter_f(fns, ltaper, lowerp, upperp, utaper, eqband, eqltaper, equtaper, npow, bindir):\n # filtercmd = bindir+\"/filter4\"\n filtercmd = bindir + \"/filter4 1>/dev/null\"\n for src, tar, eqtar in fns:\n p = sp.Popen(filtercmd, shell=True, bufsize=0, stdin=sp.PIPE, stdout=None)\n child = p.stdin\n print >> child, ltaper, lowerp, upperp, utaper, npow, src, tar + '_tmp'\n err = child.close()\n ret = p.wait()\n if err or ret != 0:\n raise RuntimeError, '%r failed with exit code %d' % (filtercmd, err)\n p = sp.Popen(filtercmd, shell=True, bufsize=0, stdin=sp.PIPE, stdout=None)\n child = p.stdin\n print >> child, eqltaper, eqband[0], eqband[1], 
equtaper, npow, tar + '_tmp', eqtar + '_tmp'\n err = child.close()\n ret = p.wait()\n if err or ret != 0:\n raise RuntimeError, '%r failed with exit code %d' % (filtercmd, err)\n return 1", "def filter(self, inputs: Iterable[Chunk]) -> Iterable[Chunk]:", "def pipe(*functions):\n\n return reduce(compose, functions, identity)", "def apply_filter(atom, isofilters):\n if 'None' in isofilters[0][0]:\n return True\n\n functionfilters = [isofilter for isofilter in isofilters if not isofilter[-1] == 'None']\n functionfilters = ['{}(atom.{}){}={}'.format(f[3], f[0], f[2], f[1]).replace('True', '=').replace('False', '!') for\n f in functionfilters]\n\n if all(getattr(atom, isofilter[0]) == isofilter[1] for isofilter in isofilters if\n isofilter[2] == 'True' and isofilter[-1] == 'None'):\n if all(getattr(atom, isofilter[0]) != isofilter[1] for isofilter in isofilters if\n isofilter[2] == 'False' and isofilter[-1] == 'None'):\n for functionfilter in functionfilters:\n if not eval(functionfilter):\n return False\n return True\n else:\n return False", "def eager_map(func, iterable):\n for _ in map(func, iterable):\n continue", "def _filter(self, probs: Tensor, ids: Tensor) -> Tuple[Tensor, List[int]]:\n raise NotImplementedError", "def fd_projection(attributes: Set[A],\n fds: List[FunctionalDependency]) -> \\\n Iterator[FunctionalDependency]:\n for x in powerset(attributes):\n for b in attributes.intersection(closure(x, fds) - x):\n yield FunctionalDependency(x, {b})", "def ifilter(self, func: Callable[[T], bool]) -> '_[T]':\n return _(filter(func, self.array))", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def all_values(*values):\n print(\"here\")\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield all(v)", "def filter(self, op):\n def op_filter(seqs):\n r = [s for s in seqs if op(s)]\n if len(r) == 0:\n return None\n else:\n return r\n return self.element_wise(op_filter)", "def filter_fn(arr):\n return lambda l: ([n for n in arr if n == l])", "def filter(self, func):\n return AnyPyProcessOutputList(filter(func, self))", "def map(function, iterable):\n\n return [function(x) for x in iterable]", "def filter(self, func):\n n = len(self.data['id'])\n new_table = []\n for i in range(n):\n row = dict([(col, self.data[col][i]) for col in self.cols])\n if func(row):\n new_table.append(row)\n for col in self.cols:\n self.data[col] = []\n for row in new_table:\n self.data[col].append(row[col])\n return self", "def unique(seen, *iterables):\n _add = seen.add\n # return a generator of the unique items and the set of the seen items\n # the seen set will mutate when the generator is iterated over\n return (i for i in chain(*iterables) if i not in seen and not _add(i))", "def any(seq, pred=None):\n for elem in itertools.ifilter(pred, seq):\n return True\n return False", "def _operate_recursive(\n function: Callable[..., V], iterables: RecursiveIterable[V], result: RecursiveList[V]\n) -> RecursiveList[V]:\n for items in zip(*iterables): # type: ignore\n if any(isinstance(item, Iterable) for item in items): # pylint: disable=W1116\n sub_result = [] # type: ignore\n _operate_recursive(function, items, sub_result)\n else:\n sub_result = function(*items) # type: ignore\n result.append(sub_result)\n return result", "def partition(iterable, predicate):\n passes = list()\n fails = list()\n for element in iterable:\n if predicate(element):\n passes.append(element)\n else:\n fails.append(element)\n return passes, fails", "def 
piecewise(x, condlist, funclist, *args, **kw):\n x = asanyarray(x)\n n2 = len(funclist)\n if isscalar(condlist) or \\\n not (isinstance(condlist[0], list) or\n isinstance(condlist[0], ndarray)):\n condlist = [condlist]\n condlist = [asarray(c, dtype=bool) for c in condlist]\n n = len(condlist)\n if n == n2-1: # compute the \"otherwise\" condition.\n totlist = condlist[0]\n for k in range(1, n):\n totlist |= condlist[k]\n condlist.append(~totlist)\n n += 1\n if (n != n2):\n raise ValueError, \"function list and condition list \" \\\n \"must be the same\"\n\n zerod = False\n # This is a hack to work around problems with NumPy's\n # handling of 0-d arrays and boolean indexing with\n # numpy.bool_ scalars\n if x.ndim == 0:\n x = x[None]\n zerod = True\n newcondlist = []\n for k in range(n):\n if condlist[k].ndim == 0:\n condition = condlist[k][None]\n else:\n condition = condlist[k]\n newcondlist.append(condition)\n condlist = newcondlist\n\n y = zeros(x.shape, x.dtype)\n for k in range(n):\n item = funclist[k]\n if not callable(item):\n y[condlist[k]] = item\n else:\n y[condlist[k]] = item(x[condlist[k]], *args, **kw)\n return y", "def find(function, iterable):\n for x in iterable:\n if function(x) == True:\n return x", "def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a & d | b & c, a & c | b & d\n a, b, c, d = ab >> 1, ab & 1, cd >> 1, cd & 1\n yield ((a & d | b & c) << 1) | (a & c | b & d)", "def all_of(*conditions):\n def check():\n for c in conditions:\n if not c():\n return False\n return True\n return check", "def filter(self, keys, lst=None, func=\"all\"):\n f = all if func == \"all\" else any\n\n if lst is None:\n lst = self\n if DEP in lst[0] and INDEP in lst[0]:\n filt_dep = True\n else:\n filt_dep = False\n\n def filt_func(d):\n if filt_dep:\n return f([k in d[INDEP] or k in d[DEP] for k in listify(keys)])\n else:\n return f([k in d for k in listify(keys)])\n\n return filter(filt_func, lst)", "def filter(\n self, items: Iterable[Any], spec: Specification\n ) -> Generator[Any, None, None]:", "def partition(functions: Sequence[FilterFN],\n values: chex.ArrayTree,\n strict: bool = False):\n\n vals, struct = jax.tree_util.tree_flatten(values)\n\n def get_name(k, v):\n del v\n return k\n\n keys = jax.tree_util.tree_leaves(map_named(get_name, \"\", values))\n keys = [str(i) for i, v in enumerate(vals)]\n if not strict:\n functions = list(functions) + [lambda k, v: True]\n\n partitions = [[] for _ in functions]\n names = [[] for _ in functions]\n\n for k, v in zip(keys, vals):\n has_got = False\n for fi, f in enumerate(functions):\n if f(k, v):\n partitions[fi].append(v)\n names[fi].append(k)\n has_got = True\n break\n assert has_got, f\"No matching found for: {k}\"\n data_to_restore = (tuple(keys), tuple(names), struct)\n return partitions, PartitionUnflatten(data_to_restore)", "def filter_all(_):\n return True", "def test_apply_filter_multiple(app):\n with app.app_context():\n filters = [{'column': 'id', 'type': 'geq',\n 'value': '1'}, {'column': 'last_seen', 'type': 'leq',\n 'value': 121212121}]\n users = User.query\n for filter_ in filters:\n users = apply_filter(users, User, filter_)\n\n assert str(users.whereclause) == \\\n 'users.id >= :id_1 AND users.last_seen <= :last_seen_1'", "def conj(fs):\n def feature(s, i):\n return all(f(s, i) for f in fs)\n return feature", "def ft_filter(fnct, tab):\n res = []\n for i in tab:\n if fnct:\n if fnct(i):\n res.append(i)\n 
else:\n if i:\n res.append(i)\n return res", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def intersection_signature(*sets):\n return reduce(operator.and_, sets)", "def map(iterable, function):\n for x in iterable:\n yield function(x)", "def negate_all(f):\n return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]", "def negate_all(f):\n return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]", "def forall(seq,cond):\n for x in seq:\n if not cond(x): return False\n return True", "def forall(seq,cond):\n for x in seq:\n if not cond(x): return False\n return True", "def filter_keys_c(func):\n return partial(filter_keys, func)", "def filter_reads(f, condition, riter):\n for r in riter:\n # TODO: looks like we don't need 'fpass'\n new_r = tuple(dict(mate, fpass=f(mate) and mate['fpass']) for mate in r)\n if condition(tuple(mate['fpass'] for mate in new_r)):\n yield new_r", "def takewhile(iterable, predicate):\n return iter(it.takewhile(predicate, iterable))", "def flatmap(func, *iterable) -> Iterator:\n return map(func, chain(*chain(*iterable)))", "def merge(*iterables):\n return map(None, _IMerge(iterables))", "def starfilter(\n predicate: Callable[..., bool]\n) -> Callable[[AsyncObservable[Any]], AsyncObservable[Any]]:\n\n def handler(\n next: Callable[[Iterable[Any]], Awaitable[None]], args: Iterable[Any]\n ) -> Awaitable[None]:\n if predicate(*args):\n return next(args)\n return aiotools.empty()\n\n return transform(handler)" ]
[ "0.7254669", "0.70515805", "0.6633785", "0.6542129", "0.65260065", "0.6445253", "0.6422799", "0.6301492", "0.628052", "0.6261402", "0.6222613", "0.62073445", "0.6094451", "0.60292643", "0.60198414", "0.5971072", "0.59694105", "0.5959575", "0.5932348", "0.5888483", "0.5888037", "0.5815205", "0.5801351", "0.57704926", "0.5685822", "0.5672363", "0.5665609", "0.5664323", "0.5649664", "0.5608547", "0.55876184", "0.5584935", "0.55525845", "0.554348", "0.5530763", "0.5505836", "0.55039877", "0.55036116", "0.5470476", "0.54651326", "0.5453986", "0.54305315", "0.54141897", "0.5395935", "0.53902763", "0.5388808", "0.53837585", "0.5356086", "0.53296626", "0.532313", "0.5303931", "0.53009236", "0.52973706", "0.52934873", "0.52895236", "0.52647114", "0.52623165", "0.5253925", "0.5250255", "0.524224", "0.52305835", "0.521046", "0.5210385", "0.5207948", "0.52031195", "0.51928824", "0.51922053", "0.5181672", "0.5166836", "0.5161261", "0.5151476", "0.5148335", "0.5138348", "0.5134584", "0.5132696", "0.51181364", "0.51161677", "0.51090664", "0.5102635", "0.5097169", "0.5093273", "0.50771904", "0.5063245", "0.5056905", "0.5045452", "0.50404274", "0.50356346", "0.5031255", "0.5027508", "0.5020666", "0.5018904", "0.5018904", "0.5017644", "0.5017644", "0.5015085", "0.50130063", "0.5007588", "0.49968696", "0.49866658", "0.49814418" ]
0.781177
0
This will log a user out and redirect them to log in again via the AuthN server.
def logout_redirect(request): logout(request) # Build the URL login_url = furl(login_redirect_url(request, next_url=request.build_absolute_uri())) # Check for branding if hasattr(settings, 'SCIAUTH_BRANDING'): logger.debug('SciAuth branding passed') # Encode it and pass it branding = base64.urlsafe_b64encode(json.dumps(settings.SCIAUTH_BRANDING).encode('utf-8')).decode('utf-8') login_url.query.params.add('branding', branding) # Set the URL and purge cookies response = redirect(login_url.url) response.delete_cookie('DBMI_JWT', domain=dbmi_settings.JWT_COOKIE_DOMAIN) logger.debug('Redirecting to: {}'.format(login_url.url)) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logout():\n logout_user()\n return redirect(url_for('auth.index'))", "def signout():\n session.pop('oauth2_state', None)\n session.pop('oauth2_token', None)\n session.pop('discord_user', None)\n return redirect('/')", "def logout_user(request):\r\n # We do not log here, because we have a handler registered\r\n # to perform logging on successful logouts.\r\n logout(request)\r\n if settings.FEATURES.get('AUTH_USE_CAS'):\r\n target = reverse('cas-logout')\r\n else:\r\n target = '/'\r\n response = redirect(target)\r\n response.delete_cookie(\r\n settings.EDXMKTG_COOKIE_NAME,\r\n path='/', domain=settings.SESSION_COOKIE_DOMAIN,\r\n )\r\n return response", "def signout(self):\n username = cherrypy.session.get('username')\n if username is None:\n pass\n else:\n cherrypy.lib.sessions.expire()\n raise cherrypy.HTTPRedirect('/')", "def logout():\n logout_user()\n return redirect(url_for(\".login\"))", "def logout():\n\n # remove the username from the session if it is there\n out_user = current_user.get_id()\n logout_user()\n logger.info(out_user + ' has been logged out.')\n return redirect(url_for('home'))", "def logOut(self):\n self.client.logout()", "def log_out_user(self):\n flask_login.logout_user()", "def ldap_logout():\n timed_out = request.args.get('timed_out', False)\n logout_user()\n create_auth_event(\n auth_event_type=event_type.USER_FAILED_LOG_IN,\n user_guid=session[\"user_id\"],\n new_value={\n 'success': True,\n 'type': current_app.config['AUTH_TYPE'],\n 'timed_out': timed_out\n }\n )\n session.clear()\n if timed_out:\n flash(\"Your session timed out. Please login again\", category=\"info\")\n return redirect(url_for(\"main.index\"))", "def logout():\n if \"username\" in session.keys():\n del session[\"username\"]\n if not app.config[\"DISABLE_AUTH\"]:\n return redirect(url_for(\"login\") + \"?slo\")\n else:\n return redirect(url_for(\"index\"))", "def logout(self):\n with self.client.post(\"/logout\", catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.status_code > 200 and r_hist.status_code < 400:\n response.success()\n self.user.username = None\n # go to UnauthenticatedTasks\n self.interrupt()", "def logout():\n session.pop('userinfo', None)\n # no more steps necessary, because we don't keep the token around\n if 'target' not in session.keys():\n return redirect(\"/\")\n return redirect(session['target'])", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))", "def logout():\n\n logout_user()\n return redirect(url_for('login'))", "def logout():\n logout_user()\n flash('You have successfully been logged out')\n\n # redirect to login page\n return redirect(url_for('auth.login'))", "def logout_user(request):\n if request.user.is_authenticated:\n print(f\"Logging out user {request.user.username}\")\n logout(request)\n else:\n print(\"No authenticated users found\")\n\n return redirect('index')", "def logout():\n\n logout_user()\n flash('You have successfully been logged out.')\n\n return redirect(url_for('auth.login'))", "def logout():\n logout_user()\n return redirect(\"/\")", "def logout():\n\n logout_user()\n return redirect('/')", "def logout():\n logout_user()\n return redirect(url_for('main.index'))", "def logout_user():\n\n session.clear()\n\n return redirect(\"/\")", "def log_out():\n if 
'name' in session:\n PLAN.logout_user(session['name'])\n session.pop('name', None)\n return redirect(url_for('log_in'))\n return redirect(url_for('log_in'))", "def logout():\n logout_user()\n return redirect(url_for('default.home'))", "def log_out(request):\n logout(request)\n return redirect('user_login')", "def logout(self):\r\n # should redirect\r\n check_for_get_code(self, 302, reverse('logout'))", "def logout():\n user = g.user\n do_logout(user)\n\n flash(\"You have successfully logged out.\", 'success')\n return redirect(\"/login\")", "def logout():\n session['user_id'] = None\n session['user_email'] = None\n return redirect(url_for('main'))", "def logout():\n logout_user()\n return redirect(url_for('home'))", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def logout():\n response.cookies['curr_user_id'] = -1\n response.cookies['curr_user_id']['expires'] = -10\n response.cookies['curr_user_id']['path'] = '/'\n redirect(URL('default', 'index'))", "def logout():\n logout_user()\n flash(\"Successfully signed out\", category='info')\n return redirect(url_for('url.index'))", "def gdisconnect():\n try:\n access_token = login_session['credentials']\n except KeyError:\n flash('Failed to get access token')\n return redirect(url_for('home'))\n print(\"User's name was {}.\".format(login_session['name']))\n if access_token is None:\n print('Access Token is None')\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n del login_session['credentials']\n del login_session['user_id']\n del login_session['name']\n del login_session['email']\n print('Successfully logged out.')\n flash('Successfully logged out.')\n return redirect(url_for('home'))", "def logout():\n logout_user()\n return redirect(url_for('index'))", "def logout():\n logout_user()\n return redirect(url_for('index'))", "def signout():\r\n logout_user()\r\n flash(gettext('You are now signed out'), 'success')\r\n return redirect(url_for('home.home'))", "def log_out(request):\n\n if request.user.is_authenticated:\n logout(request)\n\n return redirect(\"/\")", "def logout():\n do_logout()\n return redirect('/login')", "def local_logout(timed_out=False):\n logout_user()\n create_auth_event(\n auth_event_type=event_type.USER_FAILED_LOG_IN,\n user_guid=session[\"user_id\"],\n new_value={\n 'success': True,\n 'type': current_app.config['AUTH_TYPE']\n }\n )\n session.clear()\n if timed_out:\n flash(\"Your session timed out. 
Please login again\", category=\"info\")\n return redirect(url_for(\"main.index\"))", "def logout():\n\n session.pop(\"leader_logged_in\", False)\n session.pop(\"leader_id\", None)\n session.pop(\"leader_email\", None)\n\n return redirect(f\"{BASEPATH}/login\")", "def logout():\r\n logout_user()\r\n flash('You were logged out.')\r\n return redirect(url_for('index'))", "def logout():\n flash('You were logged out')\n session.pop('user_id', None)\n return redirect(url_for('leaderboard'))", "def logout():\n logout_user()\n flash('Successfully logged out.')\n return redirect(request.referrer)", "def logout():\n timeout = request.args.get(\"timeout\", False)\n forced_logout = request.args.get(\"forced_logout\", False)\n\n if current_app.config[\"USE_LDAP\"]:\n return redirect(\n url_for(\"auth.ldap_logout\", timeout=timeout, forced_logout=forced_logout)\n )\n\n elif current_app.config[\"USE_SAML\"]:\n return redirect(\n url_for(\n \"auth.saml\", slo=\"true\", timeout=timeout, forced_logout=forced_logout\n )\n )\n\n elif current_app.config[\"USE_LOCAL_AUTH\"]:\n logout_user()\n session.clear()\n if timeout:\n flash(\"Your session timed out. Please login again\", category=\"info\")\n return redirect(url_for(\"main.index\"))\n\n return abort(404)", "def signout(self):\r\n return self.app.get('/account/signout', follow_redirects=True)", "def logout_user():\n pass", "def logout_user(request):\n\tlogout(request)\n\treturn HttpResponseRedirect('/')", "def sign_out():\n next_url = request.args.get('next')\n session.pop(\"user\")\n flash(\"Sign Out Successful\", \"success\")\n return redirect(next_url or url_for('index'))", "def logout(self):\n self.change_user(self.username, None, None)", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n # redirect to the login page\n return redirect(url_for('view.login'))", "def user_logout(request):\r\n logout(request)\r\n return redirect('accounts:login')", "def logout():\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))", "def logout():\n session.pop('user', None)\n return redirect(url_for('index'))", "def logout():\n session.pop('user', None)\n return redirect(url_for('index'))", "def sign_out():\n\n session.clear()\n response = make_response(redirect('/'))\n response.delete_cookie(\"logged-in\")\n return response", "def log_out():\n\n del session[\"user_id\"]\n # print session[\"user_id\"]\n flash('You were successfully logged out')\n return render_template('homepage.html')\n\n #Additional reference for log in/log out can be found in project tracker project", "def logout_user():\n\n # Delete session data to log out\n del session[\"user_id\"]\n flash(\"Successfully logged out!\")\n\n return redirect(\"/\")", "def logout(self):\n user = self.get_user()\n if user:\n with atomic(self.conf['auth.dbfile']) as cursor:\n logout_user(cursor, user.username)\n request.user = self.tpls['user'] = None\n response.set_cookie(self.conf['auth.cookie_key'], '',\n secret=self.conf['auth.cookie_secret'], path='/')", "def logout(self):\r\n session.clear()\r\n return redirect(\"/user/login\")", "def signout():\n logout_user()\n flash('Logged Out Successfully')\n return redirect(url_for('home.welcome'))", "def sign_out():\n session.clear()\n return redirect(url_for('index'))", "def auth_logout(request):\n logout(request)\n return HttpResponseRedirect( reverse('startpage') )", "def logout():\n # Log user out if they are authenticated\n if current_user.is_authenticated:\n logout_user()\n # Redirect to index 
page\n flash(\"Successfully logged out.\", category=\"success\")\n # Redirect back to index\n return redirect(url_for('main.index'))", "def logout_user():\n\n print \"Logging out.\"\n session.clear()\n flash(\"You are now logged out.\")\n\n return redirect('/')", "def logout():\n \n # using the method from the flask module\n logout_user()\n return redirect(url_for('home'))", "def log_out():\n session.pop('logged_in', None)\n flash('You were logged out.')\n\n return redirect(url_for('blog.show_posts'))", "def logout():\n flash(u'Zostałeś wylogowany')\n session.pop('user_id', None)\n return redirect(url_for('index'))", "def logout():\n\tsession.pop(\"username\", None)\n\treturn redirect(url_for(\"default\"))", "def logout():", "def logout():\n session.pop(\"user\")\n return redirect(url_for(\"home\"))", "def logout_view(request):\n if request.user.is_authenticated:\n logout(request)\n callback_url = \"https://login.cern.ch/adfs/ls/?wa=wsignout1.0&ReturnUrl=\"\n callback_url += \"http%3A//\"\n callback_url += request.META[\"HTTP_HOST\"]\n callback_url += reverse(\"certhelper:logout_status\")\n return HttpResponseRedirect(callback_url)\n return HttpResponseRedirect(\"/\")", "def user_logout():\n\n session.pop('logged_in', None)\n flash('You are now logged out')\n\n return redirect('/')", "def logout():\n # remove user from session cookies\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))", "def logout():\n session.pop(\"username\")\n\n return redirect(\"/\")", "def sign_out(self):\n self.auth.log_out(self._user)\n self._user = None\n print(\"Signed out successfully\")\n return self.logging_page()", "def get_sign_out():\n log_out_url = get_survey_config().account_service_log_out_url\n\n # Check for GET as we don't want to log out for HEAD requests\n if request.method == \"GET\":\n logout_user()\n\n return redirect(log_out_url)", "def logout():\n return logout_user()", "def logout():\n flash('You were logged out')\n session.pop('user_id', None)\n return redirect(url_for('public_timeline'))", "def logout():\n flash(_('You were logged out'))\n session.pop('user_id', None)\n return redirect(url_for('index'))\n #return redirect(url_for('public_timeline'))", "def logout():\n # Remove session data, this will log the user out\n session.pop('loggedin', None)\n session.pop('userid', None)\n session.pop('username', None)\n # Redirect to login page\n return redirect(url_for('site.login'))", "def logout():\n\n do_logout()\n flash('successfully logged out')\n return redirect(\"/\")", "def logout_redirect():\n login_session.clear()\n flash('You have logged out')\n return redirect(url_for('show_homepage'))", "def logout() -> Response:\n if \"zeus_token\" in session:\n session.pop(\"zeus_token\", None)\n logout_user()\n return redirect(url_for(\"general_bp.home\"))", "def logout():\n login()", "def logout():\n # Remove credentials key and user id from session\n session_helper = SessionHelper(session)\n session_helper.delete_credentials_from_session()\n session_helper.delete_user_from_session()\n return redirect(url_for('homepage.home_page_route'))", "def logout_user():\n session.pop('username')\n return redirect('/login')", "def account_logout(request):\n logout(request)\n return redirect('/')", "def logout(user_id):\n if CURRENT_USER_KEY not in session or session[CURRENT_USER_KEY] != user_id:\n raise Unauthorized()\n do_logout()\n return redirect('/')", "def logoutuser(request):\n logout(request)\n return redirect('login')", "def logout():\n if \"username\" in 
session:\n session.pop(\"username\", None)\n flash(\"You have been logged out.\")\n return redirect(url_for(\"index\"))", "def logout():\n\n session.pop(\"username\")\n return redirect(\"/login\")", "def logoutUser(request):\n logout(request)\n return redirect('login')", "def logout():\n flash('You were logged out')\n session.pop('username', None)\n return redirect(url_for('welcome_page'))", "def logout():\n session.pop('username', None)\n return redirect('/')", "def logout():\n user = current_user\n user.authenticated = False\n db.session.add(user)\n db.session.commit()\n logout_user()\n return redirect(url_for('index'))", "def logout():\n user = current_user\n user.authenticated = False\n db.session.add(user)\n db.session.commit()\n logout_user()\n return redirect(url_for('index'))", "def logout():\n # clear user data from session and flag as logged out\n for x in ['provider', 'state', 'user']:\n if x in flask.session:\n del flask.session[x]\n flask.session['logged_in'] = False\n\n flash('logout successful', 'info')\n return redirect(request.referrer or url_for('catalog.index'))", "def logout():\r\n form = LoginForm()\r\n user = current_user\r\n user.authenticated = False\r\n db.session.add(user)\r\n db.session.commit()\r\n logout_user()\r\n return redirect(url_for('hello'))", "def log_out(self):\n self.__is_logged_in = False", "def logout():\n session.pop('microsoft_token', None)\n session.pop('state', None)\n return redirect(url_for('index'))" ]
[ "0.74786663", "0.7398715", "0.7309837", "0.7262442", "0.7236717", "0.72231877", "0.72231495", "0.7213908", "0.72030735", "0.7196016", "0.71688986", "0.71640676", "0.7163589", "0.7163589", "0.7159972", "0.71333206", "0.71177167", "0.7101278", "0.709512", "0.7080758", "0.70622957", "0.70619804", "0.70518124", "0.7048902", "0.7047533", "0.70436454", "0.7042506", "0.7041001", "0.70382464", "0.703031", "0.703031", "0.7026971", "0.70203054", "0.7017529", "0.70174754", "0.70174754", "0.7016223", "0.701253", "0.70117897", "0.70097095", "0.70071757", "0.7005287", "0.70049345", "0.6988702", "0.6987223", "0.6974969", "0.69597787", "0.6958145", "0.6952998", "0.6950597", "0.6950349", "0.6944666", "0.69402254", "0.69374025", "0.69374025", "0.69349843", "0.69334227", "0.69254476", "0.6918375", "0.6907361", "0.68931466", "0.6885426", "0.68829924", "0.68812174", "0.68797654", "0.68771106", "0.68763465", "0.68741906", "0.6872497", "0.68723005", "0.68612707", "0.68534946", "0.68507034", "0.6850427", "0.6848891", "0.6842743", "0.68359977", "0.6830859", "0.682779", "0.6825041", "0.6820646", "0.6814114", "0.6813266", "0.68078786", "0.6799622", "0.6793648", "0.67917746", "0.67888343", "0.67846024", "0.6780611", "0.677589", "0.6765902", "0.6764193", "0.6763408", "0.6760628", "0.675786", "0.675786", "0.67439824", "0.6741452", "0.6737437", "0.6734151" ]
0.0
-1
Given the positions of a list of indices, create a unique key to register the position.
def placementKey( geo): def diagcmp( xyA, xyB): """ Compare two positions based on x + y. If x + y is the same for the two, compare based on x. """ return cmp(xyA[0] + xyA[1], xyB[0] + xyB[1]) or cmp(xyA[0], xyB[0]) sorted = [ tuple(geo[i]) for i in xrange(geo.shape[0]) ] sorted.sort( diagcmp) return hash(tuple(sorted))
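The stored `placementKey` record above is Python 2 code (`cmp`, `xrange`, a comparator-based `sort`) and shadows the built-in `sorted`; it is left untouched because it is part of the dataset row. As a minimal sketch only — assuming `geo` is an (N, 2) array-like of integer positions — a Python 3 equivalent replaces the comparator with a sort key ordered by x + y and then x:

```python
import numpy as np

def placement_key(geo: np.ndarray) -> int:
    """Hash a set of positions so that any ordering of the same
    coordinates yields the same key (diagonal order: x + y, then x)."""
    points = [tuple(geo[i]) for i in range(geo.shape[0])]
    points.sort(key=lambda p: (p[0] + p[1], p[0]))  # replaces the Python 2 cmp-based sort
    return hash(tuple(points))

# The key registers a placement independent of the row order of `geo`.
a = np.array([[0, 1], [2, 3], [1, 1]])
b = np.array([[2, 3], [1, 1], [0, 1]])
assert placement_key(a) == placement_key(b)
```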
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_new_key(idx, key, d):\n\n new_key = \"%s_%d\" % (key, idx)\n if new_key in d:\n return make_new_key(idx + 1, key, d)\n return new_key", "def _make_key(self):\n all_position_values = (chromosome_sort_key(self.chromosome), self.min_position, self.max_position, \n self.strand, self.position_before, self.position_after)\n return all_position_values", "def _create_idx(self):\n self._idx = {}\n for idx, (L, M, N) in enumerate(self.modes):\n if L not in self._idx:\n self._idx[L] = {}\n if M not in self._idx[L]:\n self._idx[L][M] = {}\n self._idx[L][M][N] = idx", "def create_indices():\n conn = connect()\n c = conn.cursor()\n\n # To prevent rematch btw players\n c.execute(\n \"\"\"\n CREATE UNIQUE INDEX matches_uniq_idx ON matches\n (greatest(winner, loser), least(winner, loser));\n \"\"\")\n conn.commit()\n conn.close()", "def gen_keys():", "def create_key_index(name):\r\n global _existing_indices\r\n _existing_indices = _existing_indices or execute_query('g.getIndexedKeys(Vertex.class)')\r\n if name not in _existing_indices:\r\n execute_query(\r\n \"g.createKeyIndex(keyname, Vertex.class); g.stopTransaction(SUCCESS)\",\r\n {'keyname':name}, transaction=False)\r\n _existing_indices = None", "def make_key(*values, **kwargs):\n if len(kwargs) == 0:\n key = tuple(v.key for v in values)\n else:\n res = [v.key for v in values]\n for k, v in sorted(kwargs.items()):\n if isinstance(v, (int, float, str)):\n res.append(k)\n res.append(v)\n else:\n raise TypeError(\n f\"Type {type(v)} is not yet supported, \"\n f\"v={v} and parameter {k!r}.\")\n key = tuple(res)\n return key", "def _key_generated(self, key, index):\n self.keys[self.get_address(key)] = key\n self.last_generated_index = index", "def make_key(*args, **kwargs) -> Hashable:\n if len(args) == 1 and isinstance(args[0], (int, str)):\n return args[0]\n if kwargs:\n args = sum(kwargs.items(), (*args, _KWD_MARK))\n return _HashedSeq(args)", "def setUniqueId(self, idsOfElementaryExpressions):\n if self.name in idsOfElementaryExpressions:\n self.uniqueId = idsOfElementaryExpressions[self.name]\n else:\n error_msg = (\n f'No index is available for expression {self.name}.'\n f' List of available indices: '\n f'{[n for n, i in idsOfElementaryExpressions.items() ]}'\n )\n raise excep.biogemeError(error_msg)", "def initiate_new_key (self,key,index):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n\r\n\r\n self.key_dict[key] = {str(index)}\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO all_keys (keyword, notebook)\"\r\n +\" VALUES (?,?);\",\r\n value_tuple)\r\n value_tuple = (notebookname, key, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO keys_to_indexes\"\r\n +\" (notebook, keyword, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def create_keys(i):\n sk = elgamal.create_sk()\n secret_keys.append(sk)\n\n keys = [0, 0]\n\n keys[x[i]] = elgamal.gen(sk)\n keys[1 - x[i]] = elgamal.o_gen()\n\n public_keys.append(keys)", "def table_key(self, reindex_dict):\n reindexed_marks = []\n for m in self.component1.marks:\n new_m = reindex_dict.get(m)\n if new_m == None:\n if len(reindex_dict) == 0:\n new_m = 0\n else:\n new_m = max(reindex_dict.values())+1\n reindex_dict[m] = new_m\n reindexed_marks.append(new_m)\n return tuple( [self.component1.genus] + sorted(reindexed_marks) )", "def set_index(self, list):\n for key in list:\n self.find_label_by_id(key).index = True", "def create_key ():", "def 
create_unique_index(name, data_type):\r\n global _existing_indices\r\n _existing_indices = _existing_indices or execute_query('g.getIndexedKeys(Vertex.class)')\r\n \r\n if name not in _existing_indices:\r\n execute_query(\r\n \"g.makeType().name(name).dataType({}.class).functional().unique().indexed().makePropertyKey(); g.stopTransaction(SUCCESS)\".format(data_type),\r\n {'name':name}, transaction=False)\r\n _existing_indices = None", "def create_index(args, client):\n policy = {}\n client.index_geo2dsphere_create(args.nspace, args.set,\n LOCBIN, LOCNDX, policy)\n client.index_integer_create(args.nspace, args.set,\n HSHBIN, HSHNDX, policy)", "def crearIndices(self):\n l = self.encontrarCaracteres()\n i=0\n for c in l:\n self.indices[c] = i\n i+=1", "def create_index():", "def update_id2idx(self):\n self._id2idx = {}\n for n, cell in enumerate(self._cell_list):\n self._id2idx[cell.id()] = n", "def new_player_id(index):\n new_player_id.index = index\n def new_id(users):\n \"\"\"\n Generates a unique identifier for a\n list of users. If the list has\n only one user, then the id is mantained\n else, a new user id is created for the\n whole list.\n\n Parameters\n ----------\n users: list of int\n List of 1 or more user's identifiers.\n Precondition: all elements of users\n are smaller than index.\n Returns\n -------\n int\n The old identifier if in the list\n there was only one player\n and or the next new consecutive\n identifier if there where more\n than one.\n\n \"\"\"\n\n #nonlocal index\n if len(users) > 1:\n new_index = new_player_id.index\n new_player_id.index += 1\n else:\n new_index = users[0]\n\n return new_index\n\n return new_id", "def __generate_key_from_list_of(self, list_of_keys):\r\n list_of_keys = list(list_of_keys)\r\n list_of_keys.sort()\r\n return \",\".join(list_of_keys)", "def generate_prototype_key(self, *args):\n return str(uuid.uuid5(UUID_XYZ_NAMESPACE, str((self.X, self.Y, self.Z, *args))))", "def setPositionKey(self, time, index, value, id, view) -> None:\n ...", "def __setKeyIDFromFlaglist( self , flaglist ):\n\t\tif 'idx0' in flaglist:\n\t\t\tself.keyID = 'idx0'\n\t\tif 'idx1' in flaglist:\n\t\t\tself.keyID = 'idx1'\n\t\tif 'idx2' in flaglist:\n\t\t\tself.keyID = 'idx2'", "def CreateIndicesForBasisFilter(indices):\n indices = np.array(indices, dtype = \"uint32\")\n for ind, it in enumerate(indices):\n indices[ind] = ind * it\n return indices", "def keyIndex(self, key):\n key ^= bsr(key, 33)\n key *= 0xff51afd7ed558ccdL\n key ^= bsr(key, 33)\n key *= 0xc4ceb9fe1a85ec53L\n key ^= bsr(key, 33)\n return key", "def make_unique_index(config, stash_code):\n for data in get_section_new_indices(config):\n section_base, old_index, new_index = data\n key = config.value.keys()\n isec_item = stash_code['section']+stash_code['item']\n old_index_sections = old_index.split('_')\n old_section = SECTION_FORMAT.format(section_base, old_index_sections[0], old_index_sections[1])\n new_section = SECTION_FORMAT.format(section_base, isec_item, new_index)\n print 'old_section, new', old_section, new_section\n\n old_node = config.unset([old_section])\n old_id_opt_values = []\n for opt, node in old_node.value.items():\n old_id = rose.CONFIG_DELIMITER.join([old_section, opt])\n old_id_opt_values.append((old_id, opt, node.value))\n # update key value\n config.value.update({new_section: old_node})", "def addPositionKey(\n self,\n time: float,\n positionOrDim: Union[CVec3, int],\n view: Optional[Str] = ...,\n ) -> None:\n ...", "def scatter_embedding_vector(values, indices, bucket_num):\n ps_ids 
= {}\n indices_list = indices.tolist()\n for i, item_id in enumerate(indices_list):\n ps_id = int_to_id(item_id, bucket_num)\n if ps_id not in ps_ids:\n ps_ids[ps_id] = [(i, item_id)]\n else:\n ps_ids[ps_id].append((i, item_id))\n results = {}\n for ps_id, i_item_id in ps_ids.items():\n i = [v[0] for v in i_item_id]\n item_id = [v[1] for v in i_item_id]\n results[ps_id] = (values[i, :], item_id)\n return results", "def test_duplicates_indices(self):\r\n\r\n no_dups = ['1', '2', '3', '4']\r\n\r\n results = duplicates_indices(no_dups)\r\n\r\n expected_results = defaultdict(list)\r\n\r\n self.assertEqual(results, expected_results)\r\n\r\n dups = ['1', '2', '3', '4', '2']\r\n\r\n results = duplicates_indices(dups)\r\n\r\n expected_results = defaultdict(list)\r\n expected_results['2'] = [1, 4]\r\n\r\n self.assertEqual(results, expected_results)", "def add_key(self,key,index):\r\n\r\n #with shelf\r\n\r\n if self.using_shelf:\r\n\r\n if key in self.key_dict:\r\n\r\n self.key_dict[key].add(str(index))\r\n\r\n else:\r\n self.key_dict[key] = {str(index)}\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO all_keys (keyword, notebook)\"\r\n +\" VALUES (?,?);\",\r\n value_tuple)\r\n value_tuple = (notebookname, key, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO keys_to_indexes\"\r\n +\" (notebook, keyword, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def faiss_index(vectors, ids=None):\n index = faiss.IndexFlatL2(vectors.shape[1])\n if ids:\n index = faiss.IndexIDMap(index)\n index.add_with_ids(vectors, np.array([i for i in ids]))\n else:\n index.add(vectors)\n\n return index", "def test_integer_map_key_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_map_key_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_map_key_index, 12345)\n\t)", "def id_index_map(self):\n result = {}\n for index, component_data in iteritems(self):\n result[id(component_data)] = index\n return result", "def _id_for_index(prefix, index):\r\n return \"%s%d\" % (prefix, index + 1)", "def generate_bitmap_to_linear_index_map(bladeTupList, firstIdx):\n bitmap_map = np.zeros(len(bladeTupList), dtype=int)\n for ind, blade in enumerate(bladeTupList):\n bitmap_map[compute_bitmap_representation(blade, firstIdx)] = ind\n return bitmap_map", "def build_index(self, dict_pg_info, list_insert):\n flag_exit = True\n if flag_exit is False:\n self.create_new_index(dict_pg_info)\n self.insert_index(dict_pg_info, list_insert)", "def _make_key(args, kwds, typed,\r\n kwd_mark = (object(),),\r\n fasttypes = {int, str, frozenset, type(None)},\r\n tuple=tuple, type=type, len=len):\r\n # All of code below relies on kwds preserving the order input by the user.\r\n # Formerly, we sorted() the kwds before looping. 
The new way is *much*\r\n # faster; however, it means that f(x=1, y=2) will now be treated as a\r\n # distinct call from f(y=2, x=1) which will be cached separately.\r\n key = args\r\n if kwds:\r\n key += kwd_mark\r\n for item in kwds.items():\r\n key += item\r\n if typed:\r\n key += tuple(type(v) for v in args)\r\n if kwds:\r\n key += tuple(type(v) for v in kwds.values())\r\n elif len(key) == 1 and type(key[0]) in fasttypes:\r\n return key[0]\r\n return _HashedSeq(key)", "def makewordindex(wordset):\n indexmap = {}\n sortwords = sorted(list(wordset))\n for i in range(len(sortwords)):\n word = sortwords[i]\n indexmap[word] = i\n return indexmap", "def makeIndexMap(self):\n\t\tn = self.numRects\n\t\thalfList = [[(j,n-1-i+j) for j in range(i+1)] for i in range(n)]\n\t\tfullList = halfList + [[(j[1],j[0]) for j in i] for i in halfList[n-2::-1]]\n\t\treturn fullList", "def generate_new_key(self, index):\n new_key = self.chain_key.subkey(index)\n self._key_generated(new_key, index)", "def make_consistent(self):\r\n\r\n for key in self.get_keys():\r\n self.eliminate_key(key)\r\n\r\n for i_temp in self.indexes(): #i will be a note index\r\n for j_temp in self.get_keys_from_note(i_temp):\r\n if self.key_dict_contains(j_temp):\r\n self.add_key(j_temp,Index(i_temp))\r\n## self.key_dict[j_temp].add(str(Index(i_temp)))\r\n else:\r\n self.initiate_new_key(j_temp,Index(i_temp))", "def format_as_index(indices):\r\n\r\n if not indices:\r\n return \"\"\r\n return \"[%s]\" % \"][\".join(repr(index) for index in indices)", "def obs_key_func(state, **hx_kwargs):\n n = hx_kwargs[\"n\"]\n index = hx_kwargs[\"index\"]\n \n key = np.zeros(n)\n key[index] +=2\n \n return key", "def add_self_loops_to_indexlist(indices):\r\n max_ind = np.max(indices)\r\n self_loops = np.arange(max_ind+1,dtype=np.int)\r\n self_loops = np.concatenate([np.expand_dims(self_loops,axis=-1),np.expand_dims(self_loops,axis=-1)],axis=-1)\r\n added_loops = np.concatenate([indices,self_loops],axis=0)\r\n clean_index = np.unique(added_loops,axis=0)\r\n index_order = np.argsort(clean_index[:,0])\r\n out_indices = clean_index[index_order]\r\n return out_indices", "def _create_two_group_jackknife_indexes(x0, x1, is_paired):\n\n if is_paired and len(x0) == len(x1):\n out = list(zip([j for j in create_jackknife_indexes(x0)],\n [i for i in create_jackknife_indexes(x1)]\n )\n )\n else:\n jackknife_c = list(zip([j for j in create_jackknife_indexes(x0)],\n [i for i in create_repeated_indexes(x1)]\n )\n )\n\n jackknife_t = list(zip([i for i in create_repeated_indexes(x0)],\n [j for j in create_jackknife_indexes(x1)]\n )\n )\n out = jackknife_c + jackknife_t\n del jackknife_c\n del jackknife_t\n\n return out", "def key(self, patterns=None, indices=None):\n\n key = []\n\n # if the user doesn't provide indices, get indices from the pattern\n if not indices and patterns:\n indices = get_column_indices(patterns, self.columns)\n\n if indices:\n # if we have indices, use them to build the key\n for i in indices:\n if i < len(self.values):\n key.append(hxl.datatypes.normalise(self.values[i], self.columns[i]))\n else:\n # if there are still no indices, use the whole row for the key\n for i, value in enumerate(self.values):\n key.append(hxl.datatypes.normalise(value, self.columns[i]))\n\n return tuple(key) # make it into a tuple so that it's hashable", "def CreateIndex(self, arg0: 'unsigned long long') -> \"void\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_CreateIndex(self, arg0)", "def makeKey(text):\n key, n = {}, 0\n for i in 
text:\n key[i] = str(n)\n n += 1\n return key", "def __compound_key(key):\n x_int = int(key[0])\n y_int = int(key[1])\n zeros = len(str(y_int))\n key = x_int * (10 ** zeros) + y_int\n\n return key", "def _index_key(self, sig, codegen):\n return (sig, codegen.magic_tuple())", "def create_index(log_df, column):\n temp_list = log_df[[column]].values.tolist()\n subsec_set = {(x[0]) for x in temp_list}\n subsec_set = sorted(list(subsec_set))\n alias = dict()\n for i, _ in enumerate(subsec_set):\n alias[subsec_set[i]] = i + 1\n return alias", "def make_cache_keys(self, identifiers):\n\n raise NotImplementedError", "def CreateIndex(self, arg0: 'unsigned long long') -> \"void\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_CreateIndex(self, arg0)", "def generate_unique_key(*args, **kwargs):\n hashed_args = ['%s' % hash(arg) for arg in args]\n hashed_kwargs = ['%s ' % hash((key, value)) for (key, value) in kwargs.items()]\n # this is md5 hashed again to avoid the key growing too large for memcached\n return hashlib.md5(':'.join(hashed_args + hashed_kwargs)).hexdigest()", "def _create_indices(cls):\r\n from thunderdome.connection import _hosts, _index_all_fields, create_key_index\r\n \r\n if not _hosts: return\r\n for column in cls._columns.values():\r\n if column.index or _index_all_fields:\r\n create_key_index(column.db_field_name)", "def hoggar_indices():\n return list(product([0,1], repeat=6))", "def map_ord_to_index(origin_char_list, save_path):\n ord_2_index_dict = {str(i) + '_index': str(ord(c)) for i, c in\n enumerate(CharDictBuilder._read_chars(origin_char_list))}\n index_2_ord_dict = {str(ord(c)) + '_ord': str(i) for i, c in\n enumerate(CharDictBuilder._read_chars(origin_char_list))}\n total_ord_map_index_dict = dict(ord_2_index_dict)\n total_ord_map_index_dict.update(index_2_ord_dict)\n CharDictBuilder._write_json(save_path, total_ord_map_index_dict)", "def map_geo_hashed_value(l):\n \n l = sorted(l)\n return {k: index for index, k in enumerate(l)}", "def generate_inv_index(people):\n pass", "def createkey(*args): # {{{2\n return '-'.join(map(simplifyname, args))", "def create_label_map(label_lists, trailing_piece_tag=\"X\"):\n\n label_set = set()\n for labels in label_lists:\n label_set.update(labels)\n\n label_map = {label: i for i, label in enumerate(label_set)}\n\n if trailing_piece_tag not in label_set:\n label_map[trailing_piece_tag] = len(label_set)\n return label_map", "def create_index(shapes_with_props):\n index = rtree.index.Index()\n for id, shape_with_props in enumerate(shapes_with_props):\n index.insert(id, shape_with_props.shape.bounds)\n return index", "def createindexes():\n index = [{}, {}, {}, {}]\n readcorpus(index)\n buildindex4(index[2], index[3])\n writeindextofile(index)\n return index", "def __init__(self, positions, actives):\r\n self.positions = positions\r\n self.actives = actives\r\n self.hash = hash(self.actives.tobytes()) + 31 * hash(self.positions.tobytes())", "def map_position(pos):\n\n posiction_dict = dict(zip(range(1, 17), [i for i in range(30, 62) if i % 2]))\n return posiction_dict[pos]", "def _index_document(index_list):\n if isinstance(index_list, abc.Mapping):\n raise TypeError(\"passing a dict to sort/create_index/hint is not \"\n \"allowed - use a list of tuples instead. 
did you \"\n \"mean %r?\" % list(index_list.items()))\n elif not isinstance(index_list, (list, tuple)):\n raise TypeError(\"must use a list of (key, direction) pairs, \"\n \"not: \" + repr(index_list))\n if not len(index_list):\n raise ValueError(\"key_or_list must not be the empty list\")\n\n index = SON()\n for (key, value) in index_list:\n if not isinstance(key, str):\n raise TypeError(\"first item in each key pair must be a string\")\n if not isinstance(value, (str, int, abc.Mapping)):\n raise TypeError(\"second item in each key pair must be 1, -1, \"\n \"'2d', 'geoHaystack', or another valid MongoDB \"\n \"index specifier.\")\n index[key] = value\n return index", "def _indices_to_coords(c,r):\n\n column = _index_to_column(c)\n row = r + 1\n\n return {'c': column, 'r': row, 'coord': f'{column}{row}'}", "def make_DBL_index(angle_tube_pos, BDs, VTP2BD_func):\n # must have equal length arrays\n msg = 'Number of angle tube positions != number of BD values'\n assert len(angle_tube_pos) == 2, 'Formatting error: angled tube positions'\n assert len(angle_tube_pos[0]) == len(BDs), msg\n assert len(angle_tube_pos[1]) == len(BDs), msg\n\n # converting angle_tube_pos values to BD values\n low_pos_BD = VTP2BD_func(angle_tube_pos[0])\n high_pos_BD = VTP2BD_func(angle_tube_pos[1])\n \n # making a dict of BD : np.random.uniform(low_pos_BD, high_pos_BD)\n DBL_index = {}\n for i in xrange(len(low_pos_BD)):\n BD = round(BDs[i], 3)\n if np.isnan(low_pos_BD[i]) or np.isnan(high_pos_BD[i]):\n pass\n else:\n # note: lower tube position = higher BD \n DBL_index[BD] = partial(np.random.uniform,\n high = low_pos_BD[i], \n low = high_pos_BD[i], \n size=1)\n return DBL_index", "def add_position_map(lst, number_from=0):\r\n return map(lambda (val, ind): val + ind + number_from, enumerate(lst))", "def new_id(users):\n\n #nonlocal index\n if len(users) > 1:\n new_index = new_player_id.index\n new_player_id.index += 1\n else:\n new_index = users[0]\n\n return new_index", "def create_index(cluster_times, cluster_geometries):\n\n lookup = {} # create dict for key-value lookup\n for ct, cg in zip(cluster_times, cluster_geometries):\n if ct in lookup: # Check if STR-tree is drawn for t\n lookup[ct] = STRtree(lookup[ct]._geoms+[cg]) # Redraw STR_tree if record exists\n else:\n lookup[ct] = STRtree([cg]) # Create STR-tree from geometry list\n\n return lookup", "def __createkey__(self):\n return str(self.currentCol) + \",\" + str(self.currentRow)", "def _BuildEventTagIndex(self):\n self._event_tag_index = {}\n for event_tag in self.GetEventTags():\n event_identifier = event_tag.GetEventIdentifier()\n lookup_key = event_identifier.CopyToString()\n self._event_tag_index[lookup_key] = event_tag.GetIdentifier()", "def _create_subscript_mapping():\n # Create the normal and subscript digits list.\n normal_digits = [i for i in range(10)]\n subscript_digits = [chr(0x2080 + i) for i in range(10)]\n\n # Convert the normal digits to strings.\n normal_digits = [str(i) for i in normal_digits]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_digits, subscript_digits))", "def points_to_index(points, points_dict):\r\n index_locations = ''\r\n for point in points:\r\n index_locations += str(points_dict[point]) + ' '\r\n return index_locations", "def generate_prototype_key(self):\n return str(uuid.uuid5(UUID_XYZ_NAMESPACE, str((self.X, self.Y, self.Z))))", "def _add_pk(self, conn, *, tblname, pk_columns):\n idx_metadatum = partition_utils.IndexMetadatum(idx_cols=pk_columns, is_unique=True)\n 
partition_utils.add_indices(conn, tbl_name=tblname, idx_metadata=[idx_metadatum])", "def __hash__(self):\n index_list = [allele.index for allele in self.genes]\n return hash(tuple(index_list))", "def create_location_index():\n get_rsvps_database().groups.create_index([(\"location\", GEOSPHERE)])", "def create_position_ids_from_input_ids(self, x):\r\n mask = x.ne(self.padding_idx).long()\r\n incremental_indicies = torch.cumsum(mask, dim=1) * mask\r\n return incremental_indicies + self.padding_idx", "def setUniqueId(self, idsOfElementaryExpressions):\n if self.elementaryName in idsOfElementaryExpressions:\n self.elementaryIndex = idsOfElementaryExpressions[\n self.elementaryName\n ]\n else:\n error_msg = (\n f'No index is available for elementary '\n f'expression {self.elementaryName}.'\n )\n raise excep.biogemeError(error_msg)\n self.child.setUniqueId(idsOfElementaryExpressions)", "def from_list(hash, list):\r\n for k, v in enumerate(list):\r\n put(hash, k, v)", "def word_indexer(word_lst):\n unique_words = list(set(word_lst))\n word_index = {}\n for i in range(len(unique_words)):\n word_index[unique_words[i].lower()] = i + 4\n word_index['<PAD>'] = 0\n word_index['<START>'] = 1\n word_index['<UNK>'] = 2\n word_index['<UNUSED>'] = 3\n return word_index", "def getGlobalIndices( self, indices: list):\n result = indices.copy()\n for i,toAdd in enumerate(self._layout.starts):\n result[self._layout.dims_order[i]]=indices[i]+toAdd\n return result", "def vector_indx_to_map_matrix_indx(index,senzory_map):\n xs = dict(zip(np.unique(senzory_map[:,0]), it.count()))\n ys = dict(zip(np.negative(np.unique(senzory_map[:,1])), it.count()))\n x, y = senzory_map[index]\n return ys[y],xs[x]", "def unique_coordinate_map(\n coordinates: torch.Tensor,\n tensor_stride: Union[int, Sequence, np.ndarray] = 1,\n) -> Tuple[torch.IntTensor, torch.IntTensor]:\n assert coordinates.ndim == 2, \"Coordinates must be a matrix\"\n assert isinstance(coordinates, torch.Tensor)\n if not coordinates.is_cuda:\n manager = MEB.CoordinateMapManagerCPU()\n else:\n manager = MEB.CoordinateMapManagerGPU_c10()\n tensor_stride = convert_to_int_list(tensor_stride, coordinates.shape[-1] - 1)\n key, (unique_map, inverse_map) = manager.insert_and_map(\n coordinates, tensor_stride, \"\"\n )\n return unique_map, inverse_map", "def indices_from_subtensor(idx_list, indices):\n return tuple(\n tuple(convert_indices(list(indices), idx) for idx in idx_list) if idx_list else indices\n )", "def export_character_indices(self, indices):\n clone = self.__class__(self)\n # clone.clone_from(self)\n for vec in clone.taxon_seq_map.values():\n for cell_idx in range(len(vec)-1, -1, -1):\n if cell_idx not in indices:\n del(vec[cell_idx])\n return clone", "def update_vectors_by_position(data, val, positions):\n positions = positions.astype(np.int32)\n # batch_idx.shape = (batch_size, 1) as [[0], [1], [2], ...]\n batch_idx = np.expand_dims(npx.arange_like(positions, axis=0),\n axis=1).astype(np.int32)\n batch_idx = batch_idx + np.zeros_like(positions)\n indices = np.stack([batch_idx.reshape((-1,)), positions.reshape((-1,))])\n\n out = npx.index_update(data, indices, npx.reshape(val, (-5, -4)))\n return out", "def index_object(idxs=None):", "def encode_pos(i, j):\n return 3 * i + j", "def _make_unique(name, idx):\n p = re.compile(\".[aA-zZ]+_x[0-9]+\")\n if p.match(name):\n tags = name[1:].split(\"_x\")\n return \">%s_%s_x%s\" % (tags[0], idx, tags[1])\n return name.replace(\"@\", \">\")", "def _make_index_list(self, used_sample_id_list, num_id_repeats=1):\n if 
used_sample_id_list is None:\n self.index_list = [i for i in range(self.data.shape[0])]\n\n else:\n self.index_list = [i for i in range(self.data.shape[0])\n if self.data[i][DATA_ID_INDEX] in used_sample_id_list\n ]\n\n if len(self.index_list) != len(used_sample_id_list):\n warnings.warn(\"Not all images found. \\\n Found: {}, requested: {}\".format(len(self.index_list),\n len(used_sample_id_list))\n )\n\n # for small datasets,\n # the ids can be repeated to get a reasonable batch size working\n self.index_list = self.index_list*num_id_repeats", "def create_key_index_object(key_index_name: str, iterables: Mapping[str, Any]) -> Any:\n # Validation\n # We are going to use the iterators when determining the fields, so we need to notify if an iterator was\n # passed, as this will cause a problem later. Instead of passing an iterator, a iterable should be passed,\n # which can recreate the iter.\n # See: https://effectivepython.com/2015/01/03/be-defensive-when-iterating-over-arguments/\n for name, iterable in iterables.items():\n if iter(iterable) == iter(iterable):\n raise TypeError(\n f\"Iterable {name} is in iterator which can be exhausted. Please pass the iterable\"\n f\" in a container that can recreate the iterable. See the comments here for more info.\"\n )\n\n # We need the types of the fields to create the dataclass. However, we are provided with iterables\n # in the values of the iterables dict. Thus, we need to look at one value of each iterable, and use\n # that to determine the type of that particular iterable. This is safe to do because the iterables\n # must always have at least one entry (or else they wouldn't be one of the iterables).\n # NOTE: The order here matters when we create the ``KeyIndex`` later, so we cannot just take all\n # objects from the iterables and blindly use set because set won't preserve the order.\n fields = [(name, type(next(iter(iterable)))) for name, iterable in iterables.items()]\n KeyIndex = dataclasses.make_dataclass(key_index_name, fields, frozen=True)\n # Allow for iteration over the key index values\n KeyIndex.__iter__ = _key_index_iter # type: ignore\n\n return KeyIndex", "def _index_key(self, sig, codegen):\n codebytes = self._py_func.__code__.co_code\n if self._py_func.__closure__ is not None:\n cvars = tuple([x.cell_contents for x in self._py_func.__closure__])\n # Note: cloudpickle serializes a function differently depending\n # on how the process is launched; e.g. 
multiprocessing.Process\n cvarbytes = dumps(cvars)\n else:\n cvarbytes = b''\n\n hasher = lambda x: hashlib.sha256(x).hexdigest()\n return (sig, codegen.magic_tuple(), (hasher(codebytes),\n hasher(cvarbytes),))", "def __scatterRepeated ( self, posList ):\n\n #-- 1 --\n # [ numNonReps := len(self.posSpecs) - 1\n # numReps := len(posList) - (len(self.posSpecs) - 1) ]\n # NB: numNonReps is the total number of non-repeating required\n # arguments, and numReps is the number of positionals from posList\n # that correspond to the repeated argument.\n numNonReps = len(self.posSpecs) - 1\n numReps = len(posList) - numNonReps\n\n #-- 2 --\n # [ if numReps < 0 ->\n # sys.stderr +:= (usage message) + (error message)\n # stop execution\n # else -> I ]\n if numReps < 0:\n usage ( self.switchSpecs, self.posSpecs,\n \"Only %d positional arguments were supplied, \"\n \"need at least %d.\" %\n ( len(posList), len(self.posSpecs) - 1 ) )\n\n #-- 3 --\n # [ self.posMap +:= entries mapping keys of\n # self.posSpecs[0:self.__repx] |-> poslist[0:self.__repx] ]\n for posx in range ( self.__repx ):\n self.posMap[self.posSpecs[posx].key] = posList[posx]\n\n #-- 4 --\n # [ self.posMap +:= an entry mapping the key of\n # self.posSpecs[self.__repx].key |-> the list\n # posList[self.__repx:self__repx+numReps] ]\n self.posMap[self.posSpecs[self.__repx].key] = (\n posList[self.__repx:self.__repx+numReps] )\n\n #-- 5 --\n # [ self.posMap +:= entries mapping keys of\n # self.posSpecs[self.__repx+1:] |->\n # posList[self.__repx+numReps:] ]\n for spex in range ( self.__repx+1, len(self.posSpecs)):\n sourcex = spex - 1 + numReps\n self.posMap[self.posSpecs[spex].key] = posList[sourcex]", "def __init__(self, key):\n self.key = [int_mapping(k) for k in key]", "async def build_secret_index(self):\n pass" ]
[ "0.6364943", "0.59690577", "0.58550936", "0.58094066", "0.5801118", "0.5798134", "0.5770239", "0.57411945", "0.57236964", "0.5721956", "0.56699497", "0.56316316", "0.5616401", "0.5541196", "0.55281365", "0.5510795", "0.5509949", "0.54963547", "0.5487912", "0.5483186", "0.54695016", "0.54673046", "0.54664016", "0.54646", "0.5451394", "0.54313177", "0.5409055", "0.5400094", "0.53891605", "0.5377564", "0.5375951", "0.5373333", "0.53706557", "0.5360952", "0.53558743", "0.5352769", "0.5346403", "0.5344646", "0.53445137", "0.53354806", "0.52998906", "0.52956444", "0.52916014", "0.5284692", "0.52783716", "0.52728117", "0.5266184", "0.5255147", "0.5254929", "0.52391416", "0.52294207", "0.5228083", "0.52138203", "0.5210031", "0.52090615", "0.52008414", "0.52000403", "0.5199269", "0.5194044", "0.51875746", "0.51856565", "0.5179044", "0.5178672", "0.5165031", "0.5163238", "0.5153162", "0.51524544", "0.5147619", "0.51469517", "0.5142718", "0.5138888", "0.5127746", "0.5114802", "0.5113369", "0.5111165", "0.5110894", "0.511055", "0.5094354", "0.5092128", "0.50916964", "0.50879806", "0.507794", "0.5077165", "0.5073647", "0.5072265", "0.5040428", "0.50317156", "0.5029345", "0.50258076", "0.5016489", "0.50135684", "0.5010062", "0.5007419", "0.500642", "0.50043786", "0.4996942", "0.49967125", "0.4995857", "0.49860457", "0.49827707" ]
0.53552693
35
Updates the environment according to action and returns a `TimeStep`. See `step(self, action)` docstring for more details.
def _step(self, action: types.NestedArray) -> ts.TimeStep:
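The retrieved document for this query is only the `_step` signature. As a hedged sketch of the contract it names — assuming the TF-Agents `PyEnvironment` API and a hypothetical toy environment invented purely for illustration (several negatives below follow the same `ts.transition` / `ts.termination` pattern) — a minimal `_step` applies the action, advances the state, and returns the next `TimeStep`:

```python
import numpy as np
from tf_agents.environments import py_environment
from tf_agents.specs import array_spec
from tf_agents.trajectories import time_step as ts


class CountingEnv(py_environment.PyEnvironment):
    """Hypothetical toy environment: push a counter up to 10."""

    def __init__(self):
        self._action_spec = array_spec.BoundedArraySpec(
            shape=(), dtype=np.int32, minimum=0, maximum=2, name='action')
        self._observation_spec = array_spec.ArraySpec(
            shape=(1,), dtype=np.int32, name='observation')
        self._state = 0
        self._episode_ended = False

    def action_spec(self):
        return self._action_spec

    def observation_spec(self):
        return self._observation_spec

    def _observe(self):
        return np.array([self._state], dtype=np.int32)

    def _reset(self):
        self._state = 0
        self._episode_ended = False
        return ts.restart(self._observe())

    def _step(self, action):
        if self._episode_ended:
            # The previous action ended the episode; ignore this one and restart.
            return self.reset()

        self._state += int(action) - 1          # map {0, 1, 2} -> {-1, 0, +1}
        if self._state >= 10:
            self._episode_ended = True
            return ts.termination(self._observe(), reward=1.0)
        return ts.transition(self._observe(), reward=0.0, discount=1.0)
```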
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self, action: types.NestedArray) -> ts.TimeStep:\n if self._current_time_step is None or self.should_reset(\n self._current_time_step\n ):\n return self.reset()\n\n self._current_time_step = self._step(action)\n return self._current_time_step", "def step(\n self,\n action: Union[Dict[str, Any], np.ndarray],\n env_id: Optional[np.ndarray] = None,\n ) -> Union[TimeStep, Tuple]:", "def step(self, action):\n self.action = action\n return self.env.step(action)", "def step(self, action):\n return self._env.step(action)", "def step(self, action):\n if self._reset_next_step:\n return self.reset()\n\n # Apply the game_rules\n for rule in self.game_rules:\n rule.step(self._state, self._meta_state)\n\n # Apply the action\n self.action_space.step(self._state, action)\n\n # Step the physics\n self.physics.step(self._state)\n\n # Compute reward\n self.step_count += 1\n reward, should_reset = self.task.reward(\n self._state, self._meta_state, self.step_count)\n\n # Take observation\n observation = self.observation()\n\n # Return transition\n if should_reset:\n self._reset_next_step = True\n return dm_env.termination(reward=reward, observation=observation)\n else:\n return dm_env.transition(reward=reward, observation=observation)", "def step(self, action):\n self.t += 1\n state, reward, done, info = self.env.step(action)\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info", "def step(self, action):\n return self.env.step(action)", "def step_env(self, action):\n return self.env.step(action)", "def apply_action(self, action):\n return self.__environment.step(action)", "def step(self, action):\n # Implement your step method here\n # return (observation, reward, done, info)\n self._state = self._state + action\n # print('Step state:', self._state)\n x, y = self._state\n reward = - (x ** 2 + y ** 2) ** 0.5\n done = abs(x) < 0.01 and abs(y) < 0.01\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=done)", "def step(self, action):\n self.t += 1\n if self.use_run_time_assurance:\n probe_state, unsafe = self.probe_step(action)\n # switch to safe controller if unsafe\n if unsafe:\n x, x_dot, theta, theta_dot = probe_state\n # go right\n if x <= -self.x_threshold: # go right\n action = 1\n elif x>= self.x_threshold: # go left\n action = 0 \n \n state, reward, done, info = self.env.step(action)\n # Could make a custom reward here if you want\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info", "def _step(self, action):\n if self._episode_ended:\n # The last action ended the episode. 
Ignore the current action and start a new episode\n return self.reset()\n\n env_action = self.y_train[self.id[self.episode_step]]\n self.episode_step += 1\n\n if action == env_action: # Correct action\n if env_action: # Minority\n reward = 1 # True Positive\n else: # Majority\n reward = self.imb_rate # True Negative\n\n else: # Incorrect action\n if env_action: # Minority\n reward = -1 # False Negative\n self._episode_ended = True # Stop episode when minority class is misclassified\n else: # Majority\n reward = -self.imb_rate # False Positive\n\n if self.episode_step == self.X_len - 1: # If last step in data\n self._episode_ended = True\n\n self._state = self.X_train[self.id[self.episode_step]] # Update state with new datapoint\n\n if self._episode_ended:\n return ts.termination(self._state, reward)\n else:\n return ts.transition(self._state, reward)", "def step(self, action):\n if self._reset_next_step:\n return self.reset()\n\n self._step_count += 1\n \n reward = self._action_space.step(\n action, self._sprites, keep_in_frame=self._keep_in_frame)\n\n # Update sprite positions from their velocities\n for sprite in self._sprites:\n sprite.update_position(keep_in_frame=self._keep_in_frame)\n\n reward += self._task.reward(self._sprites)\n observation = self.observation()\n\n if self.should_terminate():\n self._reset_next_step = True\n return dm_env.termination(reward=reward, observation=observation)\n else:\n return dm_env.transition(reward=reward, observation=observation)", "def step(self, action: list) -> None:\n self._input = np.array(\n [self._thrust_surge(action[0]), self._moment_steer(action[1])]\n )\n w, q = odesolver45(\n self._state_dot, self._state, self.config.simulation.t_step_size\n )\n\n self._state = q\n self._state[2] = geom.princip(self._state[2])\n\n self._prev_states = np.vstack([self._prev_states, self._state])\n self._prev_inputs = np.vstack([self._prev_inputs, self._input])\n\n self._step_counter += 1", "def step(self, action):\n self.move_step(action) # Move.\n r, d = self.check_goal() # Check the reward and done state, and create\n # new environment.\n s_new= self.render_env() # Render the new environment.\n return s_new, r, d", "def step(self, action):\n observation, reward, done, _ = self.env.step(action)\n return np.array(observation), reward, done", "def step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n self.microgridPolicy.improveAction(action);\n\n self.microgrid.update();\n\n self.updateState();\n done = self.microgridPolicy.verifyStopConditions();\n reward = self.microgridPolicy.computeReward(done)\n if done: \n if self.steps_beyond_done is None:\n self.steps_beyond_done = 0\n else:\n logger.warn(\"You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.\")\n self.steps_beyond_done += 1\n self.clock.increaseTimeStep();\n return self.state, reward, done, {}", "def step(self, action: np.ndarray) -> 'EnvStep':\n ...", "def step(self, action):\n # THIS WILL BE CALLED FROM ALL STEP DRIVERS\n self._world = self._action_wrapper.action_to_behavior(world=self._world,\n action=action)\n # 1. 
move the agent we set the action for\n controlled_agent_id = self._scenario._eval_agent_ids[self._action_wrapper._input_count-1]\n self._world.stepAgent(self._step_time, controlled_agent_id)\n\n # length of agents\n if self._action_wrapper._input_count >= len(self._scenario._eval_agent_ids):\n # CANNOT STEP WORLD IF NOT ALL ACTIONS ARE SET\n self._action_wrapper._input_count = 0\n \n # 2. move all other agent\n self._world.step(self._step_time)\n if self._render:\n self.render()\n\n # TODO needs to know the agents id\n return self.snapshot(\n world=self._world,\n controlled_agents=controlled_agent_id,\n action=action)", "def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]:\n next_state, reward, done, info = self.env.step(action)\n return next_state, reward, done, info", "def step(self, action):\n (self.state, self.reward, self.terminal, self.truncated,\n self.info) = self.env.step(action)\n\n return self.state, self.reward, self.terminal, self.truncated, self.info", "def takeAction(self, action):\n return self.env.step(action)", "def step(self, action: Union[np.ndarray, torch.Tensor]):\n if type(action) == torch.Tensor:\n action = action.squeeze().numpy()\n\n if not type(action) is np.ndarray:\n raise Exception(\"The action must be a Numpy array but is of type %s (value = %s)\" % (type(action), action))\n\n if self.increment_actions and not self.action_space.contains(action):\n action = action.clip(self.action_space.low, self.action_space.high)\n\n # Additionally, we must make sure the value will stay in the range\n # min <= x + action <= max\n if self.increment_actions:\n current_values = self.x[np.array([0, 1, 3, 5])]\n new_flow_values = current_values + action\n else:\n new_flow_values = action\n\n new_flow_values = np.clip(new_flow_values, self.flows_lower_bounds, self.flows_upper_bounds)\n self.update_all_flows(new_flow_values)\n\n if any([x < 0 for x in self.x]):\n pass\n # TODO: should I clip the actions to ensure the flows are always positive?\n # raise Exception(f\"Negative flows! 
x = {[round(x, 4) for x in self.x]}\")\n\n self.update_fitness()\n\n self.step_number += 1\n\n # reward = self.fitness - self.previous_fitness\n reward = self.fitness\n observation = self.get_observation()\n\n done = (self.step_number == self.total_number_of_episodes)\n info = {}\n return observation, reward, done, info", "def step(self, action):\n if self.space is None or self.spacecraft is None:\n raise NotImplementedError(\"The spacecraft must be initialized in the environment implementation\")\n\n self._simulate(action)\n\n obs = self.observation.observe()\n reward = self._reward(action)\n terminal = self._is_terminal()\n\n info = {\n \"velocity\": self.spacecraft.velocity,\n \"crashed\": self.spacecraft.crashed,\n \"action\": action,\n }\n try:\n info[\"cost\"] = self._cost(action)\n except NotImplementedError:\n pass\n\n return obs, reward, terminal, info", "def step(self, action):\n self._robot.send_command(action)\n\n obs = self.get_observation()\n\n reward = self.reward(obs.achieved_goal, self.goal)\n done = self.done(obs.achieved_goal, self.goal)\n next_observation = obs.observation\n return Step(observation=next_observation, reward=reward, done=done)", "def step(self, action):\n if self.platform is None:\n raise RuntimeError(\"Call `reset()` before starting to step.\")\n\n if not self.action_space.contains(action):\n raise ValueError(\n \"Given action is not contained in the action space.\")\n\n num_steps = self.frameskip\n\n # ensure episode length is not exceeded due to frameskip\n step_count_after = self.step_count + num_steps\n if step_count_after > self.episode_length:\n excess = step_count_after - self.episode_length\n num_steps = max(1, num_steps - excess)\n\n reward = 0.0\n for _ in range(num_steps):\n self.step_count += 1\n if self.step_count > self.episode_length:\n raise RuntimeError(\"Exceeded number of steps for one episode.\")\n\n # send action to robot\n robot_action = self._gym_action_to_robot_action(action)\n t = self.platform.append_desired_action(robot_action)\n\n # Use observations of step t + 1 to follow what would be expected\n # in a typical gym environment. 
Note that on the real robot, this\n # will not be possible\n observation = self._create_observation(t + 1)\n\n reward += self.compute_reward(observation, self.info)\n\n is_done = self.step_count == self.episode_length\n\n return observation, reward, is_done, self.info", "def performAction(self, action):\n self.action = action\n self.t += self.dt \n self.step()", "def step(self, action):\n done = self.cur_step >= self.max_steps_per_episode\n\n if done:\n raise RuntimeError(\"Episode is done\")\n\n self.cur_step += 1\n\n # Compute new state based on previous state and action\n new_state = self._take_action(action)\n\n # Compute reward value based on previous state and action\n reward = self._get_reward(action)\n\n # Update current state to new state\n self.cur_state = new_state\n\n # Compute observation from current state\n ob = self._get_obs() # Has to come after new state update\n\n # Update action, observation and reward histories\n self.action_episode_memory[self.cur_episode].append(action)\n self.observation_episode_memory[self.cur_episode].append(ob)\n self.reward_episode_memory[self.cur_episode].append(reward)\n\n # Recompute done since action may have modified it\n done = self.cur_step >= self.max_steps_per_episode\n\n return ob, reward, done, {}", "def step(self, action):\n obs, r, done, info = self.env.step(action)\n obs = self.get_observation(obs)\n return obs, r, self.is_done(), info", "def step(self, action):\n # check if suggested action is valid\n valid = self._take_action(action)\n if not valid:\n _, _ = self._simulate()\n response = self.worst_response\n target = 6*60\n else:\n # simulate until a TS response is needed\n response = np.inf\n while response == np.inf:\n response, target = self._simulate()\n if np.isnan(target): # prio 2 or 3 incident: no target exists\n target = response\n\n self.last_action = action if self.action_type == \"tuple\" else self.action_num_to_tuple[action]\n # calculate reward and new state\n self.reward = self._get_reward(response, target, valid=valid)\n self.state, self.is_done = self._extract_state()\n return self.state, self.reward, self.is_done, {\"note\": \"nothing to report\"}", "def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:\n next_state, reward, done, _ = self.env.step(action, resize=RESIZE, size = RESIZE_SIZE)\n\n if not self.is_test:\n self.transition += [reward, next_state, done]\n \n # N-step transition\n if self.use_n_step:\n one_step_transition = self.memory_n.store(*self.transition)\n # 1-step transition\n else:\n one_step_transition = self.transition\n\n # add a single step transition\n if one_step_transition:\n self.memory.store(*one_step_transition)\n \n return next_state, reward, done", "def step(self, action, update=True) -> tuple:\n if self.state.is_terminal():\n raise Exception('Cannot perform action on terminal state!')\n s = self.state if update else self.state.copy()\n if self.render:\n self.env.render()\n s.observation, reward, s.terminal, info = self.env.step(action)\n\n return s.copy() if update else s, reward", "def step(self, action: ActionType) -> EnvResponse:\n action = self.action_space.clip_action_to_space(action)\n if self.action_space and not self.action_space.contains(action):\n raise ValueError(\"The given action does not match the action space definition. 
\"\n \"Action = {}, action space definition = {}\".format(action, self.action_space))\n\n # store the last agent action done and allow passing None actions to repeat the previously done action\n if action is None:\n action = self.last_action\n self.last_action = action\n if self.visualization_parameters.add_rendered_image_to_env_response:\n current_rendered_image = self.get_rendered_image()\n\n self.current_episode_steps_counter += 1\n if self.phase != RunPhase.UNDEFINED:\n self.total_steps_counter += 1\n\n # act\n self._take_action(action)\n\n # observe\n self._update_state()\n\n if self.is_rendered:\n self.render()\n\n self.total_reward_in_current_episode += self.reward\n\n if self.visualization_parameters.add_rendered_image_to_env_response:\n self.info['image'] = current_rendered_image\n\n self.last_env_response = \\\n EnvResponse(\n reward=self.reward,\n next_state=self.state,\n goal=self.goal,\n game_over=self.done,\n info=self.info\n )\n\n # store observations for video / gif dumping\n if self.should_dump_video_of_the_current_episode(episode_terminated=False) and \\\n (self.visualization_parameters.dump_mp4 or self.visualization_parameters.dump_gifs):\n self.last_episode_images.append(self.get_rendered_image())\n\n return self.last_env_response", "def step(self, action):\n if self._backend_agent:\n self._backend_agent._on_gym_step_begin(self, action)\n\n result = self.env.step(action)\n (state, reward, done, info) = result\n self.steps_done_in_episode += 1\n self.steps_done_in_instance += 1\n self.total.steps_done_inc()\n if self.max_steps_per_episode and self.steps_done_in_episode >= self.max_steps_per_episode:\n done = True\n result = (state, reward, done, info)\n if not self.is_episode_done and done:\n self.is_episode_done = True\n self.episodes_done += 1\n self.total.episodes_done_inc()\n\n if self._backend_agent:\n self._backend_agent._on_gym_step_end(self, action, result)\n return result", "def step(self, action: ActionType) -> None:\n raise NotImplementedError", "def step(self, state, action, reward, done):\n\n self.memory.add(state, action, reward, done)\n if done and self.n_tau % self.update_freq == 0:\n self.n_tau += 1\n return self.update()\n return None", "def step(self, action: int):\n assert self.action_space.contains(action)\n loc = action\n if self.done:\n return self._get_obs(), 0, True, None\n\n reward = NO_REWARD\n # update bord\n self.board[loc] = to_code(self.mark)\n\n # check if game has ended\n status = check_game_status(self.board)\n if status >= 0:\n self.done = True\n if status in [1, 2]:\n reward = O_REWARD if self.mark == 'O' else X_REWARD\n\n # update mark\n self.mark = next_mark(self.mark)\n\n return self._get_obs(), reward, self.done, None", "def step(self, action):\n action = self.randomization.action_randomizer.randomize(\n action, self._random_state\n )\n\n robot_exception = None\n try:\n self._act(action)\n except RobotException as re:\n logger.error(\n f\"Robot raised exception: {str(re)}. 
This will finish the current episode.\"\n )\n robot_exception = re\n\n if not self.constants.physical:\n # We don't need to do stepping for physical roll out.\n self.mujoco_simulation.step()\n\n self._synchronize_step_time()\n self.t += 1\n\n obs, reward, done, info = self.get_observation(robot_exception=robot_exception)\n obs, reward, done, info = self.step_finalize(obs, reward, done, info)\n return obs, reward, done, info", "def step(self, action):\n assert self.action_space.contains(\n action), \"%r (%s) invalid\" % (action, type(action))\n self.time_step += 1\n reward = float(0)\n self.is_episode_done = False\n\n # For testing code\n current_edge_agg_num = self.time_step\n\n # Rescale the action from [-1, 1] to [1, 2, ... , 9]\n # The action is the number of aggregations on edge servers\n # current_edge_agg_num = int((action + 2) * (action + 2))\n\n logging.info(\"RL Agent: Start time step #%s...\", self.time_step)\n logging.info(\n \"Each edge server will run %s rounds of local aggregation.\",\n current_edge_agg_num)\n\n # Pass the tuned parameter to RL agent\n self.rl_agent.get_tuned_para(current_edge_agg_num, self.time_step)\n\n # Wait for state\n current_loop = asyncio.get_event_loop()\n get_state_task = current_loop.create_task(self.wait_for_state())\n current_loop.run_until_complete(get_state_task)\n #print('State:', self.state)\n\n self.normalize_state()\n #print('Normalized state:', self.state)\n\n reward = self.get_reward()\n info = {}\n\n self.rl_agent.cumulative_reward += reward\n\n # Signal the RL agent to start next time step (next round of FL)\n self.step_done.set()\n\n return np.array([self.state]), reward, self.is_episode_done, info", "def step(self, action):\n obs = self.gym.get_observations()\n all_actions = self.gym.act(obs)\n all_actions.insert(self.gym.training_agent, action)\n state, reward, terminal, info = self.gym.step(all_actions)\n agent_state = self.featurize(state[self.gym.training_agent])\n\n # agent_state_history = self.make_observation(agent_state, self.step)\n agent_reward = reward[self.gym.training_agent]\n\n # self.step += 1\n return agent_state, agent_reward, terminal, info", "def step(self, action):\n\n if not self._is_action_legal(action):\n return self.current_state, self.reward_illegal_action, self._is_terminal_state(), None\n else:\n # Change action passed if environment should behave random\n if self.stochastic:\n if not np.random.choice([True, False], 1, p=[self.p, 1 - self.p]):\n action = np.random.choice(self.possible_actions)\n\n # Needed for reward calculation (must be done before updating data structures)\n number_of_shifts = self._get_number_of_shifts(action)\n is_cargo_mandatory = int(self.vehicle_data[2][action] == 1)\n\n slot = self.end_of_lanes[self.current_Lane]\n self.loading_sequence += \"{}. 
Load Vehicle Type \\t {} \\t in Lane: \\t {} \\t Row: \\t {} \\n\" \\\n .format(self.sequence_no, action, self.current_Lane, slot)\n\n self.end_of_lanes[self.current_Lane] += self.vehicle_data[4][action]\n\n if self.vehicle_data[1][action] == -1 or \\\n self.number_of_vehicles_loaded[action] < self.vehicle_data[1][action]:\n self.number_of_vehicles_loaded[action] += 1\n\n self.loaded_vehicles[self.current_Lane][self.vehicle_Counter[self.current_Lane]] = action\n self.vehicle_Counter[self.current_Lane] += 1\n\n # Update grids\n for i in range(self.vehicle_data[4][action]):\n self.grid.T[self.current_Lane][slot + i] = self.sequence_no\n self.grid_destination.T[self.current_Lane][slot + i] = self.vehicle_data[3][action]\n self.grid_vehicle_type.T[self.current_Lane][slot + i] = self.vehicle_data[0][action]\n\n # Update lowest destination data structure\n if self.vehicle_data[3][action] < self.lowest_destination[self.current_Lane]:\n self.lowest_destination[self.current_Lane] = self.vehicle_data[3][action]\n\n self.sequence_no += 1\n # Update according to lane selection heuristic\n self.current_Lane = self._get_minimal_lanes()[0]\n\n self.possible_actions = self.get_possible_actions_of_state()\n self.current_state = self._get_current_state()\n\n if self._is_terminal_state():\n # Calculate reward for terminal state\n free_spaces = np.sum(self._get_free_capacity()) / np.sum(self.total_capacity)\n mandatory_vehicles_left_to_load = np.sum(self.vehicle_data[1][self.mandatory_cargo_mask]\n - self.number_of_vehicles_loaded[self.mandatory_cargo_mask])\n reward_features = np.array(\n [is_cargo_mandatory, number_of_shifts, free_spaces, mandatory_vehicles_left_to_load])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, True, {}\n else:\n # Calculate reward\n reward_features = np.array([is_cargo_mandatory, number_of_shifts, 0, 0])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, False, {}", "def step(\n self, action: Union[numpy.ndarray, int], state: numpy.ndarray = None, dt: int = None\n ) -> tuple:\n data = super(AtariEnvironment, self).step(action=action, state=state, dt=dt)\n if state is None:\n observ, reward, terminal, info = data\n observ = self.gym_env.unwrapped.ale.getRAM() if self.obs_ram else observ\n return observ, reward, terminal, info\n else:\n state, observ, reward, terminal, info = data\n observ = ale_to_ram(self.gym_env.unwrapped.ale) if self.obs_ram else observ\n return state, observ, reward, terminal, info", "def step(self, action):\n new_speed = self._state + action\n new_speed[np.where(new_speed < 0)] = 0\n for car_idx in range(self.num_cars):\n # almost instantaneous\n traci.vehicle.slowDown(self.controllable[car_idx], new_speed[car_idx], 1)\n traci.simulationStep()\n self._state = np.array([traci.vehicle.getSpeed(vID) for vID in self.controllable])\n reward = self.compute_reward(self._state)\n # done = np.all(abs(self._state-self.GOAL_VELOCITY) < self.delta)\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=False)", "def step(self, action, agent_index=0):\n return self.env.step(action)", "def step(self):\n updating_env = {} if self.independent_update else self.env\n for a in self.agents:\n if self.i % a.period == 0:\n action = a(self.env)\n if a.name is not None:\n updating_env[a.name] = action\n if self.independent_update:\n self.env.update(updating_env)\n self.i += 1", "def step(self, action):\n force = 
self.force_mag if action else -self.force_mag\n costheta = math.cos(self.theta)\n sintheta = math.sin(self.theta)\n temp = (\n force + self.polemass_length * self.theta_dot ** 2 * sintheta\n ) / self.total_mass\n thetaacc = (self.gravity * sintheta - costheta * temp) / (\n self.length\n * (4.0 / 3.0 - self.masspole * costheta * costheta / self.total_mass)\n )\n xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\n self.x += self.tau * self.x_dot\n self.x_dot += self.tau * xacc\n self.theta += self.tau * self.theta_dot\n self.theta_dot += self.tau * thetaacc\n\n return self.state", "def step(self, _action):\n action = np.hstack((np.zeros(6), _action/10.))\n self.ref_skel.set_positions(self.ref_state.angles)\n for i in range(self.step_per_frame):\n # self.skel.set_forces(self.skel.get_spd(self.ref_skel.q + action, self.world.time_step(), self.Kp, self.Kd))\n self.skel.set_forces(self.skel.get_spd(self.ref_state.angles + action, self.world.time_step(), self.Kp, self.Kd))\n self.world.step()\n\n self.ref_state_time += self.step_per_frame * self.world.time_step()\n if self.ref_state_time >= self.ref_state.dt:\n self.ref_state_time -= self.ref_state.dt\n self.ref_state = self.ref_state.get_next()\n\n return tuple([self.state(), self.reward(), self.is_done(), dict()])", "def step(self, action) -> (list, float, bool):\n if len(self.curr_stim) == 0:\n self.curr_stim += [action[0]] * action[1] + [-action[0]] * action[1]\n self.system_step()\n self.frame += 1 / self.config[\"Fs\"]\n self.history.append(self.x2-self.x1)\n return self.get_state(), 0, False", "def step(self, action: Action) -> Tuple[Observation, float, bool, bool, dict]:\n if self.road is None or self.vehicle is None:\n raise NotImplementedError(\"The road and vehicle must be initialized in the environment implementation\")\n\n self.time += 1 / self.config[\"policy_frequency\"]\n self._simulate(action)\n\n obs = self.observation_type.observe()\n reward = self._reward(action)\n terminated = self._is_terminated()\n truncated = self._is_truncated()\n info = self._info(obs, action)\n if self.render_mode == 'human':\n self.render()\n\n return obs, reward, terminated, truncated, info", "def step(self, action):\n # if self.current_turn<self.MAX_TURNS-1:\n # self.current_turn += 1\n \n\n self.current_turn += 1\n system_action = self.parseAction(action)\n \n # Used for logging and evaluation\n self.updateMetaState(system_action)\n\n self.processSystemAction(system_action)\n\n reward = self.calculateReward()\n\n user_action = self.user.respond(system_action)\n self.processUserAction(user_action)\n observation = self.generateObservation()\n done = self.isDone()\n if done:\n info = { \"successful\": self.user.goals[\"satisfied\"], \n \"first-appearance\": self.first_appearance, \n \"turn-penalty\": self.current_turn,\n \"sugg-all-penalty\":self.sugg_penalty,\n \"info-all-penalty\": self.info_penalty,\n \"eli-kw-used\": self.eli_kw_observed,\n \"eli-query-used\": self.eli_query_observed,\n }\n else:\n info = {}\n if self.training:\n if done and self.user.goals[\"satisfied\"]: reward+=30\n return observation, reward, done, info", "def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % PARAM.UPDATE_EVERY\n if self.t_step == 0:\n if len(self.memory) > PARAM.BATCH_SIZE:\n experiences = self.memory.sample()\n self.learn(experiences, PARAM.GAMMA)", "def 
step(self, action):\n raise NotImplementedError", "def step(self, action):\n self.timestep += 1\n self.actions = action.ravel()\n\n # Figure out which action was taken\n self.acted = False\n self.eat = False\n self.discard = False\n if action[0] > .5:\n self.eat = True\n self.acted = True\n elif action[1] > .5:\n self.discard = True\n self.acted = True\n\n # Check whether the appropriate action was taken, and assign reward.\n # There is a small punishment for doing nothing.\n self.reward = -.1\n if ((self.eat and self.edible) or\n (self.discard and not self.edible)):\n self.reward = 1.\n elif ((self.eat and not self.edible) or\n (self.discard and self.edible)):\n self.reward = -.9\n\n if self.acted:\n self.grab_fruit()\n\n return self.sensors, self.reward", "def env_step(self, action):\n state, reward, done, info = self.env.step(action)\n state = self.feature_extractor.extract_features(state)\n\n return state, reward, done, info", "def step(self, action: Action) -> Feedback: # type: ignore\n self._action_counter += 1\n step_id = self._action_counter\n\n self._encode_and_send_action(action, step_id)\n\n # Wait (blocking!) for the response envelope from the environment\n in_envelope = self._queue.get(block=True, timeout=None) # type: Envelope\n\n msg = self._decode_percept(in_envelope, step_id)\n\n observation, reward, done, info = self._message_to_percept(msg)\n\n return observation, reward, done, info", "def step(self, action):\n\n \"\"\"\n Here we should convert the action num to movement action, execute the action in the\n simulation and get the observations result of performing that action.\n \"\"\"\n #if self.step_number > 200:\n #self.reset()\n rospy.logdebug(\"START STEP OpenAIROS\")\n\n self.gazebo.unpauseSim()\n self._set_action(action)\n #self._prey_step()\n self.gazebo.pauseSim()\n obs = self._get_obs()\n done = self._is_done(obs)\n info = {}\n reward = self._compute_reward(obs, done)\n \n self.cumulated_episode_reward = self.cumulated_episode_reward+ reward\n self.step_number += 1\n rospy.logdebug(\"END STEP OpenAIROS\")\n\n return obs, reward, done, info", "def step(self, action: nx.Graph):\n # Get the SMILES string associated with this action\n self._state = action\n if self.record_path:\n self._path.append(self._state)\n\n # Update the action space\n self.action_space.update_actions(self._state, self.observation_space)\n self._counter += 1\n\n # Check if we have finished\n # Out of steps or no more moves\n done = len(self.action_space.get_possible_actions()) == 0\n\n # Compute the fingerprints for the state\n return self._state, self.reward(), done, {}", "def step(self, action):\n raise NotImplementedError()", "def step(self, action):\r\n new_img, reward, done, info = self.env.step(action)\r\n self.update_buffer(new_img)\r\n return self.framebuffer, reward, done, info", "def step(self, action):\n assert self.action_space.contains(action)\n\n if self.done:\n return self._get_obs(), 0, True, None\n\n reward = NO_REWARD\n\n # place\n col = action\n row = get_row(self.board, col)\n\n self.board[row, col] = tocode(self.mark)\n self.turn += 1\n self.status = check_game_status(self.board, row, col)\n\n if self.status >= 0:\n self.done = True\n if self.status in [1, 2]:\n # always called by self\n reward = O_REWARD if self.mark == 'O' else X_REWARD\n\n # switch turn\n self.mark = next_mark(self.mark)\n return self._get_obs(), reward, self.done, None", "def step(self, state, action, reward, next_state, done):\n \n # Save experience in replay memory\n self.memory.add(state, action, 
reward, next_state, done)\n\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % self.params.update_every\n if self.t_step == 0:\n # If enough samples are available in memory, get random subset and learn\n if len(self.memory) > self.params.batch_size:\n experiences = self.memory.sample()\n self.learn(experiences, self.params.gamma)", "def step(self, action):\n if self._reset_next_step:\n self._reset_next_step = False\n return self.reset()\n\n self._hooks.before_step(self._physics_proxy, action, self._random_state)\n self._observation_updater.prepare_for_next_control_step()\n\n try:\n for i in range(self._n_sub_steps):\n self._hooks.before_substep(self._physics_proxy, action,\n self._random_state)\n self._physics.step()\n self._hooks.after_substep(self._physics_proxy, self._random_state)\n # The final observation update must happen after all the hooks in\n # `self._hooks.after_step` is called. Otherwise, if any of these hooks\n # modify the physics state then we might capture an observation that is\n # inconsistent with the final physics state.\n if i < self._n_sub_steps - 1:\n self._observation_updater.update()\n physics_is_divergent = False\n except control.PhysicsError as e:\n if not self._raise_exception_on_physics_error:\n logging.warning(e)\n physics_is_divergent = True\n else:\n raise\n\n self._hooks.after_step(self._physics_proxy, self._random_state)\n self._observation_updater.update()\n\n if not physics_is_divergent:\n reward = self._task.get_reward(self._physics_proxy)\n discount = self._task.get_discount(self._physics_proxy)\n terminating = (\n self._task.should_terminate_episode(self._physics_proxy)\n or self._physics.time() >= self._time_limit\n )\n else:\n reward = 0.0\n discount = 0.0\n terminating = True\n\n obs = self._observation_updater.get_observation()\n\n if not terminating:\n return dm_env.TimeStep(dm_env.StepType.MID, reward, discount, obs)\n else:\n self._reset_next_step = True\n return dm_env.TimeStep(dm_env.StepType.LAST, reward, discount, obs)", "def step(self, action):\r\n reward = self.__execute(action)\r\n self.__totalReward += reward\r\n status = self.__status()\r\n state = self.__observe()\r\n logging.debug(\"action: {:10s} | reward: {: .2f} | status: {}\".format(Action(action).name, reward, status))\r\n return state, reward, status", "def step(self, action):\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {\n \"play\": {\n \"landlord\": self._get_reward(\"landlord\"),\n \"landlord_up\": self._get_reward(\"landlord_up\"),\n \"landlord_down\": self._get_reward(\"landlord_down\")\n },\n \"bid\": {\n \"landlord\": self._get_reward_bidding(\"landlord\")*2,\n \"landlord_up\": self._get_reward_bidding(\"landlord_up\"),\n \"landlord_down\": self._get_reward_bidding(\"landlord_down\")\n }\n }\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}", "def update(self, action):\n self._update_noise()\n self._update_state(action)\n self._update_status()\n self.steps += 1\n # Return current state and error\n return self.observation, self.error # returns two unctions when called", "def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % UPDATE_EVERY\n if 
self.t_step == 0:\n # If enough samples are available in memory, get random subset and learn\n if len(self.memory) > BATCH_SIZE:\n experiences = self.memory.sample()\n self.learn(experiences, GAMMA)", "def _step(self, action: np.ndarray):\n self.robot.step({\n 'dkitty': action,\n })", "def step(self, action):\n # get the instruction indicated by the action\n instr = self.instrs[action]\n # extend the program\n self.program.inst(instr)\n # run and get some measured bitstrings\n self.bitstrings, info = self._run_program(self.program)\n # compute the avg score of the bitstrings\n reward = self._prob_score(self.bitstrings)\n self.running_episode_reward += reward\n\n info[\"instr\"] = instr\n info[\"reward-nb\"] = reward\n self.current_step += 1\n\n # are we done yet?\n done = False\n if self.current_step >= MAX_PROGRAM_LENGTH:\n done = True\n if reward >= self.reward_threshold:\n reward += MAX_PROGRAM_LENGTH - self.current_step\n done = True\n\n return self.observation, reward, done, info", "def step(self):\n old_state = self.state\n action = self.get_next_action()\n new_state, reward, failed = self.env.step(action)\n if self.training_mode:\n if self.violated_soft_constraint or failed:\n self.Q_model.update(old_state, action, new_state, reward,\n failed)\n self.safety_model.update(old_state, action, new_state, reward,\n failed)\n self.updated_safety = True\n else:\n self.Q_model.update(old_state, action, new_state, reward,\n failed)\n self.updated_safety = False\n else:\n self.updated_safety = False\n self.state = new_state\n self.last_action = action\n return new_state, reward, failed", "def step(self, action):\n self._action = action\n if action[0] < 0: # Only allow forward direction\n action[0] = 0\n\n # Publish action via ROS\n msg = ackermann_msgs.msg.AckermannDriveStamped()\n msg.drive.speed = action[0]\n msg.drive.steering_angle = action[1]\n msg.header.stamp = self._sensor_stamp\n self.publisher.publish(msg)\n\n # Wait for next state readings\n self._num_states_received = self._num_states_needed\n while self._num_states_received > 0:\n time.sleep(0.00001)\n nextstate = self._current_state\n next_observation = self._state_to_observation(nextstate)\n self._state = nextstate\n\n reward, info = self.get_reward(nextstate, action)\n return Step(observation=next_observation, reward=reward,\n done=False, dist=info['dist'], vel=info['vel'],\n kappa=self._model.kappa)", "def step(self):\n self.step_n += 1\n self.step_t += 1\n # TODO: directly calling agent.act will by-pass BaseDeepAgent, which\n # checks and assigns 'sess' arugment. So we manually set sess here. 
But\n # is there a better way to do this?\n self.action = self.agent.act(\n state=self.state, sess=self.agent.sess\n )\n next_state, vec_reward, done, _ = self.env.step(self.action)\n reward, done = func_compile_exp_agent(self.action, vec_reward, done)\n self.total_reward = reward + self.reward_decay * self.total_reward\n info = self.agent.step(\n state=self.state, action=self.action, reward=reward,\n next_state=next_state, episode_done=done\n )\n self.record(info)\n flag_success = True if done and reward > 0.0 else False\n if self.savedir is not None:\n self.steps_saver.save(self.episode_n, self.step_t, self.state, self.action,\n vec_reward, reward, done, self.total_reward, flag_success)\n self.state = next_state\n if done:\n self.step_t = 0\n return done", "def _step(self, action):\n\n # action is generated from the action_policy (external to the environment)\n if len(action) == 4:\n object_index, new_location, action_means, action_stds = action\n if len(action) == 2:\n \"\"\"\n Action is not generated from a Gaussian distribution\n \"\"\"\n object_index, new_location = action\n action_means = action_stds = None\n \n position = new_location[:2]\n rotation = new_location[2]\n\n prev_transform = self.e.objects[object_index].transform\n\n if len(self.action_storage) > 0:\n last_progress = self.action_storage[-1][4]\n else:\n last_progress = 0\n\n info = {}\n if self.e.act(object_index, Command(position, rotation)):\n # print ('Action accepted')\n cur_transform = self.e.objects[object_index].transform\n # I need to call self.action_storage.append before get_observation_and_progress\n self.action_storage.append( [object_index, prev_transform, cur_transform, None, None, True, action_means, action_stds] )\n observation, progress = self.get_observation_and_progress()\n self.action_storage[-1][3:5] = [observation, progress]\n\n info['action_accepted'] = True\n else:\n \"\"\"\n Action failed\n We can reduce the progress to avoid falling out of the table\n \"\"\"\n if len(self.action_storage) > 0:\n # Just return observation and progress of last action\n _, _, _, observation, progress, _, _, _ = self.action_storage[-1]\n progress -= self.config.failed_action_penalty\n else:\n # First action failed\n observation, _ = self.get_observation_and_progress()\n progress = -self.config.failed_action_penalty\n \n self.action_storage.append( [object_index, prev_transform, prev_transform, observation, progress, False, action_means, action_stds] )\n\n \n info['action_accepted'] = False\n\n # Typical threshold approach\n if progress > self.progress_threshold:\n # Finish action\n done = True\n else:\n done = False\n \n reward = progress - last_progress\n #print ('Progress = %.2f ; reward = %.2f' % (progress, reward))\n\n return (observation, reward, done, info)", "def step(self, action):\n reward = 0\n pose_all = []\n self.rotor_speeds = np.array([action]*4)\n for _ in range(self.action_repeat):\n done = self.sim.next_timestep(self.rotor_speeds) # update the sim pose and velocities\n reward += self.get_reward()\n pose_all += [self.sim.pose]\n if self.sim.crashed:\n reward = -5\n done = True\n #if (np.square(self.sim.pose[:3] - self.target_pos)).sum() < 1: # Close enough!\n #done = True\n next_state = np.concatenate(pose_all)\n return next_state, reward, done", "def step(self, action):\n x, y = self._move(action, *self._currentPos)\n\n if chr(self._grid[x, y]) == CASE_TYPES.Wall:\n # error - previous state was already a wall\n self._done = True\n self._trajectory.append(self._currentPos)\n return self._currentPos, 
-1, self._done, {}\n\n reward = {\n CASE_TYPES.Water: self.waterReward,\n CASE_TYPES.Sand: self.sandReward,\n CASE_TYPES.Open: self.stepReward,\n CASE_TYPES.Termination: self.successReward,\n CASE_TYPES.Trap: (\n -(self.maxSteps - len(self._trajectory)) + self.failureReward +\n self.trapReward)\n }[chr(self._grid[x, y])]\n\n # termination state\n if chr(self._grid[x, y]) in [CASE_TYPES.Termination, CASE_TYPES.Trap]:\n self._done = True\n\n self._currentPos = (x, y)\n\n self._trajectory.append(self._currentPos)\n self._nbSteps += 1\n\n if self._nbSteps >= self.maxSteps and not self._done:\n reward += self.failureReward\n\n return self._currentPos, reward, self._done, {}", "def step(self, state, action, reward, next_state, done):\n\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n \n # Learn every self.update_every time steps\n self.t_step = (self.t_step + 1) % self.update_every\n\n # Get random subset & learn if enough samples available in memory\n if self.t_step == 0:\n if len(self.memory) > self.batch_size:\n experiences = self.memory.sample()\n self.learn(experiences, self.gamma)\n \n return", "def step(self, action):\r\n s = self.get_state()\r\n\r\n elements = np.arange(self.S)\r\n # weights = np.squeeze(self.nextStateProbability[s,action])\r\n weights = self.nextStateProbability[s, action]\r\n nexts = choices(elements, weights, k=1)[0]\r\n\r\n # p = self.nextStateProbability[s,action]\r\n # reward = self.rewardsTable[s,action, nexts][0]\r\n reward = self.rewardsTable[s, action, nexts]\r\n\r\n # fully observable MDP: observation is the actual state\r\n self.currentObservation = nexts\r\n\r\n gameOver = False\r\n if self.currentIteration > np.Inf:\r\n ob = self.reset()\r\n gameOver = True # game ends\r\n else:\r\n ob = self.get_state()\r\n\r\n history = {\"time\": self.currentIteration, \"state_t\": s, \"action_t\": action,\r\n \"reward_tp1\": reward, \"state_tp1\": nexts}\r\n # history version with actions and states, not their indices\r\n # history = {\"time\": self.currentIteration, \"action_t\": self.actionListGivenIndex[action],\r\n # \"reward_tp1\": reward, \"observation_tp1\": self.stateListGivenIndex[self.get_state()]}\r\n self.currentIteration += 1\r\n return ob, reward, gameOver, history", "def _step(self, action: np.ndarray):\n # TODO: How do deal with goal changing?\n denormalize = False if self.use_raw_actions else True\n current_pos = self.sim.data.mocap_pos.copy()\n meanval = (self.mocap_pos_clip_upper + self.mocap_pos_clip_lower)/2.0\n rng = (self.mocap_pos_clip_upper - self.mocap_pos_clip_lower)/2.0\n new_pos = action[:3]*rng + meanval #current_pos + action[:3]*self.range\n # new_pos = current_pos + action[:3]*self.range\n new_pos = np.clip(new_pos, self.mocap_pos_clip_lower, self.mocap_pos_clip_upper)\n self.sim.data.mocap_pos[:] = new_pos.copy()\n self.robot.step({\n 'gripper': action[-2:]\n }, denormalize)", "def step(self, action_index):\n\n x_t1, r_t, terminal, info = self.env.step(self.gym_actions[action_index])\n s_t1 = self.get_preprocessed_RAM(x_t1)\n\n \n return s_t1, r_t, terminal, info", "def step(self, action):\n observation, reward, done, info = self.env.step(action)\n observation = cv2.resize(observation, (self.size, self.size))\n observation = np.array(observation, dtype=np.uint8)\n observation = observation.transpose(2, 0, 1)\n info = \"\"\n return observation, reward, done, info", "def step(self, action):\n \n # Check if the given position is empty\n if self.mat[action[0], action[1]] != 0:\n return 
(self.mat, -0.9, False)\n \n # Update\n self.mat[action[0], action[1]] = 1\n\n # Check if User won\n if self._check_win(1):\n return (self.mat, 1.0, True)\n\n # Check for game end\n acts = self.action_space\n if len(acts) == 0:\n return (self.mat, 0, True)\n\n # If not done, then randomly spawn an 'O' on the board and recalculate the reward\n spawn_point = acts[np.random.choice(acts.shape[0])]\n self.mat[spawn_point[0], spawn_point[1]] = 2\n\n # Check if User lost\n if self._check_win(2):\n return (self.mat, -1.0, True)\n \n # If nothing wrong happens\n else:\n return (self.mat, -0.1, False)", "def step(self, action):\n state, reward, done, debug_info = self.sample_transition(action)\n self.set_state(state)\n if \"next_state_heuristic\" in debug_info:\n self._current_heuristic = debug_info[\"next_state_heuristic\"]\n return state, reward, done, debug_info", "def step(self, observation, **extra_feed):\n\n action = self._evaluate(self.action, observation, **extra_feed)\n return action", "def step(self, observation, **extra_feed):\n\n action = self._evaluate(self.action, observation, **extra_feed)\n return action", "def step(self, action):\n reward_all = 0\n pose_all = []\n raw_states = []\n for _ in range(self.action_repeat):\n state, reward, done, _ = self.env.step(action) # run up the mountain\n\n processed_state = self.preprocess_state(state)\n raw_states.append(state)\n\n if done and self.i < 200:\n self.success = True\n\n reward_all += reward\n pose_all.append(processed_state)\n\n self.i += 1\n\n if done:\n missing = self.action_repeat - len(pose_all)\n pose_all.extend([pose_all[-1]] * missing)\n break\n\n next_state = np.concatenate(pose_all)\n return next_state, reward_all, done, raw_states", "def step(self, action, render=False):\n if self.scale == 1:\n # Scale the action\n action = np.multiply(action, self.scale_mult) + self.scale_add\n elif self.scale == 0:\n action = np.minimum(np.maximum(action, self.min_action), self.max_action)\n else:\n raise NotImplementedError\n\n # Publish action\n vel_cmd = 1.0 # action[0]\n steer_cmd = action # [1]\n self.__publish_cmd(vel_cmd, steer_cmd)\n\n # Wait specified time\n self.rate.sleep()\n\n # Collect new state\n next_state = self.get_state()\n reward, min_dist = self.__calculate_reward()\n # print(reward)\n done, exit_cond = self.__is_terminal(reward)\n\n # if not render:\n # # Reward for following the waypoints closely\n # reward = max((1. 
- min_dist) / 10., 0.0) + self.prev_reward\n # self.prev_reward = reward\n # if exit_cond:\n # reward = 0.0 # -10.0\n\n # else:\n # if exit_cond:\n # reward = -1.0\n \n if exit_cond:\n reward = 0.0\n\n # reward *= 10\n # print(reward)\n return next_state, reward, done, exit_cond", "def env_step(action: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n state, reward, done, _ = env.step(action)\n return (\n state.astype(np.float32),\n np.array(reward, np.float32),\n np.array(done, np.int32),\n )", "def step(self, action):\n self._last_base_position = self.rex.GetBasePosition()\n self._last_base_orientation = self.rex.GetBaseOrientation()\n if self._is_render:\n # Sleep, otherwise the computation takes less time than real time,\n # which will make the visualization like a fast-forward video.\n time_spent = time.time() - self._last_frame_time\n self._last_frame_time = time.time()\n time_to_sleep = self.control_time_step - time_spent\n if time_to_sleep > 0:\n time.sleep(time_to_sleep)\n base_pos = self.rex.GetBasePosition()\n # Keep the previous orientation of the camera set by the user.\n [yaw, pitch, dist] = self._pybullet_client.getDebugVisualizerCamera()[8:11]\n self._pybullet_client.resetDebugVisualizerCamera(dist, yaw, pitch, base_pos)\n\n for env_randomizer in self._env_randomizers:\n env_randomizer.randomize_step(self)\n\n # change up swing and stance ratio and desired speeds randomly for robustness\n if np.random.randint(300) == 0:\n self.ratio = np.random.uniform(self.min_swing_ratio, self.max_swing_ratio)\n\n if np.random.randint(300) == 0:\n self.speed = np.random.uniform(self.min_speed, self.max_speed)\n self.speed_des[0] = self.speed\n\n if np.random.randint(300) == 0:\n self.side_speed = np.random.uniform(self.min_side_speed, self.max_side_speed)\n self.speed_des[1] = self.side_speed\n\n self.base_vel_curr_trans, self.base_vel_curr_rot = self.get_base_velocity()\n action = self._transform_action_to_motor_command(action)\n self.rex.Step(action)\n self.base_vel_next_trans, self.base_vel_next_rot = self.get_base_velocity()\n \n self._env_step_counter += 1\n self.phase += self._action_repeat # the cycle length is CYCLE_TIME/time_step so can add \n # how many times an action was repeated\n\n if self.phase > self.cycle_len:\n self.phase = self.phase % self.cycle_len \n self.cycle_complete += 1\n\n reward = self._reward()\n done = self._termination()\n\n if done:\n self.rex.Terminate()\n\n return np.array(self._get_observation_np()), reward, done, {'action': action}", "def step(self, action):\n \n success = False\n self.curr_step += 1\n self._take_action(action)\n self._take_action(action)\n self._take_action(action)\n\n # initialize reward and get state \n reward = 0.0\n ob = self._get_state()\n\n # give dense rewards \n if not self.sparse_reward:\n reward = self._get_reward()\n\n # bad terminal conditions\n if self.curr_step >= self.max_steps \\\n or self.target_distance >= self.max_distance \\\n or self.mean_radius_sheep >= self.max_radius:\n self.finish = True\n if self.sparse_reward:\n reward = -1.0\n\n # good terminal conditions\n if self.target_distance <= 1.0:\n success = True\n self.finish = True\n if self.sparse_reward:\n reward = 1.0\n\n # update rl parameters\n self.episode_length += 1\n self.episode_reward += reward\n\n # generate info return parameter\n if self.info_mode == 1 and self.finish:\n info = {'r':self.episode_reward, 'l':self.episode_length, \n 's': success}\n else:\n info = {'n':self.num_sheep, 's': success}\n\n return ob, reward, self.finish, info", 
"def run_one_step(self):\n # Get the current state, action and initialise the reward\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n # Check if the environment has reached a terminal state\n if self.env.check_terminal() is False:\n # Save the initial state and action to an 'experience'\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n # Update the environment using the chosne action\n self.env.update(action)\n # Get the reward to attribute to the agent and save to the experience to save\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n # Get the updated state\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n # If the new state isn't terminal, save the next action and the 'done' flag to the experience\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n # If the new state is terminal, save a dummy action and the 'done' flag to the experience\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n # Update the history with the latest experience\n self.agent.update_history(copy.copy(latest_experience))\n # Update the agents policy using a batch of experiences chosen from the history\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count += 1\n # Update the target network if appropriate\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.agent.policy.learner.update_target_network()\n else:\n # If the environment is in a terminal state, record this and perform a policy update\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count = 0\n return reward", "def step(self,action):\n observation, reward, done, info = self.env.step(action)\n if info[\"health\"] <= 0 or info[\"enemy_health\"] <= 0:\n self.player_hp = 120\n self.enemy_hp = 120\n reward = 0\n else:\n self.player_hp = info['health']\n self.enemy_hp = info[\"enemy_health\"]\n reward = self.player_hp - self.enemy_hp\n\n\n if info[\"enemy_rounds_won\"] == 2 or info[\"rounds_won\"] == 2:\n self.player_hp = 120\n self.enemy_hp = 120\n reward = 0\n done = True\n\n obs = self.observation(observation)\n if self.current_frame_number == self.frame_skipping:\n self.q.append(obs)\n self.current_frame_number = 0 \n self.current_frame_number += 1\n reward = reward / 120 +1\n return np.array(list(self.q)), reward, done, info", "def step(self, action):\n res = self.reward_table.get(self.curr_state, action)\n\n self.curr_state = res['result']\n\n return res", "def step(self, action):", "def step(self, action):\n # TODO: code here\n y, x = self.state\n dy, dx = self.moves[action]\n next_x, next_y = x+dx, y+dy\n\n next_x = np.clip(next_x, 0, self.width-1) # clip the values to the world\n next_y = np.clip(next_y, 0, self.height-1) # clip the values to the world\n\n if next_y == 1:\n rand = np.random.uniform()\n if rand < 0.2:\n next_x += 1\n elif rand < 0.7:\n next_x += 2\n else:\n next_x += 3\n\n next_x = np.clip(next_x, 0, self.width - 1)\n\n if next_x == 4 and next_y == 1:\n reward = -1\n done = True\n elif next_x == 4 and next_y == 2:\n reward = 1\n done 
= True\n else:\n reward = 0\n done = False\n\n next_state = (next_y, next_x)\n self.state = next_state\n\n return next_state, reward, done, {}", "def step(self, action_history, observations):\n return self.call(action_history, observations)", "def run_one_step(self):\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n if self.env.check_terminal() is False:\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n self.env.update(action)\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n self.agent.update_history(copy.copy(latest_experience))\n self.count += 1\n # If the latest history has a large enough batch, perform an update\n # CHECK IF THIS IS THE RIGHT METHOD\n if self.count % self.batch_size == 0:\n self.agent.update_policy_ordered(max(1, self.batch_size))\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.count = 0\n self.agent.policy.learner.update_target_network()\n else:\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n # Perform an update on all of the previous experiences that haven't been updated\n if self.count % self.batch_size > 0:\n self.agent.update_policy_ordered((self.count % self.batch_size) + 1)\n self.count = 0\n return reward", "def step(self, reward, observation):\n self._last_observation = self._observation\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._store_transition(self._last_observation, self.action, reward, False)\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn)\n self.action = onp.asarray(self.action)\n return self.action", "def step(self, action):\n \"\"\" Action is a motion command \"\"\"\n rich_obs, reward, done, info = super(ColoredEgoCostmapRandomAisleTurnEnv, self).step(action)\n obs = self._extract_egocentric_observation(rich_obs)\n return obs, reward, done, info", "def doAction(self, gameState, action):\n self.lastState = gameState\n self.lastAction = action", "def make_step(self, action_index):\n # Randomly sample action_index if world is stochastic\n if np.random.uniform(0, 1) < self.random_move_probability:\n action_indices = np.arange(self.num_actions, dtype=int)\n action_indices = np.delete(action_indices, action_index)\n action_index = np.random.choice(action_indices, 1)[0]\n\n action = self.actions[action_index]\n\n # Determine new position and check whether the agent hits a wall.\n old_position = self.agent_position\n new_position = self.agent_position\n if action == \"UP\":\n candidate_position = old_position + self.num_cols\n if candidate_position < self.num_fields:\n new_position = candidate_position\n elif action == \"RIGHT\":\n candidate_position = old_position + 1\n if candidate_position % 
self.num_cols > 0: # The %-operator denotes \"modulo\"-division.\n new_position = candidate_position\n elif action == \"DOWN\":\n candidate_position = old_position - self.num_cols\n if candidate_position >= 0:\n new_position = candidate_position\n elif action == \"LEFT\": # \"LEFT\"\n candidate_position = old_position - 1\n if candidate_position % self.num_cols < self.num_cols - 1:\n new_position = candidate_position\n else:\n raise ValueError('Action was mis-specified!')\n\n # Update the environment state\n self.agent_position = new_position\n \n # Calculate reward\n reward = self.rewards[self.agent_position]\n reward -= 1\n return reward, new_position", "def perform_step(self, action):\n pass" ]
[ "0.78822833", "0.7701546", "0.74653417", "0.735205", "0.73112786", "0.7296633", "0.72683334", "0.72642654", "0.72118324", "0.71811354", "0.7162281", "0.71551687", "0.71234053", "0.7118485", "0.70683175", "0.6997027", "0.6996414", "0.699088", "0.6952764", "0.6945095", "0.6939528", "0.6913101", "0.69109845", "0.68902975", "0.6889493", "0.6849144", "0.6844795", "0.6794719", "0.67794347", "0.67557055", "0.6746276", "0.6744473", "0.67318475", "0.67211956", "0.67033035", "0.67009604", "0.6668397", "0.6667357", "0.6614929", "0.6614293", "0.6611076", "0.6589347", "0.658554", "0.6584596", "0.65840155", "0.6571545", "0.6560315", "0.65325266", "0.6499401", "0.648296", "0.64809316", "0.6478797", "0.64636064", "0.64452255", "0.6443007", "0.6429109", "0.64073884", "0.6387944", "0.6346282", "0.63398856", "0.63303745", "0.63221216", "0.6320005", "0.628055", "0.62735206", "0.6271264", "0.62662584", "0.6263516", "0.6263119", "0.62606966", "0.62561554", "0.62558955", "0.6253868", "0.6247598", "0.6224966", "0.62091815", "0.6203728", "0.62018174", "0.6192084", "0.6185942", "0.61664414", "0.61564064", "0.61564064", "0.6154217", "0.615042", "0.61267275", "0.6117984", "0.61089015", "0.6099026", "0.6096507", "0.60923266", "0.60876334", "0.6087335", "0.6086161", "0.6076458", "0.6075029", "0.6073301", "0.60476965", "0.60413754", "0.6027932" ]
0.70150334
15
Starts a new sequence, returns the first `TimeStep` of this sequence. See `reset(self)` docstring for more details
def _reset(self) -> ts.TimeStep:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self) -> ts.TimeStep:\n self._current_time_step = self._reset()\n return self._current_time_step", "def start(self):\n\t\tself._start = time.clock()\n\t\tif self._initial is None:\n\t\t\tself._initial = self._start\n\t\treturn self", "def get_first_step(self):\n return self.get_step_by_index(0)", "def step(self, action: types.NestedArray) -> ts.TimeStep:\n if self._current_time_step is None or self.should_reset(\n self._current_time_step\n ):\n return self.reset()\n\n self._current_time_step = self._step(action)\n return self._current_time_step", "def start(self, step=None):\n\n if step is None:\n while True:\n next_event = self._pop_next_event()\n if next_event:\n self.current_time = next_event.datetime\n next_event.call()\n else:\n break\n else:\n # TODO: this is not right...\n while True:\n run_to = self.current_time + step\n while True:\n next_event = self._pop_next_event(run_to)\n if next_event:\n next_event.call()\n else:\n break\n print \"{time} Simulation Finished\".format(time=self.current_time)", "def start(self):\n return self.reset(\n starting=1,\n stopped=0,\n )", "def reset(self):\n # Initialize the timestep\n self.timestep = 0\n self.state = self.starting_state\n\n if self.from_data:\n self.episode_num += 1\n\n\n return self.starting_state", "def set_first_machine_time_step(self, first_machine_time_step):", "def new_task(self):\n self.true_trajectory = self.simulate()\n self.x0 = self.true_trajectory[0]\n self.xT = self.true_trajectory[-1]\n return self.reset()", "def _reset(self):\n self.spot_supervisor.reset()\n return ts.TimeStep(ts.StepType.FIRST, np.float32(0.0), DISCOUNT,\n np.zeros(23, dtype=np.float32))", "def start(self):\n return self.trial.start + timedelta(seconds=self.start_checkpoint)", "def before(self, time: float) -> 'Trajectory':\n return self.split(time)[0]", "def next(self):\n steps = self.context.get('process.steps', [])\n\n if len(steps) < 1:\n return None\n\n if self._index is None:\n self._index = 0\n elif self._index < len(steps)-1:\n self._index += 1\n\n return Step(attributes=steps[self._index], index=self._index)", "def current_time_step(self) -> ts.TimeStep:\n return self._current_time_step", "def StartTimer(self):\n self._start_time = time.time()", "def first_loop_start(self) -> int:\n return self.__first_loop_start", "def setStartTime(self, *args):\n return _osgAnimation.Animation_setStartTime(self, *args)", "def seek_to_start_time(self):\n return 0", "def go_to_start(self):\n self.go_to(0)", "def getStartTime(self):\n return _osgAnimation.MatrixLinearSampler_getStartTime(self)", "def reset(self):\n self._timestep = np.array([0])", "def next(self):\n last_time = self.next_time\n current_time = time.time()\n delta = last_time + self.interval - current_time\n\n if last_time > current_time + self.interval:\n # Clock appears to have moved backwards. Reset\n # the timer to avoid waiting for the clock to\n # catch up to whatever time it was previously.\n self.next_time = current_time + self.interval\n elif delta < 0 and abs(delta) > self.interval * self.max_catchup:\n # Execution of jobs is too far behind. 
Give up on\n # trying to catch up and reset the time, so that\n # will only be repeated a maximum of\n # self.max_catchup times.\n self.next_time = current_time - \\\n self.interval * self.max_catchup\n else:\n self.next_time = last_time + self.interval\n\n return self", "def startNextAnim(self):\n self.notify.debug(\"startNextAnim self.okToStartNextAnim=%s\" % self.okToStartNextAnim)\n #import pdb; pdb.set_trace()\n self.curIval = None\n if self.okToStartNextAnim:\n self.notify.debug(\"got pass okToStartNextAnim\")\n whichAnim = self.chooseAnimToRun()\n self.notify.debug(\"whichAnim=%s\" % whichAnim)\n self.lastPlayingAnimPhase = whichAnim # merely for debugging\n self.curIval = self.createAnimSequence(whichAnim)\n self.notify.debug(\"starting curIval of length %s\" % self.curIval.getDuration())\n self.curIval.start()\n else:\n self.notify.debug(\"false self.okToStartNextAnim=%s\" %self.okToStartNextAnim)", "def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tstartticks = self._startticks if self.startticks else _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = startticks\n\t\t\tanim.start()\n\t\t\tstartticks += anim.duration\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def start(self):\n try:\n return self.index[0]\n except:\n pass", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tif not self._startticks:\n\t\t\tself._startticks = _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = self._startticks\n\t\t\tanim.start()\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def __next__(self):\n temp = timescale()\n try:\n temp.MJD = np.atleast_1d(self.MJD)[self.__index__].copy()\n except IndexError as exc:\n raise StopIteration from exc\n # add to index\n self.__index__ += 1\n return temp", "def start_record_trajectory(self):\r\n return self._arm.start_record_trajectory()", "def step(self):\n try:\n return next(self.generator)\n except StopIteration:\n return None", "def start(self):\r\n return self.schedule()", "def reset(self):\n self.state = self.start\n return self.start", "def RespStart(builder):\n return Start(builder)", "def begin(cls, timer=None):\r\n t = Timer(timer)\r\n try:\r\n yield t\r\n finally:\r\n t.finish()", "def getStartTime(self):\n return _osgAnimation.Vec3LinearSampler_getStartTime(self)", "def getStartTime(self):\n return _osgAnimation.Vec2LinearSampler_getStartTime(self)", "def reset(self, setup=False):\n self._done = False\n self._nbSteps = 0\n\n x = None\n if (self.startPosX == 'random' and setup) or (\n self.startPosX == 'episodeRandom'):\n x = random.randint(0, self._width - 1)\n elif (self.startPosX == 'random' and not setup):\n x = self._initState[0]\n elif self.startPosX == 'center':\n x = self._width - 1\n else:\n x = int(self.startPosX)\n\n y = None\n if (self.startPosX == 'random' and setup) or (\n self.startPosX == 'episodeRandom'):\n y = random.randint(0, self._height - 1)\n elif (self.startPosY == 'random' and not setup):\n y = self._initState[1]\n elif self.startPosX == 'center':\n y = self._height - 1\n else:\n y = int(self.startPosX)\n\n self._currentPos = (x, y)\n self._trajectory = [(x, y)]\n\n return (x, y)", "def start_timer(self):\n 
self.start_time = time.time()", "def getStartTime(self):\n return _osgAnimation.Vec4LinearSampler_getStartTime(self)", "def next_step(self):\n logging.debug(u\"Moving to next step\")\n\n if not self.steps or len(self.steps) < 1:\n logging.debug(u\"- no steps have ben set\")\n return None\n\n index = self.get('_index')\n\n if index is None:\n index = 0\n elif index < len(self.steps)-1:\n index += 1\n else:\n logging.debug(u\"- all steps have ben consumed\")\n return None\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- triggering step #{}\".format(index+1))\n self.set('_index', index)\n step = self.steps[index]\n step.trigger(bot=self.bot)\n return step", "def getStartTime(self):\n return _osgAnimation.Animation_getStartTime(self)", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def take_step(self):\n choices_of_steps = [(0,1), (1,0), (0,-1), (-1,0)]\n return random.choices(choices_of_steps)[0]", "def first(self) -> Task:\n return self._tasks[0]", "def step(self, time):\n raise \"use method step of class ReactorNet\" \n #return _cantera.reactor_step(self.__reactor_id, time) ", "def reset_next_step(self):\n return self._reset_next_step", "def _setup_next_sequence(cls):\n return 0", "def step(self):\n\n e = self.event_queue.get()\n self.current_time = e.time\n component = e.component\n component.output(self.current_time)\n component.input(self.current_time)\n component.fire()\n\n self.event_queue.put(VirtualTimeScheduler.Event(self.current_time + component.interval, component))\n\n return self.current_time", "def __call__ (self, t):\n #if t <= self.last_t:\n #raise SpaceTimeContinuumError(\n #\"We're moving back in time! Last t = {}, now = {}\".format(\n #self.last_t, t))\n\n #samp = self._sample(t)\n #self.last_t = t\n #self.last_samp = samp\n #return samp\n pass", "def start(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"start\")", "def setStartTime(self, t0):\n self._simulator_.update(t0=t0)\n return", "def start(self) -> pos.Pos:\n return self.__start", "def first(seq):\n try: # try iterator interface\n return seq.next()\n except AttributeError:\n pass\n try: # seq is no iterator, try indexed lookup\n return seq[0]\n except IndexError:\n pass\n raise TypeError(\n \"Argument to `first()` method needs to be iterator or sequence.\")", "def start_pose():\n global start_pose\n while start_pose is None:\n pass\n return start_pose", "def first(seq):\n return next(iter(seq))", "def __pos__(self):\n ts = self._fsm.get(self._id)\n return SMTimeSeries(ts._time, ts._value, self._fsm)", "def start(self):\r\n self.start_time = time.time()", "def first_order_posint(self, timestep):\n self.prev_pos = self.position\n self.position = self.position + (self.velocity * timestep)", "def start_transition(self, next=None):\n if next is not None:\n self._playlist.set_next_preset_by_name(next)\n\n self._in_transition = True\n self._start_transition = True\n self._elapsed = 0.0\n self.transition_starting.emit()", "def resume(self) -> None:\n if not self.started:\n TimerError(\"A timer should be started before to be resumed\")\n super().start()", "def start(self):\n moment = self.tz_match(self.moment)\n\n delta_to_start = timedelta(minutes=moment.minute % self.freq_minutes,\n seconds=moment.second,\n microseconds=moment.microsecond)\n\n start = moment - delta_to_start\n return start", "def get_time_step(self):\n return self._time_step", "def getStartTime(self):\n return _osgAnimation.QuatSphericalLinearSampler_getStartTime(self)", "def 
start(self):\n\t\tif self.__start_time is not None:\n\t\t\traise TimerError(f\"Timer is running. Use .stop() to stop the timer.\")\n\n\t\tself.__start_time = time.perf_counter()", "def t0(self):\n return self._time_axis.start", "def step(self, time):\n return _cantera.reactornet_step(self.__reactornet_id, time)", "def onTimeStepStart(self, timeStep):\n self.queuedInTimeStep = 0\n self.dequeuedInTimeStep = 0\n \n pass", "def min_time(self):\n #{{{ function to return time of first sample\n\n return self.mintime", "def first_tick(self, time):\n pass", "def load_first_ts_after(self, time):\n\n # get time step list\n df_ts = self.doc.c.sim.df.time_steps()\n \n if type(time) in [float, int]:\n if len(df_ts[df_ts.simulation_time > time]) == 0:\n raise RuntimeError(\"{} contains no timestep after {} d\".format(self.doc.c.original_filename, time))\n else:\n ts_no = int(df_ts[df_ts.simulation_time > time].reset_index().iloc[0].file_index)\n self.doc.loadTimeStep(ts_no)\n return df_ts[df_ts.simulation_time > time].reset_index().iloc[0]\n elif type(time) == datetime:\n if len(df_ts[df_ts.simulation_date>time])==0:\n raise RuntimeError(\"{} contains no timestep after {}\".format(self.doc.c.original_filename, time))\n else:\n ts_no = int(df_ts[df_ts.simulation_date > time].reset_index().iloc[0].file_index)\n self.doc.loadTimeStep(ts_no)\n return df_ts[df_ts.simulation_date > time].reset_index().iloc[0]\n else:\n raise ValueError(\"parameter 'time' must be of type float (simulation time in days) \")", "def start_timer(self):\n self.start_time = datetime.now()", "def getStartTime(self):\n return _osgAnimation.Channel_getStartTime(self)", "def initial_step(self, state, action):\n next_state = self.state_transition(state, action)\n env_action = random.choice(list(self.action_space(next_state)[1]))\n next_state = self.state_transition(next_state, env_action)\n return next_state", "def start(self):\n return self.__start", "def start(self):\n if self._start_time is not None:\n raise TimerError(\"Timer is running. Use stop() to stop it\")\n\n self._start_time = time.perf_counter()", "def time_step_spec(self) -> ts.TimeStep:\n return ts.time_step_spec(self.observation_spec(), self.reward_spec())", "def step ( self ) :\n return self.__step", "def start(self):\n self.start_time = time.time()", "def start_time(self):\n # TODO: use pd.Timestamp instead\n return self.time[0].to_pydatetime()", "def step(self):\n return self._step", "def get_next_batch_start(self):\n return None", "def set_start(self, ts):\n base_key = self.floor_time(ts)\n if self.first_timestamp is None or base_key < self.first_timestamp:\n self.first_timestamp = base_key", "def start(self):\n # type: () -> datetime\n return self._start", "def start(self) -> int:\n return self._start", "def start(self) -> int:\n return self._start", "def start(self) -> pdarray:\n return self._starts", "def start(self):\n# if self._start_time is not None:\n self._start_time = time.perf_counter()", "def initialize_simulator(self, startTime=None):\n \n # Load the inputs and check if any problem. If any exits.\n # Align inputs while loading.\n if not self.load_input(align = True):\n return False\n \n # Load the outputs and check if any problems. 
If any exits.\n if not self.load_outputs():\n return False\n \n # Take the time series: the first because now they are all the same (thanks to alignment)\n time = self.inputs[0].get_data_series().index\n \n # Define the initial time for the initialization\n if startTime == None:\n # Start time not specified, start from the beginning\n index = 0\n else:\n \n # Check that the type of start time is of type datetime\n if not isinstance(startTime, datetime.datetime):\n raise TypeError(\"The parameter startTime has to be of datetime.datetime type\")\n \n # Start time specified, start from the closest point\n if (startTime >= time[0]) and (startTime <= time[-1]):\n index = 0\n for t in time:\n if t < startTime:\n index += 1\n else:\n break\n else:\n index = 0\n raise IndexError(\"The value selected as initialization start time is outside the time frame\")\n \n # Once the index is know it can be used to define the start_time\n # If the offset is specified then use it as start time\n start_time = time[index]\n \n # Take all the data series\n Ninputs = len(self.inputs)\n start_input = numpy.zeros((1, Ninputs))\n start_input_1 = numpy.zeros((1, Ninputs))\n start_input_2 = numpy.zeros((1, Ninputs))\n i = 0\n if index == 0:\n for inp in self.inputs:\n dataInput = numpy.matrix(inp.get_data_series().values).reshape(-1,1)\n start_input[0, i] = dataInput[index,0]\n i += 1\n else:\n for inp in self.inputs:\n dataInput = numpy.matrix(inp.get_data_series().values).reshape(-1,1)\n start_input_1[0, i] = dataInput[index-1,0]\n start_input_2[0, i] = dataInput[index,0]\n \n # Linear interpolation between the two values\n dt0 = (time[index] - start_time).total_seconds()\n dT1 = (start_time - time[index-1]).total_seconds()\n DT = (time[index] - time[index-1]).total_seconds()\n \n # Perform the interpolation\n start_input[0, i] = (dt0*start_input_1[0, i] + dT1*start_input_2[0, i])/DT\n \n i += 1\n \n # Initialize the model for the simulation\n self.opts[\"initialize\"] = True\n \n try:\n # Simulate from the initial time to initial time + epsilon\n # thus we have 2 points\n \n # Create the input objects for the simulation that initializes\n input_u = numpy.hstack((start_input, start_input))\n input_u = input_u.reshape(2, -1)\n \n time = pd.DatetimeIndex([start_time, start_time])\n\n # Run the simulation, remember that\n # time has to be a dateteTimeIndex and Input has to be a numpy.matrix\n self.simulate(time=time, input=input_u)\n self.opts[\"initialize\"] = False\n \n # Initialize the selected variables and parameters to the values indicated \n # Done after very small simulation because there can be some internal parameters that defines\n # the initial value and may override the initialization with the indicated values\n # THIS DOESN'T WORK WITH MODELICA CONSTANTS!\n for v in self.variables:\n v.modify_initial_value_in_fmu(self.fmu)\n for p in self.parameters:\n p.modify_initial_value_in_fmu(self.fmu)\n \n return True\n \n except ValueError:\n logger.error(\"First simulation for initialize the model failed\")\n return False", "def start_run(self):\n return mlflow.start_run(\n run_id=self.run_id,\n experiment_id=self.experiment_id,\n run_name=self.run_name,\n nested=self.nested)", "def start(self):\n return self._start", "def start(self):\n return self._start", "def start(self):\n return self._start", "def start(self):\n return self._start", "def start(self):\n return self._start", "def __init__(self, start_time=None):\n if start_time is None:\n self.started = time.time()\n else:\n self.started = start_time", "def 
start(self):\n\t\treturn self._start", "def __nextRun(self, t1, t2):\n if self.t1==t1:\n # rerun from t1\n if self.t2!=t2:\n raise Exception(\"bad t2 (%f!=%f)\" % (t2, self.t2)) \n \n loader = fac.FacManager(self.metafor)\n nt = loader.lookForFile(self.nbFacs) #(0)\n loader.eraseAllFrom(nt)\n self.runOK = self.metafor.getTimeIntegration().restart(nt)\n else:\n # new time step\n tsm = self.metafor.getTimeStepManager()\n dt=t2-t1\n dtmax=dt\n tsm.setNextTime(t2, 1, dtmax) \n \n loader = fac.FacManager(self.metafor)\n nt1 = loader.lookForFile(self.nbFacs) #(0)\n nt2 = loader.lookForFile(self.nbFacs+1) #(1)\n if not self.saveAllFacs:\n loader.erase(nt1) # delete first fac\n self.runOK = self.metafor.getTimeIntegration().restart(nt2)\n if self.saveAllFacs:\n self.nbFacs+=1", "def time_step(self):\n return self._time_step", "def start(self) -> Vertex:\n return self._start", "def start_epoch(self):\n raise NotImplementedError" ]
[ "0.6659816", "0.64686", "0.61162376", "0.6072993", "0.60596496", "0.58604884", "0.56744826", "0.5647901", "0.5628664", "0.5623361", "0.56153256", "0.55805075", "0.5575825", "0.5544547", "0.54956526", "0.5452446", "0.5385656", "0.53372717", "0.53334737", "0.5326875", "0.5326443", "0.53258085", "0.5321996", "0.53214514", "0.5320048", "0.5317264", "0.53057706", "0.5291941", "0.5262118", "0.5258211", "0.52464443", "0.52440506", "0.5242724", "0.52323174", "0.521823", "0.52111644", "0.52083904", "0.5208232", "0.5194536", "0.5186112", "0.5183841", "0.5175628", "0.5163699", "0.51602775", "0.5157639", "0.5156871", "0.5153506", "0.51434803", "0.51422906", "0.5132119", "0.5131187", "0.51303834", "0.512412", "0.51196176", "0.5118226", "0.51146865", "0.51048005", "0.50964034", "0.5095496", "0.50954235", "0.50854105", "0.5077474", "0.50708294", "0.5062518", "0.50498277", "0.5040777", "0.50402147", "0.50400084", "0.5036319", "0.50323796", "0.5031477", "0.50241804", "0.5020389", "0.5018687", "0.50172174", "0.5015429", "0.5010081", "0.50090694", "0.50014466", "0.4999466", "0.4999081", "0.49967983", "0.4995986", "0.49893212", "0.49893212", "0.49831688", "0.49723807", "0.49721608", "0.4967185", "0.49595496", "0.49595496", "0.49595496", "0.49595496", "0.49595496", "0.4955533", "0.49447453", "0.49414524", "0.4927057", "0.49229914", "0.49175102" ]
0.5530532
14
r"""Calculate the cold plasma dispersion surfaces according to equation 2.64 in Plasma Waves by Swanson (2nd ed.)
def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e): # Make vectors of the wave numbers kc_z = np.linspace(1e-6, kc_z_max, 35) kc_x = np.linspace(1e-6, kc_x_max, 35) # Turn those vectors into matrices kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z) # Find some of the numbers that appear later in the calculations kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B wc_i = 1 / m_i # The ion gyro frequency wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # For every k_perp and k_par, turn the dispersion relation into a # polynomial equation and solve it. # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # The polynomial coefficients are calculated pol_koeff_8 = -2 * kc_ ** 2 pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape) pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2) pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2 pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2) pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2 pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * ( 1 + np.cos(theta_) ** 2) pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2 pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos( theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i)) pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * ( 1 + np.cos(theta_) ** 2) pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2 w_final = np.zeros((10, len(kc_z), len(kc_x))) # For each k, solve the equation for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))): disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0, pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x], 0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]] # theoretically should be real (A. Tjulin) w_temp = np.real(np.roots(disp_polynomial)) # We need to sort the answers to get nice surfaces. w_final[:, k_z, k_x] = np.sort(w_temp) n2_ = kc_ ** 2 / w_final ** 2 v_ph_c = np.sqrt(1. 
/ n2_) va_c = 1 / (wp_e * np.sqrt(m_i)) v_ph_va = v_ph_c / va_c diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i) e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor) e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_ b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat, w_final, e_x, e_y, e_z) dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]] dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)] dw_x[:, :, 1:] = np.diff(w_final, axis=2) dw_z[:, 1:, :] = np.diff(w_final, axis=1) v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])] s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z) # Compute ion and electron velocities v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final, e_x, e_y, e_z) # Ratio of parallel and perpendicular to B speed vepar_perp = v_ez * np.conj(v_ez) vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey)) vipar_perp = v_iz * np.conj(v_iz) vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy)) # Total particle speeds v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez) v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz) # Ion and electron energies m_e = -1 en_e = 0.5 * m_e * v_e2 en_i = 0.5 * m_i * v_i2 # Ratio of particle and field energy densities ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot) # Continuity equation dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final, v_ex, v_ez, v_ix, v_iz) dn_e_n_db_b = dn_e_n / b_tot dn_i_n_db_b = dn_i_n / b_tot dn_e_n_dbpar_b = dn_e_n / b_par dn_i_n_dbpar_b = dn_i_n / b_par dn_e = dn_e_n * wp_e ** 2 k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e)) # Build output dict extra_param = {"Degree of electromagnetism": np.log10(b_tot / e_tot), "Degree of longitudinality": np.abs(e_par) / e_tot, "Degree of parallelity E": e_z / e_tot, "Degree of parallelity B": np.sqrt( b_z * np.conj(b_z)) / b_tot, "Ellipticity E": e_pol, "Ellipticity B": b_pol, "E_part/E_field": np.log10(ratio_part_field), "v_g": np.sqrt(v_x ** 2 + v_z ** 2), "v_ph/v_a": np.log10(v_ph_va), "E_e/E_i": np.log10(en_e / en_i), "v_e/v_i": np.log10(np.sqrt(v_e2 / v_i2)), "v_epara/v_eperp": np.log10(vepar_perp), "v_ipara/v_iperp": np.log10(vipar_perp), "dn_e/dn_i": np.log10(dne_dni), "(dn_e/n)/ (dB/B)": np.log10(dn_e_n_db_b), "(dn_i/n)/(dB/B)": np.log10(dn_i_n_db_b), "(dn_i/n)/(dBpar/B)": np.log10(dn_i_n_dbpar_b), "(dn_e/n)/(dB/B)": np.log10(dn_e / k_dot_e), "(dn_e/n)/(dBpar /B)": np.log10(dn_e_n_dbpar_b), " Spar/Stot": s_par / s_tot} for k, v in zip(extra_param.keys(), extra_param.values()): extra_param[k] = np.transpose(np.real(v), [0, 2, 1]) kx_ = np.transpose(kc_x_mat) kz_ = np.transpose(kc_z_mat) wf_ = np.transpose(w_final, [0, 2, 1]) return kx_, kz_, wf_, extra_param
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_mixing_coefficients_surf(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n\n # SET UP NEW MIXING COEFFICIENT ARRAYS\n self.Kv_surf = np.zeros([Ly,N+1])\n self.Kt_surf = np.zeros([Ly,N+1])\n \n self.ghat = np.zeros([Ly,N+1])\n \n\n #################################\n # \tSURFACE KPP\n ################################\n #---> j-loop\n \n self.wm2 = []\n self.ws2 = []\n self.sigma_y = []\n for j in range(Ly):\n #--> k-loop (top to kbl[j])\n # in fortran k=N-1,kbl(j),-1\n for k in range(N-1,self.kbl[j]-1,-1):\n k_w = k\n k_r = k-1\n\n Bfsfc = self.Bfsfc_bl[j]\n zscale = z_u_w[j,N] - z_u_w[j,k_w]\n \n # CALCULATE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm2.append(wm)\n self.ws2.append(ws)\n # COMPUTE VERTICAL MIXING COEFFICIENTS\n sigma = (z_u_w[j,N] - z_u_w[j,k_w]) / np.max([self.hbls[j],self.eps])\n self.sigma1 = sigma #for debugging\n if j == 25: \n self.sigma_y.append(sigma)\n a1 = sigma - 2.\n a2 = 3.-2.*sigma\n a3 = sigma - 1.\n\n if sigma < 0.07:\n cff = 0.5 * (sigma-0.07)**2/0.07\n else:\n cff = 0\n \n \n if k == N-1: \n self.wm_debug = wm\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n\n self.Kv_surf[j,k_w] = wm * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gm1[j]+a3*self.dGm1_dS[j])))\n\n if k == N-1:\n self.ws_debug = ws\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n \n self.Kt_surf[j,k_w] = ws * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gt1[j]+a3*self.dGt1_dS[j])))\n #---> end k-loop \n if self.LMD_NONLOCAL:\n if Bfsfc < 0:\n self.ghat[j,k_w] = 0\n self.ghat[j,k_w] = self.Cg * sigma * (1.-sigma)**2\n else:\n self.ghat[j,k_w] = 0.\n\n # ADD CONVECTIVE ADJUSTMENT IN SURFACE MIXED LAYER \n if self.LMD_CONVEC and self.MLCONVEC: \n for k in range(N-1,int(self.kbl[j]-1),-1):\n k_w = k\n k_r = k -1\n\n if self.bvf[j,k_w] < 0:\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.ffac*self.nu0c\n\n # ADD CONVECTIVE ADJUSTMENT BELOW SURFACE MIXED LAYER\n # IF BKPP IS SWITCHED OFF!!\n for k in range(int(self.kbl[j]-1),-1,-1):\n k_w = k\n k_r = k -1\n if self.LMD_NONLOCAL:\n self.ghat[j,k_w] = 0\n if self.LMD_CONVEC and self.LMD_BKPP == False:\n if self.bvf[j,k_w] < 0:\n self.Kv_surf[j,k_w] = self.Kv_surf[j,k_w] + self.nu0c\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.nu0c\n \n\n #---> end j-loop", "def get_bforce_wm_ws_Gx_surf(self):\n\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w'] \n \n self.Gm1 = np.zeros([Ly])\n self.dGm1_dS = np.zeros([Ly]) \n self.Gt1 = np.zeros([Ly])\n self.dGt1_dS = np.zeros([Ly]) \n self.Bfsfc_bl = np.zeros([Ly])\n self.Av_bl = np.zeros([Ly])\n self.dAv_bl = np.zeros([Ly])\n \n #debugging\n self.wm_surf = np.zeros([Ly])\n self.ws_surf = np.zeros([Ly]) \n\n #---> j-loop\n for j in range(Ly): \n k_w = self.kbl[j] # KBL is \"new bl index after calling find_new_kbl()\n z_bl = z_u_w[j,N] - self.hbls[j]\n zscale = self.hbls[j] \n \n if self.swr_frac[j,k_w-1] > 0:\n Bfsfc = self.Bo[j] + self.Bosol[j] * ( 1. 
- self.swr_frac[j,k_w-1]\\\n * self.swr_frac[j,k_w] * ( z_u_w[j,k_w] - z_u_w[j,k_w-1] )\\\n / (self.swr_frac[j,k_w] * (z_u_w[j,k_w] - z_bl)\\\n + self.swr_frac[j,k_w-1] * (z_bl - z_u_w[j,k_w-1]) ))\n \n else:\n Bfsfc = self.Bo[j] + self.Bosol[j]\n \n # CALCUALTE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm_surf[j] = wm\n self.ws_surf[j] = ws \n\n if self.LIMIT_UNSTABLE_ONLY:\n f1 = 5. * np.max([0,Bfsfc]) * self.vonKar / (self.ustar[j]**4+self.eps)\n else:\n f1 = 0\n\n \n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n\n #MOMENTUM \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * (self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl[j] = Av_bl\n self.dAv_bl[j] = dAv_bl\n self.Gm1[j] = Av_bl / (self.hbls[j] * wm + self.eps)\n self.dGm1_dS[j] = np.min([0.,Av_bl*f1-dAv_bl/(wm+self.eps)]) \n\n #TEMPERATURE(BUOYANCY)\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * (self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1[j] = At_bl / (self.hbls[j] * ws + self.eps)\n self.dGt1_dS[j] = np.min([0.,At_bl*f1-dAt_bl/(ws+self.eps)]) \n\n self.Bfsfc_bl[j] = Bfsfc", "def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n self.ni[0], self.ni[-1] = 1e11, 1e11\n self.nn[0], self.nn[-1] = 1e11, 1e11\n self.Te[0], self.Te[-1] = 0.1, 0.1\n self.Ti[0], self.Ti[-1] = 0.01, 0.01\n # self.coll_em[0], self.coll_em[-1] = 1e5, 1e5\n # self.coll_im[0], self.coll_im[-1] = 1e5, 1e5", "def dry_snow_density(self):\n return (self.rho - self.h2o_vol * RHO_W0) / \\\n (1 - self.h2o_vol * RHO_W0 / RHO_ICE)", "def snow_depth(lon, lat, month):\n\n im = month - 1\n\n h0 = np.array( [28.01, 30.28, 33.89, 36.80, 36.93, 36.59,\n 11.02, 4.64, 15.81, 22.66, 25.57, 26.67] )\n a = np.array( [ 0.1270, 0.1056, 0.5486, 0.4046, 0.0214, 0.7021,\n 0.3008, 0.3100, 0.2119, 0.3594, 0.1496, -0.1876] )\n b = np.array( [-1.1833, -0.5908, -0.1996, -0.4005, -1.1795, -1.4819,\n -1.2591, -0.6350, -1.0292, -1.3483, -1.4643, -1.4229] )\n c = np.array( [-0.1164, -0.0263, 0.0280, 0.0256, -0.1076, -0.1195,\n -0.0811, -0.0655, -0.0868, -0.1063, -0.1409, -0.1413] )\n d = np.array( [-0.0051, -0.0049, 0.0216, 0.0024, -0.0244, -0.0009,\n -0.0043, 0.0059, -0.0177, 0.0051, -0.0079, -0.0316] )\n e = np.array( [ 0.0243, 0.0044, -0.0176, -0.0641, -0.0142, -0.0603,\n -0.0959, -0.0005, -0.0723, -0.0577, -0.0258, -0.0029] )\n\n x = (90. - lat) * np.cos( np.radians(lon) )\n y = (90. - lat) * np.sin( np.radians(lon) )\n\n h = ( h0[im] + ( a[im] * x ) + ( b[im] * y ) + ( c[im] * x * y ) +\n ( d[im] * x * x ) + ( e[im] * y * y ) )\n\n return h", "def pwlFly(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n numAZ = int(360./zenSpacing)\n pwl_All = np.zeros((numAZ,numZD))\n pwlSig_All = np.zeros((numAZ,numZD))\n Bvec_complete = []\n Sol_complete = []\n meas_complete = []\n model_complete = []\n postchis = []\n prechis = []\n aics = []\n bics = []\n #w = 1;\n\n for j in range(0,numAZ):\n # Find only those value within this azimuth bin:\n if(j - azSpacing/2. < 0) :\n criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) 
)\n ind = np.array(np.where(criterion))[0]\n azData =data[ind,:]\n numd = np.shape(azData)[0]\n #print(\"NUMD:\",numd)\n if numd < 2:\n continue\n #\n # Neq is acting like a constrain on the model a small value 0.001\n # let the model vary by 1000 mm\n # will let it vary more. a large value -> 1 will force the model to be closer to 0\n # This gets too large for lots of observations, s best to doit on the fly..\n #\n Neq = np.eye(numZD,dtype=float)# * 0.001\n Apart = np.zeros((numd,numZD))\n\n for i in range(0,numd):\n iz = int(np.floor(azData[i,2]/zenSpacing))\n Apart[i,iz] = (1.-(azData[i,2]-iz*zenSpacing)/zenSpacing)\n Apart[i,iz+1] = (azData[i,2]-iz*zenSpacing)/zenSpacing\n w = np.sin(data[i,2]/180.*np.pi)\n for k in range(iz,iz+2):\n for l in range(iz,iz+2):\n Neq[k,l] = Neq[k,l] + (Apart[i,l]*Apart[i,k]) * 1./w**2\n\n prechi = np.dot(azData[:,3].T,azData[:,3])\n\n Bvec = np.dot(Apart.T,azData[:,3])\n for val in Bvec:\n Bvec_complete.append(val)\n\n Cov = np.linalg.pinv(Neq)\n Sol = np.dot(Cov,Bvec)\n for val in Sol:\n Sol_complete.append(val)\n\n #Qxx = np.dot(Apart.T,Apart)\n #Qvv = np.subtract( np.eye(numd) , np.dot(np.dot(Apart,Qxx),Apart.T))\n #sd = np.squeeze(np.diag(Qvv))\n #dx = np.dot(np.linalg.pinv(Qxx),Bvec)\n #dl = np.dot(Apart,dx)\n\n postchi = prechi - np.dot(Bvec.T,Sol)\n postchis.append(np.sqrt(postchi/numd))\n prechis.append(np.sqrt(prechi/numd))\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n\n # calculate the model values for each obs\n model = np.dot(Apart,Sol) #np.zeros(numd)\n for d in range(0,numd):\n model_complete.append(model[d])\n meas_complete.append(azData[d,3])\n # zen = azData[d,2]\n # iz = int(np.floor(azData[d,2]/zenSpacing))\n # #model[d] = Sol[iz]\n\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),gls_results.rsquared,gls_results.aic,gls_results.bic)\n \n # loglikelihood(meas,model,sd)\n #sd = np.squeeze(np.diag(Qvv))\n #print(\"meas, model, sd:\",np.shape(azData),np.shape(model),np.shape(sd))\n f = loglikelihood(azData[:,3],model)\n dof = numd - np.shape(Sol)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n aics.append(aic) \n bics.append(bic) \n #print(\"=========================\")\n pwl_All[j,:] = Sol \n pwlSig_All[j,:] = pwlsig\n\n del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind\n\n #A_complete = np.squeeze(np.asarray(A_complete.todense()))\n #print(\"A shape\",np.shape(A_complete))\n\n print(\"Doing a fit to the data\")\n f = loglikelihood(np.array(meas_complete),np.array(model_complete))\n numd = np.size(meas_complete)\n dof = numd - np.shape(Sol_complete)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #prechi = np.dot(data[:,3].T,data[:,3])\n prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))\n postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n\n return pwl_All, pwlSig_All", "def solid_surface_density_RC2014_given_observed_catalog(sss_per_sys, max_core_mass=10.):\n mult_obs = sss_per_sys['Mtot_obs']\n mult_obs_2p = []\n a_obs_2p = []\n core_mass_obs_2p = []\n sigma_obs_2p = []\n for i in np.arange(len(mult_obs))[mult_obs > 1]: # only consider multi-planet systems\n a_sys = gen.a_from_P(sss_per_sys['P_obs'][i], sss_per_sys['Mstar_obs'][i])\n core_mass_sys = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(sss_per_sys['radii_obs'][i][a_sys 
> 0])\n core_mass_sys[core_mass_sys > max_core_mass] = max_core_mass\n a_sys = a_sys[a_sys > 0]\n\n mult_obs_2p += [len(a_sys)]*len(a_sys)\n a_obs_2p += list(a_sys)\n core_mass_obs_2p += list(core_mass_sys)\n sigma_obs_2p += list(solid_surface_density_system_RC2014(core_mass_sys, a_sys))\n mult_obs_2p = np.array(mult_obs_2p)\n a_obs_2p = np.array(a_obs_2p)\n core_mass_obs_2p = np.array(core_mass_obs_2p)\n sigma_obs_2p = np.array(sigma_obs_2p)\n return sigma_obs_2p, core_mass_obs_2p, a_obs_2p, mult_obs_2p", "def main():\n\n varList = {'beta': 6., 'convSpeed': 1.2, 'Mark': 0., 'axi': 1, 'acModes': 4, 'Nr': 801, 'Tf': 600., 'xf': 0.51}\n\n # Solve steady flame.\n # BC1: I have the attachment BC at r = 1, always\n # BC2: I need to set dF/dr = 0 at r = 0 iff Mark != 0\n [qMean, r, FMean] = steady_flame_area_FD3(varList['Mark'], varList['beta'], varList['axi'], varList['Nr'])\n r = r * varList['beta']\n\n # Calculate mean flame derivatives\n dFMeanDr = derivsnew.FD1_CT2_D(FMean, r[1] - r[0])\n d2FMeanDr2 = derivsnew.FD2_CT2_D(FMean, r[1] - r[0])\n\n #Apply BC smooth tip:\n if(varList['Mark']!=0.0):\n dFMeanDr[-1] = 0.0\n\n # Use correct number of points. Remember that the extrems need to be set depending on the BC!\n # The attach BC (first point) is always assumed to be true and removed from the vector list\n if(varList['Mark']==0):\n Nr = varList['Nr'] / 2\n dFMeanDr = dFMeanDr[1:]\n d2FMeanDr2 = d2FMeanDr2[1:]\n r = r[1:]\n # The smooth BC holds only if Mark!=0 (second derivatives appear): remove also the last point\n else:\n Nr = varList['Nr'] / 2 - 1\n dFMeanDr = dFMeanDr[1:-1]\n d2FMeanDr2 = d2FMeanDr2[1:-1]\n r = r[1:-1]\n\n # Calculate geometric values\n den = 1 + varList['beta'] * varList['beta'] * dFMeanDr * dFMeanDr\n dR = r[1] - r[0]\n # Set Nx equal to Nr for now.\n # The implementation is more complicated if they differ, and need to interpolate between values.\n Nx = Nr\n\n # Nonuniform grid spacing along x!\n # Nx = length(dx) has to hold.\n dx = np.empty(len(FMean) - 1)\n for ii in range(1, len(FMean)):\n dx[ii - 1] = FMean[ii] - FMean[ii - 1]\n\n [A, B, C, tau] = loadAcoustics(varList['xf'], varList['Tf'], varList['acModes'], varList['beta'])\n\n Matrix = buildMatrix(Nr, dR, varList['beta'], den, r, FMean, dFMeanDr, d2FMeanDr2, varList['Mark'], varList['acModes'], A,\n B, C, Nx, dx, tau, qMean, varList['convSpeed'])\n\n [d, W, V] = eigProblem.solveEigProb(Matrix)\n [dnew, Wnew, Vnew] = eigProblem.selectUnstable(d, W, V)\n\n print dnew / (2. 
* np.pi)", "def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n #hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test\r\n\r\n return transmitted_win, Q_flux", "def dynamic(self):\n # FrostIndexChangeRate=-(1-Afrost)*FrostIndex - Tavg*exp(-0.04*Kfrost*SnowCover/SnowWaterEquivalent);\n\n FrostIndexChangeRate = -(1 - self.var.Afrost) * self.var.FrostIndex - self.var.Tavg * \\\n np.exp(-0.04 * self.var.Kfrost * self.var.SnowCover / self.var.SnowWaterEquivalent)\n # FrostIndexChangeRate=self.var.AfrostIndex - self.var.Tavg* pcraster.exp(self.var.Kfrost*self.var.SnowCover*self.var.InvSnowWaterEquivalent)\n # Rate of change of frost index (expressed as rate, [degree days/day])\n # CHANGED 9 September 2004:\n # - first 
term should be negative\n # - second term should be subtracted, not added!!\n\n self.var.FrostIndex = np.maximum(self.var.FrostIndex + FrostIndexChangeRate * self.var.DtDay, 0)\n # frost index in soil [degree days]\n # based on Molnau and Bissel (1983, A Continuous Frozen Ground Index for Flood\n # Forecasting. In: Maidment, Handbook of Hydrology, p. 7.28, 7.55)\n # if Tavg is above zero, FrostIndex will stay 0\n # if Tavg is negative, FrostIndex will increase with 1 per degree C per day\n # Exponent of 0.04 (instead of 0.4 in HoH): conversion [cm] to [mm]!\n # Division by SnowDensity because SnowDepth is expressed as equivalent water\n # depth(always less than depth of snow pack)\n # SnowWaterEquivalent taken as 0.100 (based on density of 100 kg/m3) (Handbook of Hydrology, p. 7.5)\n # Afrost, (daily decay coefficient) is taken as 0.97 (Handbook of Hydrology,\n # p. 7.28)\n # Kfrost, (snow depth reduction coefficient) is taken as 0.57 [1/cm],\n # (HH, p. 7.28)", "def solid_surface_density_nHill_given_observed_catalog(sss_per_sys, max_core_mass=10., n=10.):\n Mstar_obs = np.repeat(sss_per_sys['Mstar_obs'][:,None], np.shape(sss_per_sys['P_obs'])[1], axis=1)[sss_per_sys['P_obs'] > 0] # flattened array of stellar masses repeated for each planet\n a_obs_per_sys = gen.a_from_P(sss_per_sys['P_obs'], sss_per_sys['Mstar_obs'][:,None])\n a_obs = a_obs_per_sys[sss_per_sys['P_obs'] > 0]\n radii_obs = sss_per_sys['radii_obs'][sss_per_sys['P_obs'] > 0]\n core_mass_obs = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(radii_obs)\n core_mass_obs[core_mass_obs > max_core_mass] = max_core_mass\n sigma_obs = solid_surface_density_nHill(core_mass_obs, a_obs, Mstar=Mstar_obs, n=n)\n return sigma_obs, core_mass_obs, a_obs", "def oceansim(sun_az,sun_zen,cam_head,cam_elev=0,m2=1.33,npart=1.08,mu=3.483, debug=True):\n\n #Water surface norm\n n = np.array([0,0,1])\n m1 = 1.0\n #vector from sun:\n ki = -np.asarray([np.sin(sun_az)*np.sin(sun_zen),\n np.cos(sun_az)*np.sin(sun_zen),\n np.cos(sun_zen)])\n xi = norm_cross(n,ki)\n #transmitted sunlight\n #tx, ty are the transmission amplitude coefficients in the xt, yt directions\n kt,tx,ty = Fresnel.transmission(ki,n,m1,m2)\n xt = xi\n #vector to camera\n kc = -np.asarray([np.sin(cam_head)*np.cos(cam_elev),\n np.cos(cam_head)*np.cos(cam_elev),\n np.sin(cam_elev)])*np.linalg.norm(kt)\n xc = norm_cross(n, kc) #right\n yc = norm_cross(kc, xc) #up\n #vectors for scattering\n ys = norm_cross(kt, kc) # y-axis of scattering event\n xst = norm_cross(ys, kt) # x-axis of scattering event relative to transmitted sunlight\n xsc = norm_cross(ys, kc) # x-axis of scattering event relative to camera\n #Mueller matrices\n # transmission through water surface:\n mm1 = Mueller.polarizer(tx,ty)\n # rotate to scattering plane\n mm2 = Mrotv(kt,xt,xst)\n # scatter\n th_s = vector_angle(kt,kc)\n #mm3 = Mocean(rad2deg(th_s)) #using Empirical ocean scattering\n mm3 = Mueller.rayleigh_norm(th_s) #normalized Rayleigh scattering matrix\n #b = Scattering.bsf_fournier(npart,mu)\n b = Scattering.vspf_fournier(th_s,npart,mu)\n # transform to camera's horizontal and up vectors\n mm4 = Mxform(xsc,ys, xc,yc)\n #Combined: mm4 . (b*mm3) . mm2 . 
mm1\n m = mm4.dot(b*mm3.dot(mm2.dot(mm1)))\n #stokes vector\n s = m.dot([1,0,0,0])\n if debug:\n return s,m,(ki,xi),(kt,xt,xst),(kc,xc,xsc),(mm1,mm2,mm3,b,mm4)\n else:\n return s,m", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def gz(xp, yp, zp, prisms):\n if xp.shape != yp.shape != zp.shape:\n raise ValueError(\"Input arrays xp, yp, and zp must have same shape!\")\n dummy = 1e-10\n res = 0\n for prism in prisms:\n if prism is None or 'density' not in prism.props:\n continue\n x, y = prism.x, prism.y\n z1, z2 = prism.z1, prism.z2\n density = prism.props['density']\n nverts = prism.nverts\n # Calculate the effect of the prism\n Z1 = z1 - zp\n Z2 = z2 - zp\n Z1_sqr = Z1**2\n Z2_sqr = Z2**2\n kernel = 0\n for k in range(nverts):\n Xk1 = x[k] - xp\n Yk1 = y[k] - yp\n Xk2 = x[(k + 1) % nverts] - xp\n Yk2 = y[(k + 1) % nverts] - yp\n p = Xk1*Yk2 - Xk2*Yk1\n p_sqr = p**2\n Qk1 = (Yk2 - Yk1)*Yk1 + (Xk2 - Xk1)*Xk1\n Qk2 = (Yk2 - Yk1)*Yk2 + (Xk2 - Xk1)*Xk2\n Ak1 = Xk1**2 + Yk1**2\n Ak2 = Xk2**2 + Yk2**2\n R1k1 = np.sqrt(Ak1 + Z1_sqr)\n R1k2 = np.sqrt(Ak2 + Z1_sqr)\n R2k1 = np.sqrt(Ak1 + Z2_sqr)\n R2k2 = np.sqrt(Ak2 + Z2_sqr)\n Ak1 = np.sqrt(Ak1)\n Ak2 = np.sqrt(Ak2)\n Bk1 = np.sqrt(Qk1**2 + p_sqr)\n Bk2 = np.sqrt(Qk2**2 + p_sqr)\n E1k1 = R1k1*Bk1\n E1k2 = R1k2*Bk2\n E2k1 = R2k1*Bk1\n E2k2 = R2k2*Bk2\n # Simplifying these arctans with, e.g., (Z2 - Z1)*arctan2(Qk2*p -\n # Qk1*p, p*p + Qk2*Qk1) doesn't work because of the restrictions\n # regarding the angles for that identity. 
The regression tests\n # fail for some points by a large amount.\n kernel += (Z2 - Z1)*(np.arctan2(Qk2, p) - np.arctan2(Qk1, p))\n kernel += Z2*(np.arctan2(Z2*Qk1, R2k1*p) -\n np.arctan2(Z2*Qk2, R2k2*p))\n kernel += Z1*(np.arctan2(Z1*Qk2, R1k2*p) -\n np.arctan2(Z1*Qk1, R1k1*p))\n Ck1 = Qk1*Ak1\n Ck2 = Qk2*Ak2\n # dummy helps prevent zero division and log(0) errors (that's why I\n # need to add it twice)\n # Simplifying these two logs with a single one is not worth it\n # because it would introduce two pow operations.\n kernel += 0.5*p*Ak1/(Bk1 + dummy)*np.log(\n (E1k1 - Ck1)*(E2k1 + Ck1)/((E1k1 + Ck1)*(E2k1 - Ck1) + dummy) +\n dummy)\n kernel += 0.5*p*(Ak2/(Bk2 + dummy))*np.log(\n (E2k2 - Ck2)*(E1k2 + Ck2)/((E2k2 + Ck2)*(E1k2 - Ck2) + dummy) +\n dummy)\n res += kernel*density\n res *= G*SI2MGAL\n return res", "def create_flux_vector_pms_gr(self):\n soma_prod = 0\n soma_inj = 0\n lim4 = 1e-4\n store_velocity = {}\n store_flux = {}\n for primal in self.primals:\n #1\n primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n primal_id = self.ident_primal[primal_id]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n for volume in fine_elems_in_primal:\n #2\n flux = {}\n velocity = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #3\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n if adj not in fine_elems_in_primal:\n #4\n pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n #3\n else:\n #4\n pvol = self.mb.tag_get_data(self.pcorr_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pcorr_tag, adj, flat=True)[0]\n #3\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)/(self.mi)\n keq2 = keq\n keq = keq*(np.dot(self.A, uni))\n pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))\n q = (grad_p)*keq - grad_z*keq*self.gama\n print((grad_p)*keq)\n print(- grad_z*keq*self.gama)\n print(q)\n print(self.store_flux_pf_gr[volume][tuple(unit)])\n print('\\n')\n import pdb; pdb.set_trace()\n\n if gid_adj > gid_vol:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n else:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n\n flux[tuple(unit)] = q\n velocity[tuple(unit)] = v\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n\n #2\n # print(gid_vol)\n # print(velocity)\n # print('\\n')\n # import pdb; pdb.set_trace()\n store_flux[volume] = flux\n self.mb.tag_set_data(self.flux_fine_pms_tag, volume, sum(flux.values()))\n # flt = sum(flux.values())\n # if volume not in 
self.wells_inj and volume not in self.wells_prod:\n # lim4 = 1e-7\n # if abs(flt) > lim4:\n # print(gid_vol)\n # print(flt)\n # import pdb; pdb.set_trace()\n # flt = sum(flux.values())\n store_velocity[volume] = velocity\n\n for volume in set(self.all_fine_vols) - set(self.wells):\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n values = store_flux[volume].values()\n if sum(values) > lim4:\n print('fluxo multiescala nao esta dando conservativo')\n print('gid:{0}'.format(gid))\n print(sum(values))\n import pdb; pdb.set_trace()\n\n with open('fluxo_multiescala_gr.txt', 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat= True)[0]\n values = store_flux[volume].values()\n if volume in self.wells_inj:\n soma_inj += sum(values)\n else:\n soma_prod += sum(values)\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(soma_inj))\n arq.write('soma_prod:{0}\\n'.format(soma_prod))\n\n return store_flux", "def dispersion(self, k):\n return 2*self.material.gamma*np.sqrt(self.material.z)", "def Schechter_M_z_M200c(M, redshift, M200c):\n\treturn 0.4 * n.log(10.) * 10**logPhi_evol(redshift, mass_2_richness(M200c, redshift)) * 10**(0.4 * (M_s_evol(redshift, mass_2_richness(M200c, redshift)) - M) * (alpha_evol(redshift, mass_2_richness(M200c, redshift)) + 1)) * n.e**( -10** ( 0.4 * (M_s_evol(redshift,mass_2_richness(M200c, redshift)) - M)))", "def get_hbls_hbbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n u = self.u\n v = self.v\n \n v_upts = TTTW_func.v2u(v)\n Hz = z_u_w[:,1:] - z_u_w[:,:-1]\n\n\n\n # CALCULATE swr_frac\n self.swr_frac = TTTW_func.lmd_swr_frac(self.grid_dict)\n\n\n # WHOLE THING HAPPENS IN j loop through y-indices\n \n # INITIALIZE ARRAYS\n self.kmo = np.zeros([Ly])\n self.Cr = np.zeros([Ly])\n self.kbl = np.empty([Ly],dtype='int')\n self.C_h_MO = np.zeros([Ly])\n self.Cr = np.zeros([Ly,N+1]) # sum term\n self.FC = np.zeros([Ly,N+1])\n self.swdk_r = np.zeros([Ly,N+1])\n \n self.zscale = np.zeros([Ly,N])\n self.Kern = np.zeros([Ly,N])\n\n \n # --> LOOP THROUGH Y-INDICES\n for j in range(Ly):\n if self.LIMIT_MO_DEPTH:\n self.kmo[j] = 0\n self.C_h_MO[j] = self.C_MO *self.ustar[j]**3/self.vonKar\n \n self.kbl[j] = 0\n self.Cr[j,-1] = 0 # set top Cr\n self.Cr[j,0] = 0 # set bottom Cr\n \n # SEARCH FOR MIXED LAYER DEPTH\n self.FC[j,-1] = 0.\n\n\n # ---> LOOP TOP TO BOTTOM (FORTRAN ==> k=N-1,1,-1)\n for k in range(N-1,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n \n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n self.zscale[j,k_w] = zscale\n if self.LMD_KPP:\n if self.LMD_BKPP:\n zscaleb = z_u_r[j,k_r] - z_u_w[j,0]\n Kern = zscale * zscaleb**2 / ( (zscale + self.epssfcs*self.hbls_old[j]) * (zscaleb**2+(self.epssfcb**2*self.hbbl_old[j]**2)))\n else:\n Kern = zscale / (zscale + (self.epssfcs*self.hbls_old[j]))\n else:\n Kern = 1.\n \n\n\n self.Kern[j,k_w] = Kern\n self.FC[j,k_w] = self.FC[j,k_w+1] + Kern * (\\\n ( ( u[j,k_r+1] - u[j,k_r] )**2 + ( v_upts[j,k_r+1] - v_upts[j,k_r])**2 ) \\\n / (Hz[j,k_r] + Hz[j,k_r+1]) \\\n - 0.5 * ( Hz[j,k_r] + Hz[j,k_r+1]) * (self.Ri_inv * self.bvf[j,k_w] + self.C_Ek*self.f[j]*self.f[j]))\n\n\n #\t\tLOOP THAT FINDS BL DEPTH ##\n #----> LOOP TOP TO BOTTOM (start at free surface, w-level surface) \n \n if self.LMD_KPP:\n #swdk_r only used in this function so don't need to be class attribute\n # but for testing make it an attribute to see what it is\n \n # fortran equivlanet ===> 
k=N,1,-1 \n for k in range(N,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n ###################################################################### \n self.swdk_r[j,k_w] = np.sqrt( self.swr_frac[j,k_w] * self.swr_frac[j,k_w-1])\n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n Bfsfc = self.Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])\n \n self.bvf_max = np.sqrt(np.max([0,self.bvf[j,k_w-1]]))\n \n # CALCULATE TURBULENT VELOCITY SCALE FOR TRACERS\n \t\t\t self.ws = self.lmd_wscale_ws_only(Bfsfc, zscale,self.hbls_old[j],self.ustar[j])\n \n self.Vtsq = self.Vtc * self.ws* self.bvf_max + self.V0\n \n\n self.Cr[j,k_w] = self.FC[j,k_w] + self.Vtsq\n \n\n #######################################################################\n \n # SEARCH FOR hbls vertical level #\n '''\n kbl is specified at vertical w-level (via Cr which is at\n vertical w-levels)\n '''\n if self.kbl[j] == 0 and self.Cr[j,k_w] < 0:\n self.kbl[j] = k_w\n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] == 0 and Bfsfc*(z_u_w[j,N] - z_u_r[j,k_r]) > self.C_h_MO[j]:\n self.kmo[j] = k_w\n\n \n #--> still in j-loop\n #######################################################\n \n # \t\tGET SURFACE BOUNDARY LAYER DEPTH # \n self.hbls[j] = z_u_w[j,N] - z_u_w[j,0] + self.eps # set hbls as depth of entire water column\n if self.kbl[j] > 0:\n k_w = self.kbl[j]\n k_r = k_w - 1 \n if k_w == N: # set hbls at the surface btwn w- and rho-levels at surface\n self.hbls[j] = z_u_w[j,N] - z_u_r[j,N-1]\n \n else:\n self.hbls[j] = z_u_w[j,N] - ( z_u_r[j,k_r] * self.Cr[j,k_w+1] - z_u_r[j,k_r+1] * self.Cr[j,k_w]) / \\\n (self.Cr[j,k_w+1] - self.Cr[j,k_w])\n \n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] > 0:\n k_w = self.kmo[j]\n k_r = k_w-1\n if k_w == N:\n z_up = z_u_w[j,N]\n cff_up = np.max([0,Bo[j]])\n else:\n z_up = z_r[j,k_w+1]\n cff_up = np.max([0, Bo[j] + self.Bosol[j]*(1-self.swdk_r[j,(k_w-1)+1])])\n \n cff_dn = np.max([0,Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])]) \n h_MO = z_u_w[j,N] + self.C_h_MO[j] * ( cff_up*z_up - cff_dn * z_u_r[j,k_r] ) \\\n / ( cff_up * cff_dn * (z_up - z_u_r[j,k_r]) ) \\\n + self.C_h_MO[j] * (cff_dn - cff_up)\n\n self.hbls[j] = np.min([self.hbls[j],np.max([h_MO,0])])\n\n\n\n #### GET BOTTOM BOUNDARY LAYER DEPTH #######\n if self.LMD_BKPP:\n self.kbl[j] = 0 # reset Cr at bottom and kbl for BKPP\n self.Cr[j,0] = 0.\n self.FC[j,0] = 1.5 * self.FC[j,1] - 0.5 * self.FC[j,2] # linear extrapolation\n \n #---> LOOP BOTTOM TO TOP\n # FIND kbl for BBL\n for k in range(1,N+1):\n k_r = k-1\n k_w = k \n self.Cr[j,k_w] = self.FC[j,k_w] - self.FC[j,0]\n \n # LOOK FOR FIRST ZERO CROSSING FROM BOTTOM UP\n if self.kbl[j] == 0 and self.Cr[j,k_w] > 0:\n self.kbl[j] = k_w \n \n\n self.hbbl[j] = z_u_w[j,N] - z_u_w[j,0] # total depth\n if self.kbl[j] > 0 :\n k_w = self.kbl[j] \n k_r = k_w -1\n if k_w == 1: # NO BBL CASE\n self.hbbl[j] = z_u_r[j,0] - z_u_w[j,0] #in between bottom rho and w-level\n else:\n self.hbbl[j] = ( z_u_r[j,k_r-1] * self.Cr[j,k_w] - z_u_r[j,k_r] * self.Cr[j,k_w-1]) / \\\n (self.Cr[j,k_w] - self.Cr[j,k_w-1]) - z_u_w[j,0]", "def dispersion(self, k):\n return np.sqrt(\n 1 + (\n (k*self.material.hbar_m)**2 / (2*self.material.m_star_m) -\n self.material.z\n )**2\n )", "def sigmai_dep(ptem, psal, pref):\n zr4 = 4.8313e-4\n zd =-2.042967e-2\n zrau0 = 1000.e0\n \n sigmai_dep_out = zeros(psal.shape)\n \n # ?? for whatever reason sqrt(abs(psal)) seems to kick up a fuss when arrays\n # exceed a certain size...??? 
otherwise this could be vectorised\n # TODO: if pref is a number, broadcast it into a 2d field\n \n for jj in range(psal.shape[0]): # python indexing\n for ji in range(psal.shape[1]):\n \n ztem = ptem[jj, ji]\n zsal = psal[jj, ji]\n zws = sqrt( abs(psal[jj, ji]) )\n \n # Compute the volumic mass of pure water at atmospheric pressure.\n zr1 = ( ( ( ( (6.536332e-9 * ztem - 1.120083e-6) * ztem + 1.001685e-4 )\n * ztem - 9.095290e-3 ) * ztem + 6.793952e-2 ) * ztem + 999.842594e0\n )\n\n # Compute the seawater volumic mass at atmospheric pressure.\n zr2 = ( ( ( ( 5.3875e-9 * ztem - 8.2467e-7) * ztem + 7.6438e-5)\n * ztem - 4.0899e-3) * ztem + 0.824493e0\n )\n\n zr3 = (-1.6546e-6 * ztem + 1.0227e-4) * ztem - 5.72466e-3\n\n # Compute the potential volumic mass (referenced to the surface).\n zrhop = (zr4 * zsal + zr3 * zws + zr2) * zsal + zr1\n\n # Compute the compression terms.\n ze = (-3.508914e-8 * ztem - 1.248266e-8) * ztem - 2.595994e-6\n\n zbw = (1.296821e-6 * ztem - 5.782165e-9) * ztem + 1.045941e-4\n\n zb = zbw + ze * zsal\n\n zc = (-7.267926e-5 * ztem + 2.598241e-3) * ztem + 0.1571896e0\n\n zaw = ( ( (5.939910e-6 * ztem + 2.512549e-3) * ztem - 0.1028859e0 ) \n * ztem - 4.721788e0\n )\n\n za = (zd * zws + zc) * zsal + zaw\n\n zb1 = (-0.1909078e0 * ztem + 7.390729e0) * ztem - 55.87545e0\n\n za1 = ( ( (2.326469e-3 * ztem + 1.553190e0) * ztem - 65.00517e0)\n * ztem + 1044.077e0\n )\n\n zkw = ( ( ( (-1.361629e-4 * ztem - 1.852732e-2) * ztem - 30.41638e0)\n * ztem + 2098.925e0) * ztem + 190925.60\n )\n\n zk0 = (zb1 * zws + za1) * zsal + zkw\n\n # Compute the potential density anomaly.\n sigmai_dep_out[jj, ji] = ( zrhop / (1.0e0 - pref / \n ( zk0 - pref * (za - pref * zb) ) )\n - zrau0\n )\n \n return sigmai_dep_out", "def sat_vap_dens(nz, T, SWVD, plot=False):\r\n rho_v = np.zeros(nz)\r\n rho_v_dT = np.zeros(nz)\r\n if SWVD == \"Libbrecht\":\r\n rho_v = (\r\n np.exp(-T_ref_L / T) / (f * T) * (a0 + a1 * (T - 273) + a2 * (T - 273) ** 2)\r\n ) # [kg/m^3] Water vapor density\r\n rho_v_dT = (\r\n np.exp(-T_ref_L / T)\r\n / (f * T ** 2)\r\n * (\r\n (a0 - a1 * 273 + a2 * 273 ** 2) * (T_ref_L / T - 1)\r\n + (a1 - a2 * 2 * 273) * T_ref_L\r\n + a2 * T ** 2 * (T_ref_L / T + 1)\r\n )\r\n ) # [kg/m^3/K]\r\n elif SWVD == \"Calonne\":\r\n x = (L_Cal * mH2O) / (rho_i * kB)\r\n rho_v = rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n rho_v_dT = x / T ** 2 * rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n elif SWVD == \"Hansen\":\r\n\r\n rho_v = (\r\n (10.0 ** (c1 / T + c2 * np.log(T) / np.log(10) + c3 * T + c4 * T ** 2 + c5))\r\n * c6\r\n / R_v\r\n / T\r\n )\r\n rho_v_dT = (\r\n rho_v\r\n * np.log(10)\r\n * (-c1 / T ** 2 + c2 / (T * np.log(10)) + c3 + 2 * c4 * T)\r\n - rho_v / T\r\n )\r\n else:\r\n raise ValueError(\"Saturation water vapor density not available\")\r\n if plot:\r\n fig1 = plt.plot(T, rho_v)\r\n plt.title(\"Water vapor density with respect to temperature\")\r\n plt.show(fig1)\r\n fig2 = plt.plot(T, rho_v_dT)\r\n plt.title(\"Derivative of water vapor density with respect to temperature\")\r\n plt.show(fig2)\r\n return rho_v, rho_v_dT", "def solid_surface_density_CL2013(M, a):\n return solid_surface_density(M, a, a)", "def findzpd(self):\n dc=0.5*self.rms*self.ndstep\n #fixed at 0.1 of the dispersion\n dd=0.1*self.ws.coef[1]\n\n #set upt he docef values\n dcoef=self.ws.coef*0.0\n dcoef[0]=dc\n dcoef[1]=dd\n self.ws=st.findxcor(self.xarr, self.farr, self.swarr, self.sfarr, self.ws, \n dcoef=dcoef, ndstep=self.ndstep, best=False, inttype='interp')\n self.plotArt()\n 
self.redraw_canvas()", "def main_gamma_ray_loop(\n num_decays,\n model,\n plasma,\n time_steps=10,\n time_end=80.0,\n grey_opacity=-1,\n spectrum_bins=500,\n time_space=\"log\",\n photoabsorption_opacity=\"tardis\",\n pair_creation_opacity=\"tardis\",\n seed=1,\n path_to_decay_data=\"~/Downloads/tardisnuclear/decay_radiation.h5\",\n positronium_fraction=0.0,\n):\n # Note: not best numpy practice, but works better in numba than the alternatives\n np.random.seed(seed)\n\n # Enforce cgs\n outer_velocities = model.v_outer.to(\"cm/s\").value\n inner_velocities = model.v_inner.to(\"cm/s\").value\n ejecta_density = model.density.to(\"g/cm^3\").value\n ejecta_volume = model.volume.to(\"cm^3\").value\n ejecta_velocity_volume = (\n 4 * np.pi / 3 * (outer_velocities**3.0 - inner_velocities**3.0)\n )\n time_explosion = model.time_explosion.to(\"s\").value\n number_of_shells = model.no_of_shells\n raw_isotope_abundance = model.raw_isotope_abundance.sort_values(\n by=[\"atomic_number\", \"mass_number\"], ascending=False\n )\n\n shell_masses = ejecta_volume * ejecta_density\n\n time_start = time_explosion\n time_end *= u.d.to(u.s)\n\n assert (\n time_start < time_end\n ), \"Error, simulation start time greater than end time!\"\n\n if time_space == \"log\":\n times = np.zeros(time_steps + 1)\n\n # log time steps\n for i in range(time_steps + 1):\n times[i] = (\n np.log(time_start)\n + (np.log(time_end) - np.log(time_start)) / time_steps * i\n )\n times[i] = np.exp(times[i])\n else:\n times = np.linspace(time_start, time_end, time_steps + 1)\n\n dt_array = np.diff(times)\n effective_time_array = np.array(\n [np.sqrt(times[i] * times[i + 1]) for i in range(time_steps)]\n )\n\n # Use isotopic number density\n for atom_number in plasma.isotope_number_density.index.get_level_values(0):\n values = plasma.isotope_number_density.loc[atom_number].values\n if values.shape[1] > 1:\n plasma.number_density.loc[atom_number] = np.sum(values, axis=0)\n else:\n plasma.number_density.loc[atom_number] = values\n\n # Calculate electron number density\n electron_number_density = (\n plasma.number_density.mul(plasma.number_density.index, axis=0)\n ).sum()\n\n electron_number_density_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n mass_density_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n electron_number = (electron_number_density * ejecta_volume).to_numpy()\n\n inv_volume_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n # Pre-calculate quantities as they change with time\n for i, t in enumerate(effective_time_array):\n inv_volume_time[:, i] = (1.0 / ejecta_velocity_volume) / (t**3.0)\n mass_density_time[:, i] = shell_masses * inv_volume_time[:, i]\n electron_number_density_time[:, i] = (\n electron_number * inv_volume_time[:, i]\n )\n\n energy_df_rows = np.zeros((number_of_shells, time_steps))\n\n # Calculate number of packets per shell based on the mass of isotopes\n number_of_isotopes = plasma.isotope_number_density * ejecta_volume\n total_number_isotopes = number_of_isotopes.sum(axis=1)\n\n inventories = raw_isotope_abundance.to_inventories()\n all_isotope_names = get_all_isotopes(raw_isotope_abundance)\n all_isotope_names.sort()\n\n gamma_ray_lines = get_nuclear_lines_database(path_to_decay_data)\n\n taus = {}\n parents = {}\n gamma_ray_line_array_list = []\n average_energies_list = []\n average_positron_energies_list = []\n\n for i, isotope in enumerate(all_isotope_names):\n nuclide = rd.Nuclide(isotope)\n 
taus[isotope] = nuclide.half_life() / np.log(2)\n child = nuclide.progeny()\n if child is not None:\n for c in child:\n if rd.Nuclide(c).half_life(\"readable\") != \"stable\":\n parents[c] = isotope\n\n energy, intensity = setup_input_energy(\n gamma_ray_lines[\n gamma_ray_lines.Isotope == isotope.replace(\"-\", \"\")\n ],\n \"g\",\n )\n gamma_ray_line_array_list.append(np.stack([energy, intensity]))\n average_energies_list.append(np.sum(energy * intensity))\n positron_energy, positron_intensity = setup_input_energy(\n gamma_ray_lines[\n gamma_ray_lines.Isotope == isotope.replace(\"-\", \"\")\n ],\n \"bp\",\n )\n average_positron_energies_list.append(\n np.sum(positron_energy * positron_intensity)\n )\n\n # Construct Numba typed dicts\n gamma_ray_line_arrays = {}\n average_energies = {}\n average_positron_energies = {}\n\n for iso, lines in zip(all_isotope_names, gamma_ray_line_array_list):\n gamma_ray_line_arrays[iso] = lines\n\n for iso, energy, positron_energy in zip(\n all_isotope_names, average_energies_list, average_positron_energies_list\n ):\n average_energies[iso] = energy\n average_positron_energies[iso] = positron_energy\n\n # urilight chooses to have 0 as the baseline for this calculation\n # but time_start may also be valid in which case decay time is time_end - time_start\n total_energy_list = []\n\n for shell, inv in enumerate(inventories):\n decayed_energy = {}\n total_decays = inv.cumulative_decays(time_end)\n for nuclide in total_decays:\n if nuclide in parents and nuclide != \"Co-56\" and nuclide != \"Co-57\":\n parent = parents[nuclide]\n if parent in parents:\n parent = parents[parent]\n decayed_energy[parent] += (\n total_decays[nuclide]\n * average_energies[nuclide]\n * shell_masses[shell]\n )\n else:\n decayed_energy[nuclide] = (\n total_decays[nuclide]\n * average_energies[nuclide]\n * shell_masses[shell]\n )\n\n total_energy_list.append(decayed_energy)\n\n total_energy = pd.DataFrame(total_energy_list)\n\n total_energy_columns = total_energy.columns.to_list()\n\n total_energy = total_energy[\n sorted(\n total_energy_columns, key=get_nuclide_atomic_number, reverse=True\n )\n ]\n\n energy_per_mass = total_energy.divide(\n (raw_isotope_abundance * shell_masses).T.to_numpy(),\n axis=0,\n )\n\n # Time averaged energy per mass for constant packet count\n average_power_per_mass = energy_per_mass / (time_end - time_start)\n\n energy_per_mass_norm = energy_per_mass.divide(\n energy_per_mass.sum(axis=1), axis=0\n ) # .cumsum(axis=1)\n\n decayed_packet_count = num_decays * number_of_isotopes.divide(\n total_number_isotopes, axis=0\n )\n\n packets_per_isotope = (\n (energy_per_mass_norm * decayed_packet_count.T.values)\n .round()\n .fillna(0)\n .astype(int)\n )\n\n print(\"Total gamma-ray energy\")\n print(total_energy.sum().sum() * u.keV.to(\"erg\"))\n\n print(\"Total positron energy\")\n print(total_energy[\"Co-56\"].sum(axis=0) * 0.0337 * u.keV.to(\"erg\"))\n\n # Taking iron group to be elements 21-30\n # Used as part of the approximations for photoabsorption and pair creation\n # Dependent on atomic data\n iron_group_fraction_per_shell = model.abundance.loc[(21):(30)].sum(axis=0)\n\n number_of_packets = packets_per_isotope.sum().sum()\n print(\"Total packets:\", number_of_packets)\n\n packet_energy = total_energy.sum().sum() / number_of_packets\n\n print(\"Energy per packet\", packet_energy)\n\n # Need to update volume for positron deposition to be time-dependent\n print(\"Initializing packets\")\n (\n packets,\n energy_df_rows,\n energy_plot_df_rows,\n 
energy_plot_positron_rows,\n ) = initialize_packets(\n packets_per_isotope,\n packet_energy,\n gamma_ray_line_arrays,\n positronium_fraction,\n inner_velocities,\n outer_velocities,\n inv_volume_time,\n times,\n energy_df_rows,\n effective_time_array,\n taus,\n parents,\n average_positron_energies,\n inventories,\n average_power_per_mass,\n )\n\n print(\"Total positron energy from packets\")\n print((energy_df_rows).sum().sum() * u.eV.to(\"erg\"))\n\n total_cmf_energy = 0\n total_rf_energy = 0\n\n for p in packets:\n total_cmf_energy += p.energy_cmf\n total_rf_energy += p.energy_rf\n\n print(\"Total CMF energy\")\n print(total_cmf_energy)\n\n # Below is the Artis compensation for their method of packet rejection\n \"\"\"\n energy_ratio = total_energy.sum().sum() / total_cmf_energy\n\n print(\"Energy ratio\")\n print(energy_ratio)\n \n for p in packets:\n p.energy_cmf *= energy_ratio\n p.energy_rf *= energy_ratio\n\n for e in energy_df_rows:\n e *= energy_ratio\n \n for row in energy_plot_df_rows:\n row[1] *= energy_ratio\n \"\"\"\n print(\"Total RF energy\")\n print(total_rf_energy)\n\n energy_bins = np.logspace(2, 3.8, spectrum_bins)\n energy_out = np.zeros((len(energy_bins - 1), time_steps))\n\n # Process packets\n (\n energy_df_rows,\n energy_plot_df_rows,\n energy_out,\n deposition_estimator,\n ) = gamma_packet_loop(\n packets,\n grey_opacity,\n photoabsorption_opacity,\n pair_creation_opacity,\n electron_number_density_time,\n mass_density_time,\n inv_volume_time,\n iron_group_fraction_per_shell.to_numpy(),\n inner_velocities,\n outer_velocities,\n times,\n dt_array,\n effective_time_array,\n energy_bins,\n energy_df_rows,\n energy_plot_df_rows,\n energy_out,\n )\n\n # DataFrame of energy information\n energy_plot_df = pd.DataFrame(\n data=energy_plot_df_rows,\n columns=[\n \"packet_index\",\n \"energy_input\",\n \"energy_input_r\",\n \"energy_input_time\",\n \"energy_input_type\",\n \"compton_opacity\",\n \"photoabsorption_opacity\",\n \"total_opacity\",\n ],\n )\n\n # DataFrame of positron energies\n energy_plot_positrons = pd.DataFrame(\n data=energy_plot_positron_rows,\n columns=[\n \"packet_index\",\n \"energy_input\",\n \"energy_input_r\",\n \"energy_input_time\",\n ],\n )\n\n # DataFrame of estimated deposition\n # Multiply dataframes by inv_volume_time array\n # if per unit volume is needed\n energy_estimated_deposition = (\n pd.DataFrame(data=deposition_estimator, columns=times[:-1])\n ) / dt_array\n\n # Energy is eV/s\n energy_df = pd.DataFrame(data=energy_df_rows, columns=times[:-1]) / dt_array\n\n final_energy = 0\n for p in packets:\n final_energy += p.energy_rf\n\n print(\"Final energy to test for conservation\")\n print(final_energy)\n\n escape_energy = pd.DataFrame(\n data=energy_out, columns=times[:-1], index=energy_bins\n )\n\n return (\n energy_df,\n energy_plot_df,\n escape_energy,\n decayed_packet_count,\n energy_plot_positrons,\n energy_estimated_deposition,\n )", "def force_12(alpha, r1, r2, R_particle, eps_particle, k, eps_out, eps_in,\n fiber_radius, nmin, nmax, kzimax, E0_mod, nmin_sc, nmax_sc, case):\n\n dr = 1 / k * 1e-5\n dz = dr\n dtheta = 1e-5\n\n p1 = dipole_moment(1, r1, r2, R_particle, eps_particle, k, eps_out, eps_in,\n fiber_radius, nmin, nmax, kzimax,\n E0_mod, nmin_sc, nmax_sc, case)\n p1c = p1.conjugate()\n\n # Fr\n if alpha == 0:\n r1plusdr = r1 + np.array([dr, 0, 0])\n r1minusdr = r1 - np.array([dr, 0, 0])\n Eplusr = total_loc_efield(1, r1plusdr, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, 
R_particle, eps_particle)\n Eminusr = total_loc_efield(1, r1minusdr, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n grad_r = (Eplusr - Eminusr) / (2 * dr)\n\n return(0.5 * np.dot(p1c, grad_r).real)\n # Ftheta\n elif alpha == 1:\n r1plusdtheta = r1 + np.array([0, dtheta, 0])\n r1minusdtheta = r1 - np.array([0, dtheta, 0])\n\n Eplustheta = total_loc_efield(1, r1plusdtheta, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n Eminustheta = total_loc_efield(1, r1minusdtheta, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n grad_theta = (Eplustheta - Eminustheta) / (r1[0] * 2 * dtheta)\n\n return(0.5 * np.dot(p1c, grad_theta).real)\n # Fz\n elif alpha == 2:\n r1plusdz = r1 + np.array([0, 0, dz])\n r1minusdz = r1 - np.array([0, 0, dz])\n\n Eplusz = total_loc_efield(1, r1plusdz, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n Eminusz = total_loc_efield(1, r1minusdz, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n grad_z = (Eplusz - Eminusz) / (2 * dz)\n\n return(0.5 * np.dot(p1c, grad_z).real)\n else:\n print('alpha is out of range!')\n return(0)", "def compute_desired_velocity(self):\n mask_red = (self.image_red == 255) \\\n *(self.image_green == 0) \\\n *(self.image_blue == 0)\n ind_red = sp.where( mask_red )\n phi = sp.ones(self.image_red.shape)\n phi[ind_red] = 0\n phi = sp.ma.MaskedArray(phi, mask=self.mask)\n numpy.set_printoptions(threshold=sys.maxsize)\n self.door_distance = skfmm.distance(phi, dx=self.pixel_size)\n tmp_dist = self.door_distance.filled(9999)\n grad = sp.gradient(tmp_dist,edge_order=2)\n grad_X = -grad[1]/self.pixel_size\n grad_Y = -grad[0]/self.pixel_size\n norm = sp.sqrt(grad_X**2+grad_Y**2)\n norm = (norm>0)*norm+(norm==0)*0.001\n self.desired_velocity_X = self.vmax * (grad_X/norm)\n self.desired_velocity_Y = self.vmax * (grad_Y/norm)\n '''plt.subplot(1,2,1)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.subplot(1,2,2)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.show()'''\n return self.door_distance, self.desired_velocity_X, self.desired_velocity_Y", "def calculate_pressure_layers(P_surface = 100000,P_Cutoff = 0.00001):\n layers = np.ceil(-np.log(P_Cutoff/P_surface)) \n return [float(\"%.3g\"%x) for x in np.exp(-np.arange(layers))*P_surface]", "def calculate_muscl_fluxes(densities, pressures, velocities, gamma,\n mass_ratios, specific_heats, molar_masses, dt_over_dx):\n # Get half step densities\n limiter = UltraBeeLimiter()\n half_step_densities_L = np.zeros(len(densities) - 2)\n half_step_velocities_L = np.zeros(half_step_densities_L.shape)\n half_step_pressures_L = np.zeros(half_step_densities_L.shape)\n half_step_mass_ratios_L = np.zeros((len(densities) - 2, len(specific_heats)))\n half_step_densities_R = np.zeros(half_step_densities_L.shape)\n half_step_velocities_R = np.zeros(half_step_densities_L.shape)\n half_step_pressures_R = np.zeros(half_step_densities_L.shape)\n half_step_mass_ratios_R = np.zeros(half_step_mass_ratios_L.shape)\n for i, dens in enumerate(half_step_densities_L):\n idx = i + 1\n\n # Calculate slopes\n left_slopes = dict()\n 
left_slopes[\"rho\"] = (densities[idx] - densities[idx - 1]) / 2\n left_slopes[\"mom\"] = (densities[idx] * velocities[idx] - densities[idx - 1] * velocities[idx - 1]) / 2\n cell_energy = 0.5 * densities[idx] * velocities[idx] * velocities[idx] + pressures[idx] / (gamma[idx] - 1)\n behind_energy = 0.5 * densities[idx - 1] * velocities[idx - 1] * velocities[idx - 1] + pressures[idx - 1] / (gamma[idx - 1] - 1)\n left_slopes[\"energy\"] = (cell_energy - behind_energy) / 2\n\n right_slopes = dict()\n right_slopes[\"rho\"] = (densities[idx + 1] - densities[idx]) / 2\n right_slopes[\"mom\"] = (densities[idx + 1] * velocities[idx + 1] - densities[idx] * velocities[idx]) / 2\n forward_energy = 0.5 * densities[idx + 1] * velocities[idx + 1] * velocities[idx + 1] + pressures[idx + 1] / (gamma[idx + 1] - 1)\n right_slopes[\"energy\"] = (forward_energy - cell_energy) / 2\n\n average_density_slope, average_momentum_slope, average_energy_slope = limiter.calculate_limited_slopes(left_slopes, right_slopes)\n\n # Interpolate left and right densities\n left_density = densities[idx] - average_density_slope\n left_momentum = densities[idx] * velocities[idx] - average_momentum_slope\n left_energy = cell_energy - average_energy_slope\n left_mass_ratios = mass_ratios[idx, :]\n assert left_density > 0, left_density\n assert left_energy > 0, left_energy\n assert np.isclose(1.0, left_mass_ratios.sum(), 1e-14)\n\n right_density = densities[idx] + average_density_slope\n right_momentum = densities[idx] * velocities[idx] + average_momentum_slope\n right_energy = cell_energy + average_energy_slope\n right_mass_ratios = mass_ratios[idx, :]\n assert right_density > 0, right_density\n assert right_energy > 0, right_energy\n assert np.isclose(1.0, right_mass_ratios.sum(), 1e-14)\n\n # Perform half step flux\n left_velocity = left_momentum / left_density\n left_density_flux = left_momentum\n left_internal_energy = left_energy - 0.5 * left_momentum * left_velocity\n left_pressure = left_internal_energy * (gamma[idx] - 1)\n left_momentum_flux = left_momentum * left_velocity + left_pressure\n left_energy_flux = (left_energy + left_pressure) * left_velocity\n\n right_velocity = right_momentum / right_density\n right_density_flux = right_momentum\n right_internal_energy = right_energy - 0.5 * right_momentum * right_velocity\n right_pressure = right_internal_energy * (gamma[idx] - 1)\n right_momentum_flux = right_momentum * right_velocity + right_pressure\n right_energy_flux = (right_energy + right_pressure) * right_velocity\n\n half_step_density_flux = (left_density_flux - right_density_flux) * dt_over_dx * 0.5\n half_step_momentum_flux = (left_momentum_flux - right_momentum_flux) * dt_over_dx * 0.5\n half_step_energy_flux = (left_energy_flux - right_energy_flux) * dt_over_dx * 0.5\n\n state = ThermodynamicState1D(left_pressure, left_density, left_velocity, gamma[idx], left_mass_ratios)\n state.update_states(half_step_density_flux,\n half_step_momentum_flux,\n half_step_energy_flux,\n specific_heats, molar_masses)\n half_step_densities_L[i] = state.rho\n half_step_velocities_L[i] = state.u\n half_step_pressures_L[i] = state.p\n half_step_mass_ratios_L[i, :] = state.mass_ratios\n\n state = ThermodynamicState1D(right_pressure, right_density, right_velocity, gamma[idx], right_mass_ratios)\n state.update_states(half_step_density_flux,\n half_step_momentum_flux,\n half_step_energy_flux,\n specific_heats, molar_masses)\n half_step_densities_R[i] = state.rho\n half_step_velocities_R[i] = state.u\n half_step_pressures_R[i] = state.p\n 
half_step_mass_ratios_R[i, :] = state.mass_ratios\n\n # Calculate final fluxes\n density_fluxes = np.zeros(len(half_step_densities_R) - 1)\n momentum_fluxes = np.zeros(len(half_step_densities_R) - 1)\n total_energy_fluxes = np.zeros(len(half_step_densities_R) - 1)\n mass_ratio_fluxes = np.zeros((len(half_step_densities_R) - 1, mass_ratios.shape[1]))\n\n for i, dens_flux in enumerate(density_fluxes):\n solver = IterativeRiemannSolver()\n\n # Generate left and right states from cell averaged values\n left_state = ThermodynamicState1D(half_step_pressures_R[i],\n half_step_densities_R[i],\n half_step_velocities_R[i],\n gamma[i],\n half_step_mass_ratios_L[i, :])\n right_state = ThermodynamicState1D(half_step_pressures_L[i + 1],\n half_step_densities_L[i + 1],\n half_step_velocities_L[i + 1],\n gamma[i + 1],\n half_step_mass_ratios_R[i + 1, :])\n\n # Solve Riemann problem for star states\n p_star, u_star = solver.get_star_states(left_state, right_state)\n\n # Calculate fluxes using solver sample function\n p_flux, u_flux, rho_flux, is_left = solver.sample(0.0, left_state, right_state, p_star, u_star)\n\n # Store fluxes in array\n mass_ratio_fluxes[i, :] = left_state.mass_ratios if is_left else right_state.mass_ratios\n flux_gamma = left_state.gamma if is_left else right_state.gamma\n density_fluxes[i] = rho_flux * u_flux\n momentum_fluxes[i] = rho_flux * u_flux * u_flux + p_flux\n e_tot = p_flux / (flux_gamma - 1) + 0.5 * rho_flux * u_flux * u_flux\n total_energy_fluxes[i] = (p_flux + e_tot) * u_flux\n\n return density_fluxes, momentum_fluxes, total_energy_fluxes, mass_ratio_fluxes", "def compare_averages_shell_pspec_dft():\n\n select_radius = 5. #degrees\n\n Nside=256\n Npix = 12 * Nside**2\n Omega = 4*np.pi/float(Npix)\n\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420/freqs - 1.\n\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n\n dV = comoving_voxel_volume(Z[Nfreq/2], dnu, Omega)\n variances = []\n means = []\n pks = []\n\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n\n steps = range(10,110,10)\n vmin,vmax = min(steps),max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n\n for n in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, select_radius, freqs=freqs, Nkbins=Nkbins, N_sections=n, cosmo=True, method='dft', error=False)\n variances.append(np.var(pk[0:Nkbins-5]))\n means.append(np.mean(pk[0:Nkbins-5]))\n pks.append(pk)\n ax0.plot(kbins, pk, label=str(n), color=colormap(normalize(n)))\n\n ax0.axhline(y=dV*sig**2, color='k', lw=2.0)\n# ax0.legend()\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable,label=r'Number of snapshots', ax=ax0)\n ax0.set_ylabel(r\"P(k) [mK$^2$ Mpc$^{3}]$\")\n ax0.set_xlabel(r\"k [Mpc$^{-1}]$\")\n ax1.plot(steps, np.array(variances), label=\"Variance\")\n ax1.set_ylabel(r\"Variance(P(k)) [mK$^4$ Mpc$^{6}]$\")\n ax1.set_xlabel(u\"Number of 5° snapshots\")\n ax3.plot(steps, means, label=\"Mean\")\n ax3.set_ylabel(r\"Mean(P(k)) [mK$^2$ Mpc$^{3}]$\")\n ax3.set_xlabel(u\"Number of 5° snapshots\")\n ax1.legend()\n ax3.legend()\n im = ax2.imshow(np.array(pks)[:,0:Nkbins-5], aspect='auto')#, norm=mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n print('Fractional deviation: ', np.mean(np.abs(pk - dV*sig**2)))\n pl.show()", "def 
get_variables(self, z0, u_inf):\n # Get the ambient data from the CTD profile\n Ta, Sa, P = self.profile.get_values(z0, ['temperature', 'salinity',\n 'pressure'])\n rho = seawater.density(Ta, Sa, P)\n \n # Compute the properties of each dispersed-phase particle\n us = np.zeros(len(self.particles))\n rho_p = np.zeros(len(self.particles))\n m_p = np.zeros(len(self.particles))\n B_p = np.zeros(len(self.particles))\n for i in range(len(self.particles)):\n m0 = self.particles[i].m0\n T0 = self.particles[i].T0\n m_p[i] = np.sum(m0) * self.particles[i].nb0\n if m_p[i] > 0.:\n # Particles exist, get properties. Make sure the algorithm \n # uses the dirty bubble properties since this is supposed\n # to be the rise velocity averaged over the whole plume.\n us[i], rho_p[i]= self.particles[i].properties(m0, T0, P, Sa, \n Ta, np.inf)[0:2]\n B_p[i] = (rho - rho_p[i]) / rho * 9.81 * (m_p[i] / rho_p[i])\n else:\n # Particles dissolved, set to ambient conditions\n us[i] = 0.\n rho_p[i] = rho\n B_p[i] = 0.\n \n # Select the correct slip velocity\n u_slip = us[0]\n for i in range(len(self.particles) - 1):\n if B_p[i+1] > B_p[i]:\n u_slip = us[i+1]\n \n # Compute the total buoyancy flux\n B = np.sum(B_p)\n \n # Get the ambient buoyancy frequency\n N = self.profile.buoyancy_frequency(z0)\n \n # Return the governing parameters\n return (B, N, u_slip, u_inf)", "def get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = 
band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}", "def evolve(self):\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n gradp_x = self.cc_data.get_var(\"gradp_x\")\n gradp_y = self.cc_data.get_var(\"gradp_y\")\n\n # note: the base state quantities do not have valid ghost cells\n beta0 = self.base[\"beta0\"]\n beta0_edges = self.base[\"beta0-edges\"]\n\n rho0 = self.base[\"rho0\"]\n\n phi = self.cc_data.get_var(\"phi\")\n\n myg = self.cc_data.grid\n\n # ---------------------------------------------------------------------\n # create the limited slopes of rho, u and v (in both directions)\n # ---------------------------------------------------------------------\n limiter = self.rp.get_param(\"lm-atmosphere.limiter\")\n\n ldelta_rx = reconstruction.limit(rho, myg, 1, limiter)\n ldelta_ux = reconstruction.limit(u, myg, 1, limiter)\n ldelta_vx = reconstruction.limit(v, myg, 1, limiter)\n\n ldelta_ry = reconstruction.limit(rho, myg, 2, limiter)\n ldelta_uy = reconstruction.limit(u, myg, 2, limiter)\n ldelta_vy = reconstruction.limit(v, myg, 2, limiter)\n\n # ---------------------------------------------------------------------\n # get the advective velocities\n # ---------------------------------------------------------------------\n\n \"\"\"\n the advective velocities are the normal velocity through each cell\n interface, and are defined on the cell edges, in a MAC type\n staggered form\n\n n+1/2\n v\n i,j+1/2\n +------+------+\n | |\n n+1/2 | | n+1/2\n u + U + u\n i-1/2,j | i,j | i+1/2,j\n | |\n +------+------+\n n+1/2\n v\n i,j-1/2\n\n \"\"\"\n\n # this returns u on x-interfaces and v on y-interfaces. 
These\n # constitute the MAC grid\n if self.verbose > 0:\n print(\" making MAC velocities\")\n\n # create the coefficient to the grad (pi/beta) term\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n # create the source term\n source = self.aux_data.get_var(\"source_y\")\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n rhoprime = self.make_prime(rho, rho0)\n source.v()[:, :] = rhoprime.v()*g/rho.v()\n self.aux_data.fill_BC(\"source_y\")\n\n _um, _vm = lm_interface.mac_vels(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source)\n\n u_MAC = ai.ArrayIndexer(d=_um, grid=myg)\n v_MAC = ai.ArrayIndexer(d=_vm, grid=myg)\n\n # ---------------------------------------------------------------------\n # do a MAC projection to make the advective velocities divergence\n # free\n # ---------------------------------------------------------------------\n\n # we will solve D (beta_0^2/rho) G phi = D (beta_0 U^MAC), where\n # phi is cell centered, and U^MAC is the MAC-type staggered\n # grid of the advective velocities.\n\n if self.verbose > 0:\n print(\" MAC projection\")\n\n # create the coefficient array: beta0**2/rho\n # MZ!!!! probably don't need the buf here\n coeff.v(buf=1)[:, :] = 1.0/rho.v(buf=1)\n coeff.v(buf=1)[:, :] = coeff.v(buf=1)*beta0.v2d(buf=1)**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi-MAC\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi-MAC\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi-MAC\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi-MAC\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # MAC velocities are edge-centered. div{beta_0 U} is cell-centered.\n div_beta_U.v()[:, :] = \\\n beta0.v2d()*(u_MAC.ip(1) - u_MAC.v())/myg.dx + \\\n (beta0_edges.v2dp(1)*v_MAC.jp(1) -\n beta0_edges.v2d()*v_MAC.v())/myg.dy\n\n # solve the Poisson problem\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-12)\n\n # update the normal velocities with the pressure gradient -- these\n # constitute our advective velocities. 
Note that what we actually\n # solved for here is phi/beta_0\n phi_MAC = self.cc_data.get_var(\"phi-MAC\")\n phi_MAC[:, :] = mg.get_solution(grid=myg)\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n coeff_x = myg.scratch_array()\n b = (3, 1, 0, 0) # this seems more than we need\n coeff_x.v(buf=b)[:, :] = 0.5*(coeff.ip(-1, buf=b) + coeff.v(buf=b))\n\n coeff_y = myg.scratch_array()\n b = (0, 0, 3, 1)\n coeff_y.v(buf=b)[:, :] = 0.5*(coeff.jp(-1, buf=b) + coeff.v(buf=b))\n\n # we need the MAC velocities on all edges of the computational domain\n # here we do U = U - (beta_0/rho) grad (phi/beta_0)\n b = (0, 1, 0, 0)\n u_MAC.v(buf=b)[:, :] -= \\\n coeff_x.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.ip(-1, buf=b))/myg.dx\n\n b = (0, 0, 0, 1)\n v_MAC.v(buf=b)[:, :] -= \\\n coeff_y.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.jp(-1, buf=b))/myg.dy\n\n # ---------------------------------------------------------------------\n # predict rho to the edges and do its conservative update\n # ---------------------------------------------------------------------\n _rx, _ry = lm_interface.rho_states(myg.ng, myg.dx, myg.dy, self.dt,\n rho, u_MAC, v_MAC,\n ldelta_rx, ldelta_ry)\n\n rho_xint = ai.ArrayIndexer(d=_rx, grid=myg)\n rho_yint = ai.ArrayIndexer(d=_ry, grid=myg)\n\n rho_old = rho.copy()\n\n rho.v()[:, :] -= self.dt*(\n # (rho u)_x\n (rho_xint.ip(1)*u_MAC.ip(1) - rho_xint.v()*u_MAC.v())/myg.dx +\n # (rho v)_y\n (rho_yint.jp(1)*v_MAC.jp(1) - rho_yint.v()*v_MAC.v())/myg.dy)\n\n self.cc_data.fill_BC(\"density\")\n\n # update eint as a diagnostic\n eint = self.cc_data.get_var(\"eint\")\n gamma = self.rp.get_param(\"eos.gamma\")\n eint.v()[:, :] = self.base[\"p0\"].v2d()/(gamma - 1.0)/rho.v()\n\n # ---------------------------------------------------------------------\n # recompute the interface states, using the advective velocity\n # from above\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" making u, v edge states\")\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 2.0/(rho.v() + rho_old.v())\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n _ux, _vx, _uy, _vy = \\\n lm_interface.states(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source,\n u_MAC, v_MAC)\n\n u_xint = ai.ArrayIndexer(d=_ux, grid=myg)\n v_xint = ai.ArrayIndexer(d=_vx, grid=myg)\n u_yint = ai.ArrayIndexer(d=_uy, grid=myg)\n v_yint = ai.ArrayIndexer(d=_vy, grid=myg)\n\n # ---------------------------------------------------------------------\n # update U to get the provisional velocity field\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" doing provisional update of u, v\")\n\n # compute (U.grad)U\n\n # we want u_MAC U_x + v_MAC U_y\n advect_x = myg.scratch_array()\n advect_y = myg.scratch_array()\n\n advect_x.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(u_xint.ip(1) - u_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(u_yint.jp(1) - u_yint.v())/myg.dy\n\n advect_y.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(v_xint.ip(1) - v_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(v_yint.jp(1) - v_yint.v())/myg.dy\n\n proj_type = self.rp.get_param(\"lm-atmosphere.proj_type\")\n\n if proj_type == 1:\n u.v()[:, :] -= (self.dt*advect_x.v() + self.dt*gradp_x.v())\n v.v()[:, :] -= (self.dt*advect_y.v() + 
self.dt*gradp_y.v())\n\n elif proj_type == 2:\n u.v()[:, :] -= self.dt*advect_x.v()\n v.v()[:, :] -= self.dt*advect_y.v()\n\n # add the gravitational source\n rho_half = 0.5*(rho + rho_old)\n rhoprime = self.make_prime(rho_half, rho0)\n source[:, :] = rhoprime*g/rho_half\n self.aux_data.fill_BC(\"source_y\")\n\n v[:, :] += self.dt*source\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n if self.verbose > 0:\n print(\"min/max rho = {}, {}\".format(self.cc_data.min(\"density\"), self.cc_data.max(\"density\")))\n print(\"min/max u = {}, {}\".format(self.cc_data.min(\"x-velocity\"), self.cc_data.max(\"x-velocity\")))\n print(\"min/max v = {}, {}\".format(self.cc_data.min(\"y-velocity\"), self.cc_data.max(\"y-velocity\")))\n\n # ---------------------------------------------------------------------\n # project the final velocity\n # ---------------------------------------------------------------------\n\n # now we solve L phi = D (U* /dt)\n if self.verbose > 0:\n print(\" final projection\")\n\n # create the coefficient array: beta0**2/rho\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n mg.init_RHS(div_beta_U/self.dt)\n\n # use the old phi as our initial guess\n phiGuess = mg.soln_grid.scratch_array()\n phiGuess.v(buf=1)[:, :] = phi.v(buf=1)\n mg.init_solution(phiGuess)\n\n # solve\n mg.solve(rtol=1.e-12)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of p and update the velocities\n # this differs depending on what we projected.\n gradphi_x, gradphi_y = mg.get_solution_gradient(grid=myg)\n\n # U = U - (beta_0/rho) grad (phi/beta_0)\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= self.dt*coeff.v()*gradphi_x.v()\n v.v()[:, :] -= self.dt*coeff.v()*gradphi_y.v()\n\n # store gradp for the next step\n\n if proj_type == 1:\n gradp_x.v()[:, :] += gradphi_x.v()\n gradp_y.v()[:, :] += gradphi_y.v()\n\n elif proj_type == 2:\n gradp_x.v()[:, :] = gradphi_x.v()\n gradp_y.v()[:, :] = gradphi_y.v()\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n self.cc_data.fill_BC(\"gradp_x\")\n self.cc_data.fill_BC(\"gradp_y\")\n\n # increment the time\n if not self.in_preevolve:\n self.cc_data.t += self.dt\n self.n += 1", "def wfc3Dispersion(xc, yc, subarray=256):\n coord0 = (1014 - subarray) // 2\n xc = xc + coord0\n yc = yc + coord0\n DLDP0 = [8949.40742544, 0.08044032819916265]\n DLDP1 = [44.97227893276267,\n 0.0004927891511929662,\n 0.0035782416625653765,\n -9.175233345083485e-7,\n 2.2355060371418054e-7, -9.258690000316504e-7]\n # calculate field dependent dispersion coefficient\n p0 = DLDP0[0] + DLDP0[1] * xc\n p1 = DLDP1[0] + DLDP1[1] * xc + DLDP1[2] * yc + \\\n DLDP1[3] * xc**2 + DLDP1[4] * xc * yc + DLDP1[5] * yc**2\n dx = np.arange(1014) - xc\n wavelength = (p0 + dx * p1)\n if 
subarray < 1014:\n i0 = (1014 - subarray) // 2\n wavelength = wavelength[i0: i0 + subarray]\n return wavelength", "def z_offline(ctx, w, k=0):\n s = ctx.mpf('0.5')+ctx.j*w\n s1 = s\n s2 = ctx.conj(1-s1)\n wpinitial = ctx.prec\n ctx.prec = 35\n # X see II Section 3.21 (109) and (110)\n # M1 see II Section 3.21 (111) and (112)\n if (ctx._re(s1) >= 0):\n M1 = 2*ctx.sqrt(ctx._im(s1)/(2 * ctx.pi))\n X = ctx.sqrt(abs(s1))\n else:\n X = (2*ctx.pi)**(ctx._re(s1)-1) * abs(1-s1)**(0.5-ctx._re(s1))\n M1 = 4 * ctx._im(s1)*X\n # M2 see II Section 3.21 (111) and (112)\n if (ctx._re(s2) >= 0):\n M2 = 2*ctx.sqrt(ctx._im(s2)/(2 * ctx.pi))\n else:\n M2 = 4 * ctx._im(s2)*(2*ctx.pi)**(ctx._re(s2)-1)*abs(1-s2)**(0.5-ctx._re(s2))\n # T see II Section 3.21 Prop. 27\n T = 2*abs(ctx.siegeltheta(w))\n # defining some precisions\n # see II Section 3.22 (115), (116), (117)\n aux1 = ctx.sqrt(X)\n aux2 = aux1*(M1+M2)\n aux3 = 3 +wpinitial\n wpbasic = max(6, 3+ctx.mag(T), ctx.mag(aux2*(26+2*T))+aux3)\n wptheta = max(4,ctx.mag(2.04*aux2)+aux3)\n wpR = ctx.mag(4*aux1)+aux3\n # now the computations\n ctx.prec = wptheta\n theta = ctx.siegeltheta(w)\n ctx.prec = wpR\n xrz, yrz = Rzeta_simul(ctx,s,k)\n pta = 0.25 + 0.5j*w\n ptb = 0.25 - 0.5j*w\n if k > 0: ps1 = 0.25*(ctx.psi(0,pta)+ctx.psi(0,ptb)) - ctx.ln(ctx.pi)/2\n if k > 1: ps2 = (1j/8)*(ctx.psi(1,pta)-ctx.psi(1,ptb))\n if k > 2: ps3 = (-1./16)*(ctx.psi(2,pta)+ctx.psi(2,ptb))\n if k > 3: ps4 = (-1j/32)*(ctx.psi(3,pta)-ctx.psi(3,ptb))\n ctx.prec = wpbasic\n exptheta = ctx.expj(theta)\n if k == 0:\n zv = exptheta*xrz[0]+yrz[0]/exptheta\n j = ctx.j\n if k == 1:\n zv = j*exptheta*(xrz[1]+xrz[0]*ps1)-j*(yrz[1]+yrz[0]*ps1)/exptheta\n if k == 2:\n zv = exptheta*(-2*xrz[1]*ps1-xrz[0]*ps1**2-xrz[2]+j*xrz[0]*ps2)\n zv =zv + (-2*yrz[1]*ps1-yrz[0]*ps1**2-yrz[2]-j*yrz[0]*ps2)/exptheta\n if k == 3:\n zv1 = -3*xrz[1]*ps1**2-xrz[0]*ps1**3-3*xrz[2]*ps1+j*3*xrz[1]*ps2\n zv1 = (zv1+ 3j*xrz[0]*ps1*ps2-xrz[3]+xrz[0]*ps3)*j*exptheta\n zv2 = 3*yrz[1]*ps1**2+yrz[0]*ps1**3+3*yrz[2]*ps1+j*3*yrz[1]*ps2\n zv2 = j*(zv2 + 3j*yrz[0]*ps1*ps2+ yrz[3]-yrz[0]*ps3)/exptheta\n zv = zv1+zv2\n if k == 4:\n zv1 = 4*xrz[1]*ps1**3+xrz[0]*ps1**4 + 6*xrz[2]*ps1**2\n zv1 = zv1-12j*xrz[1]*ps1*ps2-6j*xrz[0]*ps1**2*ps2-6j*xrz[2]*ps2\n zv1 = zv1-3*xrz[0]*ps2*ps2+4*xrz[3]*ps1-4*xrz[1]*ps3-4*xrz[0]*ps1*ps3\n zv1 = zv1+xrz[4]+j*xrz[0]*ps4\n zv2 = 4*yrz[1]*ps1**3+yrz[0]*ps1**4 + 6*yrz[2]*ps1**2\n zv2 = zv2+12j*yrz[1]*ps1*ps2+6j*yrz[0]*ps1**2*ps2+6j*yrz[2]*ps2\n zv2 = zv2-3*yrz[0]*ps2*ps2+4*yrz[3]*ps1-4*yrz[1]*ps3-4*yrz[0]*ps1*ps3\n zv2 = zv2+yrz[4]-j*yrz[0]*ps4\n zv = exptheta*zv1+zv2/exptheta\n ctx.prec = wpinitial\n return zv", "def _surface_runoff(self, SWi, saturation, field_capacity, whc, rf_coeff, geo_dict=None):\n\n saturation[saturation < 0] = np.nan\n field_capacity[field_capacity < 0] = np.nan\n whc[whc < 0] = np.nan\n\n # total runoff based on water left in soil after SAT-FC\n sat_fc = saturation - field_capacity\n Rf1 = SWi - whc\n # if runoff is < 0, make it 0\n Rf = np.zeros(SWi.shape)\n rf_boolean = (Rf1 >= 0)\n Rf[rf_boolean] = Rf1[rf_boolean]\n\n # Surface runoff\n SRf = np.zeros(SWi.shape)\n # SRf = if rf <= sat_fc, make it (rf * rf_coeff)(35% of the runoff value), else (rf - sat_fc) + (rf_coeff * sat_fc)\n SRf_boolean = (Rf <= sat_fc)\n SRf[SRf_boolean] = Rf[SRf_boolean] * rf_coeff\n SRf[~SRf_boolean] = (Rf[~SRf_boolean] - sat_fc[~SRf_boolean]) + rf_coeff * sat_fc[~SRf_boolean]\n # Deep Drainage\n # DDrain occurs if SWi > WHC, amount of DDrain is SAT <> WHC with a maximum DDrain of SAT - WHC\n DDrain = Rf 
- SRf\n\n return DDrain, SRf", "def solid_surface_density_S2014_given_observed_catalog(sss_per_sys, max_core_mass=10.):\n Mstar_obs = np.repeat(sss_per_sys['Mstar_obs'][:,None], np.shape(sss_per_sys['P_obs'])[1], axis=1)[sss_per_sys['P_obs'] > 0] # flattened array of stellar masses repeated for each planet\n a_obs_per_sys = gen.a_from_P(sss_per_sys['P_obs'], sss_per_sys['Mstar_obs'][:,None])\n a_obs = a_obs_per_sys[sss_per_sys['P_obs'] > 0]\n radii_obs = sss_per_sys['radii_obs'][sss_per_sys['P_obs'] > 0]\n core_mass_obs = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(radii_obs)\n core_mass_obs[core_mass_obs > max_core_mass] = max_core_mass\n sigma_obs = solid_surface_density_S2014(core_mass_obs, radii_obs, a_obs, Mstar=Mstar_obs)\n return sigma_obs, core_mass_obs, a_obs", "def contrast_curve_main(data, fwhm, instrument, position=None):\n # assign plate scale\n plate_scale_dict = {\"PHARO\": 0.025, \"ShARCS\": 0.0333}\n\n plate_scale = plate_scale_dict[instrument]\n\n #set radius_size so that radius is no larger than 1\"\n radius_size = np.min([1./plate_scale, fwhm])\n\n#DO NOT TAKE ABSOLUTE VALUE!\n contrast_result = contrast_curve_core(\n data, plate_scale, fwhm=fwhm, radius_size=radius_size, center=position\n )\n separation = contrast_result[0]\n means = contrast_result[1]\n stds = contrast_result[2]\n\n center_flux = run_ap_phot(data, fwhm, position=position)\n\n # intiialize the \"fake im fluxes\" with the central aperture flux.\n all_seps = [0]\n fake_im_fluxes = [center_flux[0]]\n fake_im_stds = [center_flux[1]]\n\n fake_ims = []\n\n for i, (all_mean, all_std) in enumerate(zip(means, stds)):\n # initialize fake fluxes for a given annulus\n fake_im_fluxes_an = []\n n_annuli = 12\n for j in range(n_annuli):\n mean = all_mean[j]\n std = all_std[j]\n x, y = np.meshgrid(np.arange(-1000, 1000), np.arange(-1000, 1000)) #was 100x100; CDD made larger for poor FWHMs\n dst = np.sqrt(x * x + y * y)\n\n # Initializing sigma and muu: size of fake injected source\n sigma = fwhm\n muu = 0.0\n\n bg_std = std\n\n noise_image = make_noise_image(\n (2000, 2000), distribution=\"gaussian\", mean=mean, stddev=bg_std\n ) #Was 200x200, but that's too small for some images because the sky annulus falls outside the fake image for high FWHM.\n # Calculating Gaussian array. tuned to a total STD=5\n fake = (\n 7 * std * np.exp(-((dst - muu) ** 2 / (2.0 * sigma**2)))\n + noise_image\n + 3\n )\n\n flux, err = run_ap_phot(fake, fwhm)\n\n # rescale to a full std of 5\n fixscale = (flux / err) / 5\n\n flux = flux / fixscale\n fake_im_fluxes_an += [flux]\n fake_im_fluxes += [np.nanmedian(fake_im_fluxes_an)]\n fake_im_stds += [np.nanstd(fake_im_fluxes_an)]\n all_seps += [separation[i]]\n\n fake_im_fluxes = np.array(fake_im_fluxes)\n\n err = 2.5 * np.log10(1.0 + (fake_im_stds / fake_im_fluxes))\n\n#DELETE THIS\n# indices = np.arange(len(fake_im_fluxes))\n# separation = fwhm * plate_scale * indices\n\n contrast = -2.5 * np.log10(fake_im_fluxes / center_flux[0])\n\n #Save contrast curve as a pandas DataFrame\n df = pd.DataFrame({'arcsec': all_seps, 'dmag': contrast, 'dmrms': err})\n\n return df #separation, contrast, err", "def calc_enginprops(self):\n # Let's assemble the ABD matrix even if it is not required\n ABD = np.bmat([[self.A, self.B], [self.B, self.D]])\n ABD_inv = np.linalg.inv(ABD)\n # We would use the whole matrix. 
This gives results similar to elamX and considers poisson effects\n A_inv = ABD_inv[0:3, 0:3]\n self.Ex = 1 / (self.total_t * A_inv[0, 0]) # It is 2 * t because we need total thickness\n self.Ey = 1 / (self.total_t * A_inv[1, 1])\n self.Gxy = 1 / (self.total_t * A_inv[2, 2])\n self.poissonxy = - A_inv[0,1] / A_inv[0, 0]\n # Flexural stiffness properties\n self.zx = 0.0\n self.zy = 0.0\n zx_dem = 0.0\n zy_dem = 0.0\n self.EIx = 0.0\n self.EIy = 0.0\n z = 0.0\n # Calculate neutral axis in direction x and y\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n z += t / 2.0\n self.zx += Ex * t * z\n zx_dem += Ex * t\n self.zy += Ey * t * z\n zy_dem += Ey * t\n z += t / 2.0\n self.zx = self.zx / zx_dem\n self.zy = self.zy / zy_dem\n # Calculate EI in direction x and y\n z = 0.0\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n Gxy = 1 / S_bar[2, 2]\n z += t / 2.0\n self.EIx += Ex * (t**3 / 12 + t * (z - self.zx)**2)\n self.EIy += Ey * (t**3 / 12 + t * (z - self.zy)**2)\n self.GA += Gxy * t\n z += t / 2.0\n return self.Ex, self.Ey, self.Gxy, self.poissonxy", "def create_flux_vector_pf_gr(self):\n t0 = time.time()\n\n verif_local = 1\n lim4 = 1e-4\n soma = 0\n soma2 = 0\n soma3 = 0\n store_flux_pf = {}\n\n for volume in self.all_fine_vols:\n #1\n flux = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n pvol = self.mb.tag_get_data(self.pf_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #2\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n padj = self.mb.tag_get_data(self.pf_tag, adj, flat=True)[0]\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n altura = centroid_adj[2]\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n z = uni[2]\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)\n keq = keq*(np.dot(self.A, uni))/(self.mi)\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n\n q = (grad_p)*keq - grad_z*keq*self.gama\n flux[tuple(unit)] = q\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n #1\n store_flux_pf[volume] = flux\n flt = sum(flux.values())\n # print(gid_vol)\n # print(flt)\n # print(store_flux_pf)\n # print('\\n')\n # import pdb; pdb.set_trace()\n self.mb.tag_set_data(self.flux_fine_pf_tag, volume, flt)\n soma += flt\n if abs(flt) > lim4 and volume not in self.wells:\n verif_local = 0\n print('nao esta dando conservativo na malha fina')\n print(gid_vol)\n print(flt)\n import pdb; pdb.set_trace()\n soma_prod = []\n soma_inj = []\n with open('fluxo_malha_fina_gr.txt', 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat = True)[0]\n values = store_flux_pf[volume].values()\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n\n # print('gid:{0}'.format(gid))\n # print('valor:{0}'.format(sum(values)))\n if volume in 
self.wells_inj:\n soma_inj.append(sum(values))\n else:\n soma_prod.append(sum(values))\n # print('\\n')\n soma2 += sum(values)\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(sum(soma_inj)))\n arq.write('soma_prod:{0}'.format(sum(soma_prod)))\n\n print('soma_inj:{0}'.format(sum(soma_inj)))\n print('soma_prod:{0}'.format(sum(soma_prod)))\n\n print('soma2 : {0}'.format(soma2))\n if abs(soma2) > lim4:\n print('nao esta dando conservativo globalmente')\n import pdb; pdb.set_trace()\n\n # print('saiu de def create_flux_vector_pf')\n print('\\n')\n\n tf = time.time()\n # import pdb; pdb.set_trace()\n return store_flux_pf", "def sh( values ):\n # ECMWF normalizes the spherical harmonic coeffs differently than NCEP.\n # (m=0,n=0 is global mean, instead of sqrt(2)/2 times global mean)\n fld = 2.*values/np.sqrt(2.)\n \n #------SPLITTING IT UP IN AN IMAGARY AND REAL PART--------\n fldr = fld[ 0::2 ] #annenhver verdi fra 0\n fldi = fld[ 1::2 ] #annenhver verdi fra 1\n fldn = np.zeros( fldr.shape, 'F' ) #blir halvparten så stor som orginale fld\n fldn.real = fldr #legges da til i fldn vectoren\n fldn.imag = fldi\n #----------------------------------------------------------\n \n nlons = 360 #Have a feeling it probably is number of values like grid val\n nlats = 1280 #web sais it shourld be 180.. wellwell, seems to work\n s = spharm.Spharmt( nlons, nlats ) \n \n data = s.spectogrd( fldn ) #Hvis nlats = 180, så feiler denne delen pga hvordan formelen fungerer..\n \n lons = ( 360./nlons ) * np.arange( nlons )\n lats = 90.-( 180./( nlats - 1 ) ) * np.arange( nlats )\n lons, lats = np.meshgrid( lons, lats )\n \n #stack grids side-by-side (in longitiudinal direction), so\n # any range of longitudes (between -360 and 360) may be plotted on a world map.\n lons = np.concatenate(( lons - 360, lons ), 1 )\n lats = np.concatenate(( lats, lats ), 1 )\n data = np.concatenate(( data, data ), 1 )\n \n return lats, lons, data", "def fglidingHST_PL(xy, v, NL, KL, BM, Mm, params):\n I1 = params['I1']\n I3 = params['I3']\n l = params['l']\n g = params['g']\n k = params['k']\n\n try:\n NP, NN = np.shape(NL)\n except:\n '''There is only one particle'''\n NP = 1\n NN = 0\n\n X = xy[:, 0].ravel() # .reshape(NP,1);\n Y = xy[:, 1].ravel() # .reshape(NP,1);\n dX = xy[:, 2].ravel() # .reshape(NP,1);\n dY = xy[:, 3].ravel() # .reshape(NP,1);\n vX = v[:, 0].ravel() # .reshape(NP,1);\n vX = v[:, 1].ravel() # .reshape(NP,1);\n vdX = v[:, 2].ravel() # .reshape(NP,1);\n vdY = v[:, 3].ravel() # .reshape(NP,1);\n\n phi = np.arctan2(dY, dX)\n # print 'xy = ', xy\n # print 'v = ', v\n\n # Note: w3 = vpsi + vphi*np.cos(theta)\n w3 = params['w3']\n\n # SPRING FORCE\n vecx = np.array([[KL[i, j] * (xy[i, 0] - xy[NL[i, j], 0]) for j in range(NN)] for i in range(NP)])\n vecy = np.array([[KL[i, j] * (xy[i, 1] - xy[NL[i, j], 1]) for j in range(NN)] for i in range(NP)])\n mag = np.sqrt(vecx ** 2 + vecy ** 2)\n # KLnoz = KL.copy() #no zeros\n # KLnoz[KLnoz ==0] = 1. #same value as mag[mag==0], so that stretch=0 for those\n stretch = mag - BM\n mag[mag == 0.] = 1. 
# avoid divide by zero error\n # print(stretch)\n springx = k * np.sum(stretch * vecx / mag, axis=-1)\n springy = k * np.sum(stretch * vecy / mag, axis=-1)\n # print 'stretch = ', stretch\n\n # add them up\n FX = - springx.ravel() # .reshape(NP,1)\n FY = - springy.ravel() # .reshape(NP,1)\n\n # Set force on fixed particles to zero\n if 'BIND' in params:\n if len(params['BIND']) > 0:\n FX[params['BIND']] = 0.\n FY[params['BIND']] = 0.\n\n # Transform into A frame\n Fx = FX * np.cos(phi) + FY * np.sin(phi)\n Fy = -FX * np.sin(phi) + FY * np.cos(phi)\n\n # print '\\n Fx =', Fx\n\n # POLAR COORDINATES (delta, phi)\n delta = np.sqrt(dX ** 2 + dY ** 2)\n v_delta = vdX * np.cos(phi) + vdY * np.sin(phi)\n v_phi = -vdX * np.sin(phi) + vdY * np.cos(phi)\n\n # VERTICAL REACTION FORCE\n gn = Mm * (g * l * I1 + I1 * (vdX ** 2 + vdY ** 2) \\\n + I3 * w3 * v_phi * delta \\\n - l ** 2 * delta * Fx) / (l * I1 + Mm * l * delta ** 2)\n\n # print 'gn = ', gn\n\n # EULER EQUATIONS\n dv_phi = (1. / I1) * (-l ** 2 * Fy - I3 * w3 * v_delta)\n dv_delta = (1. / I1) * (-l * gn * delta - l ** 2 * Fx + I3 * w3 * v_phi)\n\n d_vdX = dv_delta * np.cos(phi) - dv_phi * np.sin(phi)\n d_vdY = dv_delta * np.sin(phi) + dv_phi * np.cos(phi)\n\n # SPRING EQUATIONS\n # print 'dvtheta =', dvtheta\n qx = dv_delta - v_delta ** 2 * delta / l ** 2\n qy = dv_phi\n qX = qx * np.cos(phi) - qy * np.sin(phi)\n qY = qx * np.sin(phi) + qy * np.cos(phi)\n d_vX = (FX / Mm) - qX\n d_vY = (FY / Mm) - qY\n\n # print 'check d_vX = ', d_vX\n\n if params['BCtype'] == 'excite':\n if params['excite_continue']:\n # print 'exciting'\n d = params['amplitude']\n freq = params['frequency']\n x0_BIND = params['x0_BIND']\n y0_BIND = params['y0_BIND']\n BIND = params['BIND']\n w3 = params['w3'][BIND]\n\n nu = freq\n phi_BIND = (np.arctan2(dY[BIND], dX[BIND]) + nu * params['h'])[0]\n # print 'phi_BIND =', phi_BIND\n\n d_vX[BIND] = d * nu ** 2 * np.cos(phi_BIND)\n d_vY[BIND] = d * nu ** 2 * np.sin(phi_BIND)\n d_vdX[BIND] = -d * nu ** 2 * np.cos(phi_BIND)\n d_vdY[BIND] = -d * nu ** 2 * np.sin(phi_BIND)\n\n elif 'BIND' in params:\n if len(params['BIND']) > 0:\n # ftx[params['BIND'],0:2] = [0.,0.]\n d_vX[params['BIND']] = 0.\n d_vY[params['BIND']] = 0.\n\n # print 'shapes = ', np.shape(dvX), np.shape(dvY),np.shape(dvtheta),np.shape(dvphi),np.shape(dvpsi)\n ftx = np.dstack((d_vX, d_vY, d_vdX, d_vdY))[0]\n # print 'Resulting second derivative: ', ftx[1,:]\n # ftx_exact = fglidingHST_exact(xy, v, NL, KL, BM, Mm, params)\n # print 'gn = ', gn\n # print 'ftx = ', ftx\n # print 'v_delta = ', v_delta\n # print 'v_phi = ', v_phi\n # print 'dv_delta = ', dv_delta\n # print 'dv_phi = ', dv_phi\n # print 'qx = ', qx\n # print 'qy = ', qy\n # print 'ftx_exact = ', ftx_exact\n\n return ftx", "def rb_nfw(m200,c,z):\n\n #Setting up cosmology\n rho0=1.4876862e+11;\n omegam=0.238000;\n msun=1.98892e+33;\n delta_vir=200.;\n G=6.6730003e-08;\n kmpsToCmps = 1.0*10.**(5.);\n Rvir=200.;\n kpc2cm=3.086*10.**(21);\n \n deltac = (delta_vir/3.)*( (c**3.)/( np.log(1.+c) - (c / (1.+c))));\n rho_crit =rho0*omegam*(1.+z)**3.;\n r200 =(m200/delta_vir / rho_crit / (4.*np.pi/3.) )**0.33333 * 1000. 
;\n v200 = ((6.67e-8 * m200 * msun / (r200* 3.086*10.**(21.)) )**0.5)/1e5 ;\n \n r =np.linspace(1.,3.*r200,500); # kpc\n rs = r200 / c; \n ss=(((r/rs)*(1.+(r/rs))**2.)*1000.**3);\n rho = (rho_crit * deltac)/(ss); \n M_r = 4.*np.pi* integrate.cumtrapz((r**2)*rho, r,initial=0.)\n \n x = r/r200 ;\n tab=1./x*(np.log(1.+c*x)-c*x/(1.+c*x))/(np.log(1.+c)-c/(1.+c));\n vcirc = v200*(tab)**0.5 ;\n maxvcirc = np.max(vcirc) ;\n q=np.where((vcirc == np.max(vcirc)));\n maxvcircr = r[q];\n \n \n # Now compute V_Esc as per nfw.pro Binney & Tremaine equation 2.31\n Phi_new = r * 0.0;\n vesc = r * 0.0 ;\n for ir in range(2,len(r)-4):\n term1 = (np.trapz(rho[0:ir]*(r[0:ir]**2.),x=r[0:ir])/(r[ir]))* msun; \n term2 = np.trapz(rho[ir:len(r)]*r[ir:len(r)],x=r[ir:len(r)])*msun; \n Phi_new[ir] = -4. *np.pi*6.67e-8*(term1 + term2)/3.086e21 ;\n vesc[ir] = ((2. * np.abs(Phi_new[ir]))**0.5) / 1e5 ; # See Binney & Tremaine (2-22) \n \n\n # Chage Units to do velocity dispersion calculations\n rcm=r*kpc2cm;\n\n #M_r in gram\n M_r_gram=M_r*msun;\n\n Phi=G*integrate.cumtrapz((M_r_gram/rcm**(2)),rcm,initial=0);\n \n Phi=Phi*(1./((1e5)**2.));#%km^2/s^2\n Phi_out=np.max(Phi);\n\n k=0.41;\n a=0.29;\n\n sig = np.sqrt(a *(( Phi/Phi_out)**(k))*(Phi_out -Phi));\n \n nfw={}\n qqqt=np.where((vesc==0.))\n vesc[qqqt]=1e-99\n\n nfw[\"m200\"]=m200;\n nfw[\"c\"]=c;\n nfw[\"r200\"]=r200;\n nfw[\"v200\"]=v200;\n nfw[\"maxvcirc\"]=maxvcirc;\n nfw[\"maxvcircr\"]=maxvcircr;\n nfw[\"r\"]=r;\n nfw[\"rho\"]=rho;\n nfw[\"vcirc\"]=vcirc;\n nfw[\"M_r\"]=M_r;\n nfw[\"sig_v\"]=sig;\n nfw[\"vesc\"]=vesc;\n \n return nfw", "def compute_tsky_hot( xv, yv, hv, thot, tcold):\n\n nData = len(yv) \n epsilons = np.full( nData, EPSILON)\n tsys = np.zeros(nData) # initialize arrays\n\n Z = np.zeros(nData)\n oneMZ = np.zeros(nData)\n # For full Temp calibration, a spectrum taken at high elevation away from \n # The galactic plan is used. For this program the cold spectrum must be\n # the spectrum being calibrated. See the M command for comparision\n epsilons = np.full( nData, EPSILON)\n yv = np.maximum( yv, epsilons)\n hv = np.maximum( hv, epsilons)\n # comput the cold/hot ratio\n Z = yv/hv\n oneMZ = np.full( nData, 1.) - Z\n oneMZ = np.maximum( oneMZ, epsilons)\n\n # the cold, receiver, temperature is this function\n tsys = ((Z*thot) - tcold)/oneMZ\n \n n6 = int(nData/6)\n n56 = 5*n6\n\n tsysmedian = np.median( tsys[n6:n56])\n\n tsky = np.zeros(nData) # initialize arrays\n S = np.zeros(nData) # initialize arrays\n\n # The system gain S is computed assuming a tsys is the cold load\n S = np.full( nData, tsysmedian+thot)/hv\n # scale the observed instensity in counts to Kelvins.\n tsky = S*yv\n\n return tsky", "def dewT_2_q_magnus(ds, var):\n A1, B1, C1 = 17.625, 243.04, 610.94\n vpsl = C1 * np.exp(A1 * (ds[var['dew']] - 273.15) / (B1 + (ds[var['dew']] - 273.15)))\n wsl = eps0 * vpsl / (ds[var['pressure']] - vpsl)\n ds[var['spec_h']] = wsl / (1 + wsl)\n return ds", "def SCF(N, R, Zeta1, Zeta2, Za, Zb, G):\n Crit = 1e-11 # Convergence critera\n Maxit = 250 # Maximum number of iterations\n Iter = 0\n\n ######## STEP 1. Guess an initial density matrix ########\n # Use core hamiltonian for initial guess of F, I.E. (P=0)\n P = np.zeros([2, 2])\n\n Energy = 0.0\n\n while (Iter < Maxit):\n Iter += 1\n print(Iter)\n\n ######## STEP 2. 
calculate the Fock matrix ########\n # Form two electron part of Fock matrix from P\n G = np.zeros([2, 2]) # This is the two electron contribution in the equations above\n for i in range(2):\n for j in range(2):\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])\n\n # Add core hamiltonian H^CORE to get fock matrix\n F = H + G\n\n # Calculate the electronic energy\n Energy = np.sum(0.5 * P * (H + F))\n\n print('Electronic energy = ', Energy)\n\n ######## STEP 3. Calculate F' (remember S^-1/2 is X and S^1/2 is X.T) ########\n G = np.matmul(F, X)\n Fprime = np.matmul(X.T, G)\n\n ######## STEP 4. Solve the eigenvalue problem ########\n # Diagonalise transformed Fock matrix\n Diag(Fprime, Cprime, E)\n\n ######## STEP 5. Calculate the molecular orbitals coefficients ########\n # Transform eigen vectors to get matrix C\n C = np.matmul(X, Cprime)\n\n ######## STEP 6. Calculate the new density matrix from the old P ########\n Oldp = np.array(P)\n P = np.zeros([2, 2])\n\n # Form new density matrix\n for i in range(2):\n for j in range(2):\n # Save present density matrix before creating a new one\n for k in range(1):\n P[i, j] += 2.0 * C[i, k] * C[j, k]\n\n ######## STEP 7. Check to see if the energy has converged ########\n Delta = 0.0\n # Calculate delta the difference between the old density matrix Old P and the new P\n Delta = (P - Oldp)\n Delta = np.sqrt(np.sum(Delta ** 2) / 4.0)\n print(\"Delta\", Delta)\n\n # Check for convergence\n if (Delta < Crit):\n # Add nuclear repulsion to get the total energy\n Energytot = Energy + Za * Zb / R\n print(\"Calculation converged with electronic energy:\", Energy)\n print(\"Calculation converged with total energy:\", Energytot)\n print(\"Density matrix\", P)\n print(\"Mulliken populations\", np.matmul(P, S))\n print(\"Coeffients\", C)\n\n break", "def g2dfwhm(img):\n npix = img.shape[0]\n rowCen,colCen = adaptiveCentroid(img,1.1/scale)\n row,col = np.mgrid[0:npix,0:npix]\n row = row - rowCen\n col = col - colCen\n A0,sigmac0 = moments(img)\n sigmar0 = sigmac0\n rho0 = 0.\n B0 = 0.\n p0=np.array([sigmac0,sigmar0,rho0,A0, B0])\n def residualg2d(p,x,y,xc,yc,I):\n sigmax,sigmay,rho,A,B = p\n Ierr = np.sqrt(abs(I))+0.00001 # to avoid those = 0, add a small number \n res = (gaussian2d(x,y,xc,yc,sigmax,sigmay,rho,A,B) - I)/Ierr\n return res.flatten()\n p = leastsq(residualg2d,p0,args=(col,row,colCen,rowCen,img))[0]\n sigmac,sigmar,rho,A,B = p\n Mcc = sigmac**2\n Mrr = sigmar**2\n Mrc = rho**2*Mcc*Mrr\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n whiskerLength = np.sqrt(np.abs(M22))\n lambdap = 0.5*(M20 + abs(M22))\n lambdam = 0.5*(M20 - abs(M22))\n fwhm_g2d = np.sqrt(2.*np.log(2.))*(np.sqrt(lambdap)+np.sqrt(lambdam))\n #fwhm = np.sqrt(M20/2.)*2.35482*scale\n return A, B, whiskerLength, fwhm_g2d", "def solid_surface_density_RC2014_given_physical_catalog(sssp_per_sys, max_core_mass=10.):\n mult_all = sssp_per_sys['Mtot_all']\n a_all_2p = []\n mult_all_2p = []\n sigma_all_2p = []\n for i in np.arange(len(mult_all))[mult_all > 1]: # only consider multi-planet systems\n a_sys = sssp_per_sys['a_all'][i]\n core_mass_sys = np.copy(sssp_per_sys['mass_all'][i][a_sys > 0])\n core_mass_sys[core_mass_sys > max_core_mass] = max_core_mass\n a_sys = a_sys[a_sys > 0]\n a_all_2p += list(a_sys)\n mult_all_2p += [len(a_sys)]*len(a_sys)\n sigma_all_2p += list(solid_surface_density_system_RC2014(core_mass_sys, a_sys))\n a_all_2p = np.array(a_all_2p)\n mult_all_2p = np.array(mult_all_2p)\n sigma_all_2p = 
np.array(sigma_all_2p)\n return sigma_all_2p, a_all_2p, mult_all_2p", "def _full_relativistic_loss(eps, eta, t):\n # Surrounding medium, eta terms for surface loss\n lmb2_eta = Theta2 - eta * ThetaE2 * beta2\n lmb_eta = np.lib.scimath.sqrt(lmb2_eta)\n phi2_eta = lmb2_eta + ThetaE2\n\n # Thin layer, epsilon terms for surface loss\n lmb2_eps = Theta2 - eps * ThetaE2 * beta2\n lmb_eps = np.lib.scimath.sqrt(lmb2_eps) # should be > 0.\n phi2_eps = lmb2_eps + ThetaE2\n\n # Combined term for relativistic surface loss\n phi2_eps_eta = Theta2 + ThetaE2 * (1. - (eps + eta) * beta2)\n\n # Thickness dependent terms for surface loss\n de = t * Psurf\n sin_de = np.sin(de)\n cos_de = np.cos(de)\n txy = np.tanh(lmb_eps * de / ThetaE)\n lplus = lmb_eta * eps + lmb_eps * eta * txy\n lminus = lmb_eta * eps + lmb_eps * eta / txy\n\n # \"Relativistic surface plasmon\"\n A1 = phi2_eps_eta**2. / eps / eta\n A2 = sin_de**2. / lplus + cos_de**2. / lminus\n A = A1 * A2\n # Guided light mode 1\n B1 = beta2 * lmb_eta * ThetaE * phi2_eps_eta / eta\n B2 = (1. / lplus - 1. / lminus) * 2. * sin_de * cos_de\n B = B1*B2\n # Guided light mode 2\n C1 = - beta2**2. * lmb_eta * lmb_eps * ThetaE2\n C2 = cos_de**2. * txy / lplus\n C3 = sin_de**2. / txy / lminus\n C = C1 * (C2 + C3)\n\n # Build relativistic surface loss\n Ps1 = 2 * Theta2 * (eps - eta)**2. / phi2_eta**2. / phi2_eps**2.\n Ps2 = hbar/momentum\n Ps3 = A + B + C\n Ps = Ps1 * Ps2 * Ps3\n\n # Build relativistic bulk loss\n Pv = t * (1. - (eps*beta2)) / eps / phi2_eps\n\n # Calculate P and Pvol (volume only)\n P = Pcoef * np.imag(Pv - Ps)\n Pvol = Pcoef * np.imag(Pv)\n return (P + 1j*Pvol)", "def __init__(self):\n # Set constants\n self.fromHztoeV = 6.58e-16\n self.gramstoeV = 1 / ( 1.78 * 1e-33)\n self.mtoev = 1/(1.97 * 1e-7) \n self.H0 = cosmo.H(0).value * 1e3 / (1e3 * const.kpc.value) #expressed in 1/s\n self.rhocritical = cosmo.critical_density(0).value * self.gramstoeV /(1e-2)**3 # eV/m**3\n self.Om0 = cosmo.Om0 #total matter \n self.OLambda0 = cosmo.Ode0 # cosmological constant\n self.DM0 = self.Om0 - cosmo.Ob0 # dark matter\n self.evtonJoule = 1.60218 * 1e-10 # from eV to nJ\n self.evtoJoule = 1.60218 * 1e-19 # from eV to J\n PSgal1h = np.loadtxt(\"/Users/andreacaputo/Desktop/Phd/AxionDecayCrossCorr/Codes/NIRB_PS/PS_GALl_1h.dat\")\n PSgal2h = np.loadtxt(\"/Users/andreacaputo/Desktop/Phd/AxionDecayCrossCorr/Codes/NIRB_PS/PS_GALl_2h.dat\")\n self.Mpc = 1e3 * const.kpc.value\n self.zmin = 0.001\n self.zmax = 30.001\n self.zbins = 301\n self.h = cosmo.h\n self.z_vect = np.linspace(self.zmin, self.zmax, self.zbins)\n self.k_vect = PSgal1h[:,0]* self.h\n self.Power1h = PSgal1h[:,1:]/(self.h**3)\n self.Power2h = PSgal2h[:,1:]/(self.h**3)\n self.Power = self.Power1h + self.Power2h\n self.Praw_prova1h = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power1h))\n self.Praw_prova2h = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power2h))\n self.Praw_prova = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power))", "def model_prem(r):\n\n\t#- normalised radius\n\tx = r / 6371000.0\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\t#- upper crust\n\tif (r >= 6356000.0):\n\t\trho = 2.6\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 3.2\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- lower crust\n\telif (r >= 6346000.6) & (r < 6356000.0):\n\t\trho = 2.9\n\t\tvpv = 6.8\n\t\tvph = vpv\n\t\tvsv = 3.9\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- LID\n\telif (r >= 6291000.0) & (r < 6346000.6):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv 
= 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- LVZ\n\telif (r >= 6151000.0) & (r < 6291000.0):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- Transition zone 1\n\telif (r >= 5971000.0) & (r < 6151000.0):\n\t\trho = 7.1089 - 3.8045 * x\n\t\tvpv = 20.3926 - 12.2569 * x\n\t\tvph = vpv\n\t\tvsv = 8.9496 - 4.4597 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 2\n\telif (r >= 5771000.0) & (r < 5971000.0):\n\t\trho = 11.2494 - 8.0298 * x\n\t\tvpv = 39.7027 - 32.6166 * x\n\t\tvph = vpv\n\t\tvsv = 22.3512 - 18.5856 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 3\n\telif (r >= 5701000.0) & (r < 5771000.0):\n\t\trho = 5.3197 - 1.4836 * x\n\t\tvpv = 19.0957 - 9.8672 * x\n\t\tvph = vpv\n\t\tvsv = 9.9839 - 4.9324 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 1\n\telif (r >= 5600000.0) & (r < 5701000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 29.2766 - 23.6027 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 22.3459 - 17.2473 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- Lower mantle 2\n\telif (r >= 3630000.0) & (r < 5600000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 24.9520 - 40.4673 * x + 51.4832 * x**2 - 26.6419 * x**3\n\t\tvph = vpv\n\t\tvsv = 11.1671 - 13.7818 * x + 17.4575 * x**2 - 9.2777 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 3\n\telif (r >= 3480000.0) & (r < 3630000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 15.3891 - 5.3181 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 6.9254 + 1.4672 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Outer core\n\telif (r >= 1221000.5) & (r < 3480000.0):\n\t\trho = 12.5815 - 1.2638 * x - 3.6426 * x**2 - 5.5281 * x**3\n\t\tvpv = 11.0487 - 4.0362 * x + 4.8023 * x**2 - 13.5732 * x**3\n\t\tvph = vpv\n\t\tvsv = 0.0\n\t\tvsh = 0.0\n\t\teta = 1.0\n\n\t#- Inner Core\n\telif (r >= 0.0) & (r < 1221000.5):\n\t\trho = 13.0885 - 8.8381 * x**2\n\t\tvpv = 11.2622 - 6.3640 * x**2\n\t\tvph = vpv\n\t\tvsv = 3.6678 - 4.4475 * x**2\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N", "def SH_surface_plots(n_max=6,figsize=(15,15),fs=15,saveA=True,show=False,dpi=400,vis_type='real'):\n\n N = 100j\n\n for n in range(n_max+1):\n for m in range(n+1):\n plt.close('all')\n print(\"working on Y_%s^%s\" % (n,m) )\n\n PHI,THETA = np.mgrid[0:2*np.pi:N*2, 0:np.pi:N]\n if vis_type == 'real':\n R = sp.sph_harm(m,n,PHI,THETA).real\n if vis_type == 'modulus':\n r = sp.sph_harm(m,n,PHI,THETA)\n R = r * r.conjugate()\n if vis_type == 'unit':\n R = sp.sph_harm(m,n,PHI,THETA).real + 1\n\n X = np.abs(R) * np.sin(THETA) * np.cos(PHI)\n Y = np.abs(R) * np.sin(THETA) * np.sin(PHI)\n Z = np.abs(R) * np.cos(THETA)\n\n norm = colors.Normalize()\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'), figsize=(14,10))\n sm = cm.ScalarMappable(cmap=cm.seismic)\n ax.plot_surface(X, Y, Z, rstride=1, 
cstride=1, facecolors=cm.seismic(norm(R)))\n ax.set_title('real$(Y^%s_%s)$' % (m,n), fontsize=fs)\n ax.set_aspect(1)\n sm.set_array(R)\n fig.colorbar(sm, shrink=0.8)\n\n if saveA:\n fig.savefig('images/%s/%s_%s.png' % (vis_type,n,m), dpi=dpi)\n if show:\n plt.show()\n\n # print(\"\\n only +m values are used.\")\n # for n in range(n_max+1):\n # for m in range(n+1):\n # plt.close('all')\n # print(\"\\n n,m = %s,%s\" % (n,m) )\n #\n # R,X,Y,Z = harmonics(m,n)\n #\n # fig = plt.figure(figsize=figsize)\n # ax = plt.subplot(projection='3d')\n # ax.set_aspect(1)\n # ax.set_title(\"n: %s m: %s\" % (n,m), fontsize=fs+2)\n # ax.plot_surface(X,Y,Z,\\\n # cmap = cm.seismic,\n # norm = colors.Normalize( vmin=np.min(R),vmax=np.max(R) )\\\n # )\n #\n # if saveA:\n # fig.savefig('images/%s_%s.png' % (n,m), dpi=dpi)\n # if show:\n # plt.show()", "def airy_and_slicer(surface, wavelength, scale_mas, PSF_window, N_window):\n\n # Print message to know we are updating the cache\n print('Recalculating Airy Pattern for %.3f microns' % wavelength)\n\n # Plate scales [Px, Py] for each spaxel scale in mm / arcsec,\n # depending on the surface [IS: Image Slicer, DET: Detector]\n plate_scales = {'IS': {4.0: [125, 250], 60.0: [16.67, 16.67]},\n 'DET': {4.0: [3.75, 7.5], 60.0: [0.5, 0.5]}}\n plate_x = plate_scales[surface][scale_mas][0]\n plate_y = plate_scales[surface][scale_mas][1]\n\n # We know how many Microns the pixels of the Geometric PSF span [PSF_window / N_window]\n pix_sampling = PSF_window / N_window # micron at the detector plane\n # Using the plate scale we calculate how many m.a.s each of those pixels have to span\n pix_scale_x = pix_sampling / plate_x # milliarcsec / pixel\n pix_scale_y = pix_sampling / plate_y # milliarcsec / pixel\n\n # Calculate the relative size of the pupil aperture needed to ensure the PSF is\n # sampled with the given pix_scale at the focal plane\n ELT_DIAM = 39\n MILIARCSECS_IN_A_RAD = 206265000\n pix_rad_x = pix_scale_x / MILIARCSECS_IN_A_RAD # radians / pixel\n pix_rad_y = pix_scale_y / MILIARCSECS_IN_A_RAD\n RHO_APER_x = pix_rad_x * ELT_DIAM / (wavelength * 1e-6)\n RHO_APER_y = pix_rad_y * ELT_DIAM / (wavelength * 1e-6)\n RHO_OBSC_x = 0.30 * RHO_APER_x # ELT central obscuration\n RHO_OBSC_y = 0.30 * RHO_APER_y # ELT central obscuration\n\n # Sanity check\n PIX_RAD_x = RHO_APER_x * wavelength / ELT_DIAM * 1e-6\n PIX_RAD_y = RHO_APER_y * wavelength / ELT_DIAM * 1e-6\n PIX_MAS_x = PIX_RAD_x * MILIARCSECS_IN_A_RAD\n PIX_MAS_y = PIX_RAD_y * MILIARCSECS_IN_A_RAD\n\n # Define the ELT pupil mask. 
Note that we use a central obscuration too\n N = 2048\n x = np.linspace(-1, 1, N)\n xx, yy = np.meshgrid(x, x)\n\n # To get the anamorphic scaling we define the equation for an ellipse\n rho = np.sqrt((xx / RHO_APER_x) ** 2 + (yy / RHO_APER_y) ** 2)\n\n # (1) Propagate to the Image Slicer Focal plane\n elt_mask = (RHO_OBSC_x / RHO_APER_x < rho) & (rho < 1.0)\n pupil = elt_mask * np.exp(1j * elt_mask)\n image_electric = fftshift(fft2(pupil))\n\n if surface == 'IS':\n # print(\"IS\")\n # We are already at the Image Slicer, don't do anything else\n min_pix, max_pix = N // 2 - N_window // 2, N // 2 + N_window // 2\n final_psf = (np.abs(image_electric))**2\n final_psf /= np.max(final_psf)\n crop_psf = final_psf[min_pix:max_pix, min_pix:max_pix]\n\n elif surface == 'DET':\n # print(\"DET\")\n # (1.1) Add slicer effect by masking\n # We mask the PSF covering a band of size 1x SPAXEL, depending on the scale\n # If we have 4x4 mas, then we cover a band of 4 mas over the PSF\n x_min, x_max = -N/2 * PIX_MAS_x, N/2 * PIX_MAS_x\n y_min, y_max = -N/2 * PIX_MAS_y, N/2 * PIX_MAS_y\n x_slice = np.linspace(x_min, x_max, N, endpoint=True)\n y_slice = np.linspace(y_min, y_max, N, endpoint=True)\n x_grid, y_grid = np.meshgrid(x_slice, y_slice)\n slicer_mask = np.abs(y_grid) < scale_mas / 2\n\n # ## Show the PSF both in [mas] space where it should be circular and in [pixel] space where it should be anamorphic\n # fig, ax = plt.subplots(1, 1)\n # img1 = ax.imshow((np.abs(image_electric))**2, extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # # plt.colorbar(img1, ax=ax)\n # ax.set_title(r'Airy Pattern | %.1f mas scale | Wavelength: %.3f $\\mu$m' % (scale_mas, wavelength))\n # ax.set_xlabel(r'X [mas]')\n # ax.set_ylabel(r'Y [mas]')\n # ax.set_xlim([-10, 10])\n # ax.set_ylim([-10, 10])\n #\n # fig, ax = plt.subplots(1, 1)\n # img1 = ax.imshow((np.abs(image_electric))**2, extent=[-N/2, N/2, -N/2, N/2], cmap='bwr')\n # ax.set_title(r'Airy Pattern | %.1f mas scale | Wavelength: %.3f $\\mu$m' % (scale_mas, wavelength))\n # ax.set_xlabel(r'Pixels [ ]')\n # ax.set_ylabel(r'Pixels [ ]')\n # ax.set_xlim([-100, 100])\n # ax.set_ylim([-100, 100])\n\n # plt.show()\n\n # (2) Propagate the masked electric field to Pupil Plane\n pup_grating = ifft2(fftshift(slicer_mask * image_electric))\n # (2.1) Add pupil mask, this time without the central obscuration\n aperture_mask = rho < 1.0\n\n # (3) Propagate back to Focal Plane\n final_focal = fftshift(fft2(aperture_mask * pup_grating))\n final_psf = (np.abs(final_focal))**2\n final_psf /= np.max(final_psf)\n\n # (4) Crop the PSF to fit to the necessary window to ease the convolutions\n min_pix, max_pix = N//2 - N_window//2, N//2 + N_window//2\n crop_psf = final_psf[min_pix:max_pix, min_pix:max_pix]\n\n # If we want to show the plots for Documentation\n\n # fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n # psf_airy = (np.abs(image_electric))**2\n # img1 = ax1.imshow(psf_airy, extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # ax1.axhline(y=scale_mas/2, linestyle='--', color='black')\n # ax1.axhline(y=-scale_mas/2, linestyle='--', color='black')\n # ax1.set_xlabel(r'X [mas]')\n # ax1.set_ylabel(r'Y [mas]')\n # ax1.set_xlim([-15, 15])\n # ax1.set_ylim([-15, 15])\n # ax1.set_title(r'Airy Pattern | Slicer Mask %.1f mas' % scale_mas)\n #\n # img2 = ax2.imshow(aperture_mask * (np.abs(pup_grating)**2), extent=[-1, 1, -1, 1], cmap='bwr')\n # ax2.set_title(r'Pupil Plane | Aperture Mask')\n # ax2.set_xlim([-0.25, 0.25])\n # ax2.set_ylim([-0.25, 0.25])\n #\n # img3 = ax3.imshow(final_psf, 
extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # ax3.set_xlabel(r'X [mas]')\n # ax3.set_ylabel(r'Y [mas]')\n # ax3.set_xlim([-15, 15])\n # ax3.set_ylim([-15, 15])\n # ax3.set_title(r'Diffraction Effects')\n # plt.show()\n\n return crop_psf", "def weak_lensing_kernel(cosmo, pzs, z, ell):\n z = np.atleast_1d(z)\n zmax = max([pz.zmax for pz in pzs])\n # Retrieve comoving distance corresponding to z\n chi = bkgrd.radial_comoving_distance(cosmo, z2a(z))\n\n # Extract the indices of pzs that can be treated as extended distributions,\n # and the ones that need to be treated as delta functions.\n pzs_extended_idx = [\n i for i, pz in enumerate(pzs) if not isinstance(pz, rds.delta_nz)\n ]\n pzs_delta_idx = [i for i, pz in enumerate(pzs) if isinstance(pz, rds.delta_nz)]\n # Here we define a permutation that would put all extended pzs at the begining of the list\n perm = pzs_extended_idx + pzs_delta_idx\n # Compute inverse permutation\n inv = np.argsort(np.array(perm, dtype=np.int32))\n\n # Process extended distributions, if any\n radial_kernels = []\n if len(pzs_extended_idx) > 0:\n\n @vmap\n def integrand(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n # Stack the dndz of all redshift bins\n dndz = np.stack([pzs[i](z_prime) for i in pzs_extended_idx], axis=0)\n return dndz * np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(simps(integrand, z, zmax, 256) * (1.0 + z) * chi)\n # Process single plane redshifts if any\n if len(pzs_delta_idx) > 0:\n\n @vmap\n def integrand_single(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n return np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(\n integrand_single(np.array([pzs[i].params[0] for i in pzs_delta_idx]))\n * (1.0 + z)\n * chi\n )\n # Fusing the results together\n radial_kernel = np.concatenate(radial_kernels, axis=0)\n # And perfoming inverse permutation to put all the indices where they should be\n radial_kernel = radial_kernel[inv]\n\n # Constant term\n constant_factor = 3.0 * const.H0 ** 2 * cosmo.Omega_m / 2.0 / const.c\n # Ell dependent factor\n ell_factor = np.sqrt((ell - 1) * (ell) * (ell + 1) * (ell + 2)) / (ell + 0.5) ** 2\n return constant_factor * ell_factor * radial_kernel", "def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. 
* dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]", "def kelvin_effect(pres, surft, temp, mw_ba, dcell):\n volm = mw_ba/1e3 # approximation: using density 1000 kg/m3\n return pres*exp(-4*surft*volm/(dcell*gas_constant*temp))", "def test_double_ended_wls_fix_gamma_estimate_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 500\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 100)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n rst = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n rast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real) - 1)\n )\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n alpha -= alpha[0] # the first x-index is where to start counting\n dalpha = dalpha_p - dalpha_m\n alpha2 = x * dalpha\n\n # to ensure the st, rst, ast, rast were correctly defined.\n np.testing.assert_allclose(alpha2, alpha, atol=1e-15, rtol=0)\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"rst\": ([\"x\", \"time\"], rst),\n \"rast\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.35 * cable_len)],\n \"warm\": [slice(0.67 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_double_ended(\n sections=sections,\n st_var=1e-12,\n ast_var=1e-12,\n rst_var=1e-12,\n rast_var=1e-12,\n method=\"wls\",\n solver=\"sparse\",\n fix_gamma=(gamma, 0.0),\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)\n assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=9)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=6)\n assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=6)\n assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=6)\n\n pass", "def solid_surface_density_CL2013_given_observed_catalog(sss_per_sys, max_core_mass=10.):\n a_obs_per_sys = gen.a_from_P(sss_per_sys['P_obs'], sss_per_sys['Mstar_obs'][:,None])\n a_obs = a_obs_per_sys[sss_per_sys['P_obs'] > 0]\n radii_obs = sss_per_sys['radii_obs'][sss_per_sys['P_obs'] > 0]\n core_mass_obs = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(radii_obs)\n core_mass_obs[core_mass_obs > max_core_mass] = max_core_mass\n sigma_obs = solid_surface_density_CL2013(core_mass_obs, a_obs)\n return sigma_obs, core_mass_obs, a_obs", "def main():\n \n # Particle in SHO - c.f. Mocz & Succi (2015) Fig. 
2\n # parameters\n n = 100 # number of particles\n dt = 0.02 # timestep\n nt = 100 # number of timesteps\n nt_setup = 400 # number of timesteps to set up simulation\n n_out = 25 # plot solution every nout steps\n b = 4 # velocity damping for acquiring initial condition\n m = 1/n # mass of SPH particle ( m * n = 1 normalizes |wavefunction|^2 to 1)\n h = 40/n # smoothing length\n t = 0. # time\n\n # plot potential\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n fig = plt.plot(xx, 0.5*xx**2, linewidth=5, color=[0.7, 0.7, 0.9])\n \n # initialize\n x = np.linspace(-3.0, 3.0, num=n)\n x = np.reshape(x,(n,1))\n u = np.zeros((n,1))\n \n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h )\n\n # get v at t=-0.5*dt for the leap frog integrator using Euler's method\n u_mhalf = u - 0.5 * dt * a\n\n # main loop (time evolution)\n for i in np.arange(-nt_setup, nt): # negative time (t<0, i<0) is used to set up initial conditions\n\n # leap frog\n u_phalf = u_mhalf + a*dt\n x = x + u_phalf*dt\n u = 0.5*(u_mhalf+u_phalf)\n u_mhalf = u_phalf\n if (i >= 0):\n t = t + dt\n print(\"%.2f\" % t)\n \n if (i == -1 ): # switch off damping before t=0\n u = np.zeros((n,1)) + 1.0\n u_mhalf = u\n b = 0 # switch off damping at time t=0\n \n # update densities, pressures, accelerations\n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h)\n \n # plot solution every n_out steps\n if( (i >= 0) and (i % n_out) == 0 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n rr_exact = 1./np.sqrt(np.pi) * np.exp(-(xx-np.sin(t))**2/2.)**2\n fig = plt.plot(xx, rr_exact, linewidth=2, color=[.6, .6, .6])\n fig = plt.plot(xx, rr, linewidth=2, color=[1.*i/nt, 0, 1.-1.*i/nt], label='$t='+\"%.2f\" % t +'$')\n # plot the t<0 damping process for fun\n if( i==-nt_setup or i==-nt_setup*3/4 or i==-nt_setup/2 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n fig = plt.plot(xx, rr, linewidth=1, color=[0.9, 0.9, 0.9])\n \n plt.legend()\n plt.xlabel('$x$')\n plt.ylabel('$|\\psi|^2$')\n plt.axis([-2, 4, 0, 0.8])\n plt.savefig('solution.pdf', aspect = 'normal', bbox_inches='tight', pad_inches = 0)\n plt.close()", "def contract_tenors(self):\n\n\tself.r_outer_r[:,:,0,1,:] = self.r_outer_r[:,:,0,1,:]/(1. - self.k_dot_r[0,1,:])\n\tself.r_outer_r[:,:,0,2,:] = self.r_outer_r[:,:,0,2,:]/(1. - self.k_dot_r[0,2,:])\n\t\n\tself.r_outer_r[:,:,1,0,:] = self.r_outer_r[:,:,1,0,:]/(1. - self.k_dot_r[1,0,:])\n\tself.r_outer_r[:,:,1,2,:] = self.r_outer_r[:,:,1,2,:]/(1. - self.k_dot_r[1,2,:])\n\t\n\tself.r_outer_r[:,:,2,0,:] = self.r_outer_r[:,:,2,0,:]/(1. - self.k_dot_r[2,0,:])\n\tself.r_outer_r[:,:,2,1,:] = self.r_outer_r[:,:,2,1,:]/(1. 
- self.k_dot_r[2,1,:])\n\n\tself.delta_l = np.zeros((3,3,self.N),dtype=np.complex_)\n \n\tself.delta_l[0,1,:] = get_l(self,0,1)\n\tself.delta_l[1,0,:] = get_l(self,1,0)\n\t\n\tself.delta_l[0,2,:] = get_l(self,0,2)\n\tself.delta_l[2,0,:] = get_l(self,2,0)\n\t\n\tself.delta_l[1,2,:] = get_l(self,1,2)\n\tself.delta_l[2,1,:] = get_l(self,2,1)\n \n\treturn", "def calculate_godunov_fluxes(densities, pressures, vel_x, vel_y, gamma):\n density_fluxes = np.zeros((densities.shape[0] - 1, densities.shape[1] - 1, 2))\n momentum_flux_x = np.zeros(density_fluxes.shape)\n momentum_flux_y = np.zeros(density_fluxes.shape)\n total_energy_fluxes = np.zeros(density_fluxes.shape)\n\n i_length, j_length = np.shape(densities)\n for i in range(i_length - 1):\n for j in range(j_length - 1):\n solver = IterativeRiemannSolver()\n\n # Generate left and right states from cell averaged values\n left_state = ThermodynamicState1D(pressures[i, j], densities[i, j], vel_x[i, j], gamma[i, j])\n right_state = ThermodynamicState1D(pressures[i + 1, j], densities[i + 1, j], vel_x[i + 1, j], gamma[i + 1, j])\n\n # Solve Riemann problem for star states\n p_star, u_star = solver.get_star_states(left_state, right_state)\n\n # Calculate fluxes using solver sample function\n p_flux, u_flux, rho_flux, is_left = solver.sample(0.0, left_state, right_state, p_star, u_star)\n\n # Store fluxes in array\n v_y = vel_y[i, j] if is_left else vel_y[i + 1, j]\n flux_gamma = left_state.gamma if is_left else right_state.gamma\n density_fluxes[i, j - 1, 0] = rho_flux * u_flux\n momentum_flux_x[i, j - 1, 0] = rho_flux * u_flux * u_flux + p_flux\n momentum_flux_y[i, j - 1, 0] = rho_flux * u_flux * v_y\n e_tot = p_flux / (flux_gamma - 1) + 0.5 * rho_flux * u_flux * u_flux + 0.5 * rho_flux * v_y ** 2\n total_energy_fluxes[i, j - 1, 0] = (p_flux + e_tot) * u_flux\n\n # Generate left and right states from cell averaged values\n left_state = ThermodynamicState1D(pressures[i, j], densities[i, j], vel_y[i, j], gamma[i, j])\n right_state = ThermodynamicState1D(pressures[i, j + 1], densities[i, j + 1], vel_y[i, j + 1], gamma[i, j + 1])\n\n # Solve Riemann problem for star states\n p_star, v_star = solver.get_star_states(left_state, right_state)\n\n # Calculate fluxes using solver sample function\n p_flux, v_flux, rho_flux, is_left = solver.sample(0.0, left_state, right_state, p_star, v_star)\n\n # Store fluxes in array\n v_x = vel_x[i, j] if is_left else vel_x[i, j + 1]\n flux_gamma = left_state.gamma if is_left else right_state.gamma\n density_fluxes[i - 1, j, 1] = rho_flux * v_flux\n momentum_flux_x[i - 1, j, 1] = rho_flux * v_x * v_flux\n momentum_flux_y[i - 1, j, 1] = rho_flux * v_flux * v_flux + p_flux\n e_tot = p_flux / (flux_gamma - 1) + 0.5 * rho_flux * v_flux * v_flux + 0.5 * rho_flux * v_x ** 2\n total_energy_fluxes[i - 1, j, 1] = (p_flux + e_tot) * v_flux\n\n return density_fluxes, momentum_flux_x, momentum_flux_y, total_energy_fluxes", "def run_grav(self):\n\n # Solucao direta\n self.prod_w = []\n self.prod_o = []\n t0 = time.time()\n # self.set_volumes_in_primal()\n self.set_sat_in()\n self.set_lamb_2()\n self.set_global_problem_vf_3_gr1_bif()\n self.Pf = self.solve_linear_problem(self.trans_fine, self.b, len(self.all_fine_vols_ic))\n self.organize_Pf()\n del self.Pf\n self.mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf_all))\n del self.Pf_all\n self.test_conservation_fine()\n # self.store_flux_pf_gr_bif = self.create_flux_vector_pf_gr_bif_1()\n\n \"\"\"\n ################################################################\n # 
Solucao Multiescala\n self.calculate_restriction_op_2()\n self.calculate_prolongation_op_het()\n self.organize_op()\n self.Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(self.trilOR, self.trans_fine, self.nf_ic), self.trilOP, self.nf_ic), self.nc, self.nc)\n self.Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf_ic, self.b), self.nc)\n self.Pc = self.solve_linear_problem(self.Tc, self.Qc, self.nc)\n self.set_Pc()\n self.Pms = self.multimat_vector(self.trilOP, self.nf_ic, self.Pc)\n\n del self.trilOP\n del self.trilOR\n del self.Tc\n del self.Qc\n del self.Pc\n\n self.organize_Pms()\n del self.Pms\n self.mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms_all))\n del self.Pms_all\n self.erro()\n\n self.test_conservation_coarse_gr()\n # self.Neuman_problem_6_gr()\n # self.store_flux_pms_gr = self.create_flux_vector_pms_gr()\n ####################################################################\n \"\"\"\n\n\n\n\n\n\n\n print('acaboooou')\n self.mb.write_file('new_out_bif_gr.vtk')\n\n\n shutil.copytree(self.caminho1, self.pasta)", "def excitation_force(w, draft, radius, water_depth):\n k = w**2 / 9.81\n ka = k * radius\n kd = k * draft\n kh = k * water_depth\n\n rho = 1025\n g = 9.81\n\n # XXX check this!\n f1 = -1j * (jn(1, ka) - jnd(1, ka) * hankel2(1, ka) / hankel2d(1, ka))\n #f1 = -1j * (jn(1, ka) - jnd(1, ka) * hankel1(1, ka) / hankel1d(1, ka))\n M = (kd*sinh(kh-kd) + cosh(kh-kd) - cosh(kh)) / (k**2 * cosh(kh))\n F = (-sinh(kh-kd) + sinh(kh)) / (k * cosh(kh))\n\n zs = zeros_like(F, dtype=np.complex)\n X = np.c_[F, zs, zs, zs, M, zs]\n X *= (-rho * g * pi * radius) * 2 * f1[:, newaxis]\n\n return X", "def fdspring(xy, v, NL, KL, BM, Mm, beta):\n NP, nn = np.shape(NL)\n if np.shape(xy)[1] == 2:\n '''2D version'''\n vecx = np.array([[KL[i, j] * (xy[i, 0] - xy[NL[i, j], 0]) for j in range(nn)] for i in range(NP)])\n vecy = np.array([[KL[i, j] * (xy[i, 1] - xy[NL[i, j], 1]) for j in range(nn)] for i in range(NP)])\n mag = np.sqrt(vecx ** 2 + vecy ** 2)\n # KLnoz = KL.copy() #no zeros\n # KLnoz[KLnoz ==0] = 1. --> same value as mag[mag==0], so that stretch=0 for those\n stretch = mag - BM\n mag[mag == 0.] = 1. # avoid divide by zero error\n # print(stretch)\n dxvec = np.sum(stretch * vecx / mag, axis=-1) / Mm\n dyvec = np.sum(stretch * vecy / mag, axis=-1) / Mm\n # damping term\n damp_dv = np.array([beta / Mm[i] * v[i] for i in range(NP)])\n # add them up\n ftx = -np.hstack((dxvec.reshape(NP, 1), dyvec.reshape(NP, 1))) - damp_dv\n else:\n '''3D version'''\n vecx = np.array([[KL[i, j] * (xy[i, 0] - xy[NL[i, j], 0]) for j in range(nn)] for i in range(NP)])\n vecy = np.array([[KL[i, j] * (xy[i, 1] - xy[NL[i, j], 1]) for j in range(nn)] for i in range(NP)])\n vecz = np.array([[KL[i, j] * (xy[i, 2] - xy[NL[i, j], 2]) for j in range(nn)] for i in range(NP)])\n mag = np.sqrt(vecx ** 2 + vecy ** 2 + vecz ** 2)\n # KLnoz = KL.copy() #no zeros\n # KLnoz[KLnoz ==0] = 1. #same value as mag[mag==0], so that stretch=0 for those\n stretch = mag - BM\n mag[mag == 0.] = 1. # avoid divide by zero error\n dxvec = np.sum(stretch * vecx / mag, axis=-1) / Mm\n dyvec = np.sum(stretch * vecy / mag, axis=-1) / Mm\n dzvec = np.sum(stretch * vecz / mag, axis=-1) / Mm\n # damping term\n damp_dv = np.array([beta / Mm[i] * v[i] for i in range(NP)])\n # add them up\n ftx = -np.hstack((dxvec.reshape(NP, 1), dyvec.reshape(NP, 1), dyvec.reshape(NP, 1))) - damp_dv\n return ftx", "def Schechter_M_z(M, redshift, richness):\n\treturn 0.4 * n.log(10.) 
* 10**logPhi_evol(redshift, richness) * 10**(0.4 * (M_s_evol(redshift, richness) - M) * (alpha_evol(redshift, richness) + 1)) * n.e**( -10** ( 0.4 * (M_s_evol(redshift,richness) - M)))", "def heat_vaporization_func(ts):\n heat_vaporization = np.copy(ts).astype(np.float64)\n heat_vaporization -= 273.15\n heat_vaporization *= -0.00236\n heat_vaporization += 2.501\n heat_vaporization *= 1E6\n return heat_vaporization.astype(np.float32)", "def get_sn2005ek(colorplt=False):\n z = 0.016551\n ebv = 0.210\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 53639.9\n print (\"adopt r band t_max from Drout+13\")\n \n # tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\\t')\n # tb = tb.drop(columns=[\"Unnamed: 6\"])\n \n mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,\n 53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,\n 53654.2, 53655.2, 53656.2, 53657.2])\n \n Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,\n 20.07, np.nan, 20.67, 20.90, 21.05, np.nan,\n 21.74, np.nan, np.nan, np.nan])\n \n Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07, \n 0.07, np.nan, 0.04, 0.04, 0.04, np.nan,\n 0.12, np.nan, np.nan, np.nan])\n \n Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,\n 18.93, 19.48, 19.63, 19.86, 19.98, 20.35,\n 20.60, 20.74, 20.88, 21.22])\n \n Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,\n 0.02, 0.06, 0.03, 0.03, 0.04, 0.05, \n 0.08, 0.10, 0.08, 0.13])\n \n Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18, \n np.nan, 18.83, 19.03, 19.26, 19.48, 19.75,\n 20.08, np.nan, 20.47, np.nan])\n \n Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,\n np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,\n 0.05, np.nan, 0.08, np.nan])\n\n Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71, \n np.nan, 18.13, 18.26, 18.51, 18.61, 18.74, \n 19.01, np.nan, 19.47, np.nan])\n \n Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,\n np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,\n 0.05, np.nan, 0.06, np.nan])\n \n mymjds = np.hstack([mjds, mjds, mjds, mjds])\n mymags = np.hstack([Bmags, Vmags, Rmags, Imags])\n myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])\n myfilts = np.hstack([ np.repeat(\"B\", len(Bmags)),\n np.repeat(\"V\", len(Bmags)),\n np.repeat(\"R\", len(Rmags)),\n np.repeat(\"I\", len(Imags)) ])\n ix = ~np.isnan(mymags)\n tb = pd.DataFrame({'mjd': mymjds[ix],\n 'mag': mymags[ix],\n 'emag': myemags[ix],\n \"filter\": myfilts[ix]})\n \n ixB = tb['filter'].values==\"B\"\n ixV = tb['filter'].values==\"V\"\n ixR = tb['filter'].values==\"R\"\n ixI = tb['filter'].values==\"I\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixV] = 5430\n tb['wave'].values[ixR] = 6349\n tb['wave'].values[ixI] = 8797\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['B', 'R', 'I']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"R\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in 
range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"B\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"R\"]\n itb = tbsub[tbsub[\"filter\"].values==\"I\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"BmR\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"RmI\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def build_rhs():\n\n def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n \"\"\"Computes the divergence of the velocity field.\"\"\"\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. 
* dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]\n\n def add_factor(\n v,\n factor,\n ):\n return [factor * v_i for v_i in v]\n\n b_terms = {\n _B_TERM_SOURCE_RHO: add_factor(src_rho, inv_dt),\n }\n if isinstance(rho_info, ConstantDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(rho_info.rho, states['u'], states['v'], states['w']),\n inv_dt * rho_info.rho),\n _B_TERM_DRHO_DT: [\n tf.zeros_like(src_rho_i) for src_rho_i in src_rho\n ],\n })\n\n elif isinstance(rho_info, VariableDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(1.0, states['rho_u'], states['rho_v'], states['rho_w']),\n inv_dt),\n _B_TERM_DRHO_DT:\n add_factor(rho_info.drho_dt, inv_dt),\n })\n\n else:\n raise ValueError('`rho_info` has to be either `ConstantDensityInfo` or '\n '`VariableDensityInfo`.')\n\n # pylint: disable=g-complex-comprehension\n return [(div_i + drho_dt_i - src_rho_i)\n for div_i, drho_dt_i, src_rho_i in zip(\n b_terms[_B_TERM_DIV],\n b_terms[_B_TERM_DRHO_DT],\n b_terms[_B_TERM_SOURCE_RHO],\n )], b_terms\n # pylint: enable=g-complex-comprehension", "def carsurf_loop(config):\n site_names = config[\"site_names\"][0]\n print(site_names)\n total_sites = len(site_names)\n\n dx = config[\"dx\"].copy()\n\n # how many hours back should footprints be calculated?\n # roughly, how long should time_before_observation be?\n length = config[\"length\"].copy()\n # I think this is the right way to adjust it\n # We have particle releases at the beginning and end of the `OBS_WINDOW`\n # both of these will need `length` bins back to put observations in\n footprint_nbins = math.ceil((length + OBS_WINDOW) / FLUX_WINDOW)\n # how many days before the first day of the month the simulation goes\n # how far back did LPD calculate trajectories?\n lag = int(config[\"lag\"])\n\n site_alt = config[\"alt\"][0].copy()\n site_lon = config[\"lon\"][0].copy()\n site_lat = config[\"lat\"][0].copy()\n\n dimx = int(config[\"dimx\"])\n dimy = int(config[\"dimy\"])\n\n days_tot = config[\"num_days\"]\n\n out_dir = config[\"outdir\"][0].decode(\"ascii\")\n\n year = int(config[\"year\"][0])\n month = int(config[\"month\"][0])\n\n simulation_earliest_obs = datetime.datetime(year, month, 1)\n # technically the start of the first observation of the next month\n simulation_latest_obs = (simulation_earliest_obs +\n dateutil.relativedelta.relativedelta(months=+1))\n simulation_zero = (simulation_earliest_obs -\n datetime.timedelta(days=lag))\n # obs_time_bounds = dateutil.rrule.rrule(\n # dateutil.rrule.HOURLY, dtstart=simulation_earliest_obs,\n # interval=OBS_WINDOW, until=simulation_latest_obs,\n # cache=True)\n n_obs_bins = ((simulation_latest_obs - simulation_earliest_obs) //\n datetime.timedelta(hours=OBS_WINDOW))\n\n print(\"Simulation zero: \", simulation_zero)\n print(\"Earliest release time:\", simulation_earliest_obs)\n print(\"Last release time: \", simulation_latest_obs)\n\n def obs_var_to_index(sec_since_start):\n \"\"\"Get the index for the bin.\n\n Parameters\n ----------\n bin: int\n\n Returns\n -------\n int\n The index in the NetCDF file created\n 0 is the beginning of the simulation,\n at the end of the time window.\n \"\"\"\n sec_since_first_obs = (sec_since_start -\n (simulation_earliest_obs -\n simulation_zero).total_seconds())\n bin_num = int(sec_since_first_obs // (SECONDS_PER_HOUR * OBS_WINDOW))\n\n # alternate: netCDF4.numtodate(sec_since_start, 
lpdm_obs_time_unit)\n # - simulation_unit\n # // datetime.timedelta(hours=OBS_WINDOW)\n\n # use time at the end of the window, not the start\n return n_obs_bins - bin_num\n\n print(\"Bin index for last release: \",\n obs_var_to_index((simulation_latest_obs -\n simulation_zero).total_seconds()))\n print(\"Bin index for first release:\",\n obs_var_to_index((simulation_earliest_obs -\n simulation_zero).total_seconds()))\n\n # int is more precise than float for this range (up to 4 billion)\n # as we can only have a million particles at a time\n # (for now, run_lprm maxnp)\n # this should also be faster\n # final = np.zeros((total_sites, length, dimy, dimx),\n # dtype=np.int32)\n\n # list of cubes with influence function\n # final_list = collections.deque((), config[\"lpdm_terase\"]//3600)\n # list of release times corresponding to those cubes\n # release_times = collections.deque((), config[\"lpdm_terase\"]//3600)\n # file_name_list = collections.deque((), config[\"lpdm_terase\"]//3600)\n\n wrf_out = read_wrf_grid(config[\"wrf_file\"][0])\n\n # LPDM works in minutes for the most part\n time_unit = \"minutes since {start:{date_fmt:s}}\".format(\n start=simulation_zero, date_fmt=UDUNITS_DATE)\n\n print(\"About to create file\")\n ds = netCDF4.Dataset(\n os.path.join(\n out_dir,\n \"LPDM_{year:04d}_{month:02d}_{flux_window:02d}\"\n \"hrly_{dx:03d}km_molar_footprints.nc4\".format(\n year=year, month=month, flux_window=FLUX_WINDOW,\n dx=int(dx))),\n \"w\", format=\"NETCDF4\")\n set_global_attributes(ds)\n\n ds.time_coverage_start = simulation_earliest_obs.strftime(ACDD_DATE)\n ds.time_coverage_end = simulation_latest_obs.strftime(ACDD_DATE)\n ds.time_coverage_duration = \"P0000-01-00T00:00:00\"\n ds.time_coverage_resolution = \"P0000-00-00T{obs_window:02d}:00:00\".format(\n obs_window=OBS_WINDOW)\n\n infl_fun_var = set_up_file(\n ds, total_sites, footprint_nbins,\n dimy, dimx, wrf_out, time_unit, site_names)\n\n ds.variables[\"site_lats\"][:] = site_lat\n ds.variables[\"site_lons\"][:] = site_lon\n ds.variables[\"site_heights\"][:] = site_alt\n print(\"Created file\")\n # ds.variables[\"site_names\"][:] = np.char.ljust(\n # site_names, int(site_names.dtype.str[2:]), \" \")\n\n # loop over input files\n # loop goes backward in time from first file output to last\n for step, current_time in zip(\n range(int(HOURS_PER_DAY * days_tot), 0, -FLUX_WINDOW),\n reversed(tuple(dateutil.rrule.rrule(\n dateutil.rrule.HOURLY,\n simulation_zero,\n FLUX_WINDOW,\n until=simulation_latest_obs)))):\n print(\"Day: \", step // HOURS_PER_DAY - 1, step/HOURS_PER_DAY,\n \"\\tHour: \", step % HOURS_PER_DAY)\n\n # which file to open first (r_{first_file:d}m.dat)\n first_file = step * MINUTES_PER_HOUR\n\n # end of the period for flux integration\n # earliest file to open? 
(minutes)\n # now unused.\n # end_flights = first_file - length * MINUTES_PER_HOUR\n\n print(\"Current output time:\", current_time)\n\n # set up the cube to receive the data\n # current_time = simulation_zero + datetime.timedelta(minutes=flights)\n\n # LPDM output codes release time in seconds\n # in a given file, we will have observations from the current time\n # forward for lag days (fluxes influence future obs)\n\n # last release we care about\n # oldest particles in the first file this iteration\n last_obs = next_larger_multiple(\n (min(current_time + datetime.timedelta(hours=float(length)),\n simulation_latest_obs) -\n simulation_zero).total_seconds(),\n OBS_WINDOW * SECONDS_PER_HOUR)\n print(\"Last release in this iteration:\",\n simulation_zero + datetime.timedelta(seconds=last_obs))\n # first release we care about\n # newest particles in the last file this iteration\n first_obs = next_smaller_multiple(\n (max(current_time - datetime.timedelta(hours=FLUX_WINDOW),\n simulation_earliest_obs) -\n simulation_zero).total_seconds(),\n OBS_WINDOW * SECONDS_PER_HOUR)\n print(\"First release in this iteration:\",\n simulation_zero + datetime.timedelta(seconds=first_obs))\n print(\"Last release should be no later than:\",\n simulation_zero + datetime.timedelta(seconds=first_obs) +\n datetime.timedelta(hours=FLUX_WINDOW))\n\n n_obs_bins_here = (last_obs - first_obs) // SECONDS_PER_HOUR // OBS_WINDOW\n\n # new_cube = create_vars...\n # final_list.append(new_cube)\n # file_name_list.append(\n # \"INFUN_{date:M%m_D%d_H%H}.nc4\".format(date=current_time))\n # release_times.append(current_time)\n\n # go through the files for the hour\n # increase the dtype if LPDM maxnp * n_files_per_hour\n # goes above about 3 billion\n # length should be footprint_nbins\n flux_window_data = np.zeros((dimx, dimy, total_sites, n_obs_bins_here),\n dtype=np.int16)\n file_per_hour = int(config[\"num_file_per_h\"])\n minutes_per_file = MINUTES_PER_HOUR // file_per_hour\n\n flux_time_var = ds.variables[\"flux_time\"]\n flux_time_bounds_var = ds.variables[\"flux_time_bnds\"]\n\n obs_time_var = ds.variables[\"observation_time\"]\n obs_time_bounds_var = ds.variables[\"observation_time_bnds\"]\n\n print(\"Reading LPD output\")\n\n # loop over flux files in this window\n for i in range(FLUX_WINDOW):\n for minute in range(MINUTES_PER_HOUR, 0, -minutes_per_file):\n # get the data from the file\n # Probably in C\n data = np.genfromtxt(\n os.path.join(\n config[\"indir\"][0].decode(\"ascii\"),\n \"r_{fli:d}m.dat\".format(\n fli=(first_file - i * MINUTES_PER_HOUR - minute))),\n # number of lines determined from file\n skip_header=1,\n # particle id not needed\n usecols=(1, 2, 3, 4, 5),\n )\n\n # given as x, y, z, site, obs_time?\n # obs_time in seconds, apparently\n mins = ( 0, 0, 0,\n 1, first_obs)\n maxs = (float(dimx*dx), float(dimy*dx), CLOSE_TO_GROUND,\n total_sites, last_obs)\n\n # probably in C\n binned_data, bin_desc = np.histogramdd(\n data, bins=(dimx, dimy, 1, total_sites, n_obs_bins_here),\n # also kind of cheating\n range=np.column_stack((mins, maxs))\n )\n del data\n\n # drop the z dimension from the counts\n # flux_window_data += np.asanyarray(binned_data[:,:,0,:,:],\n # dtype=np.int32)\n # binned_data is a float array,\n # so need unsafe casting to bring back integer counts\n # C\n np.add(flux_window_data, binned_data[:,:,0,:,:],\n out=flux_window_data, casting=\"unsafe\")\n del binned_data, bin_desc\n\n print(\"Read LPD output; writing data\")\n\n # find the indicies where the data should go\n # no data for 
particles released before first_obs yet\n # problem does not seem to be here, given range semantics\n obs_start = obs_var_to_index(first_obs)\n obs_end = obs_var_to_index(last_obs)\n # print(obs_end, obs_start)\n\n # simplify the logic and write all times\n # it's a 1-D coord with bounds\n all_dates = tuple(dateutil.rrule.rrule(\n dateutil.rrule.HOURLY,\n simulation_zero + datetime.timedelta(seconds=first_obs),\n OBS_WINDOW,\n until=simulation_zero + datetime.timedelta(seconds=last_obs)))\n # print(all_dates[0], all_dates[-1])\n # print(simulation_zero + datetime.timedelta(seconds=first_obs))\n # print(simulation_zero + datetime.timedelta(seconds=last_obs))\n\n # observation_time is monotone decreasing by design\n # so the index for the chronologically last time will be\n # lower than that of the chronologically earlier time\n for obs_ind, obs_time_val in zip(\n range(obs_end, obs_start),\n reversed(all_dates)):\n # print(\"time is\", obs_time_val, \"mapping to index\",\n # obs_var_to_index((obs_time_val -\n # simulation_zero).total_seconds()),\n # \"\\nIndex being used:\", obs_ind)\n obs_time_var[obs_ind] = netCDF4.date2num(\n obs_time_val,\n time_unit, CALENDAR)\n obs_time_bounds_var[obs_ind, :] = netCDF4.date2num(\n (obs_time_val,\n obs_time_val - datetime.timedelta(hours=OBS_WINDOW)),\n time_unit, CALENDAR)\n print(\"Wrote obs times\")\n\n # get loop invariants\n curr_flux_time = netCDF4.date2num(current_time, time_unit, CALENDAR)\n curr_flux_bounds = netCDF4.date2num((\n current_time,\n current_time - datetime.timedelta(hours=FLUX_WINDOW)),\n time_unit, CALENDAR)\n print(curr_flux_time, time_unit, \"corresponds to\", current_time)\n\n # first obs is at simulation_earliest_obs\n # we are looking at times from current_time\n # to current_time - FLUX_WINDOW\n # first obs is in this window if obs_start == n_obs_bins\n # if obs_start - obs_end is less than footprint_nbins,\n # need to start writing at time_back=difference\n write_offset = footprint_nbins - ((obs_start - obs_end) *\n OBS_WINDOW // FLUX_WINDOW)\n if obs_end == 0:\n # the other time this can occur (the beginning)\n write_offset = 0\n print(\"Writing data with an offset of\", write_offset)\n\n # now add the data to the file\n # reversing a range is rather annoying.\n for obs_bin_num, obs_ind in enumerate(\n reversed(range(obs_end, obs_start))):\n # np.transpose reverses all dimensions if no spec given\n data_part = np.transpose(flux_window_data[:,:,:,obs_bin_num])\n\n # final_list[travel_time][:,-travel_time,:,:] = (\n # CONVERSION_FACTOR * data_part)\n # dataset = netCDF4.Dataset(file_name_list[travel_time], \"a\")\n # infl_fun = dataset.variables[\"H\"]\n\n print(infl_fun_var.shape, obs_start, obs_ind, obs_end,\n data_part.shape)\n\n # This should support OBS_WINDOW != FLUX_WINDOW, in as\n # much generality as necessary.\n print(obs_start - obs_end, n_obs_bins, obs_bin_num)\n\n back_bin_num = obs_bin_num * OBS_WINDOW // FLUX_WINDOW\n infl_fun_var[obs_ind, :, back_bin_num+write_offset, :, :] = data_part\n flux_time_var[obs_ind, back_bin_num+write_offset] = curr_flux_time\n flux_time_bounds_var[\n obs_ind, back_bin_num+write_offset, :] = curr_flux_bounds\n del data_part\n ds.sync()\n del flux_window_data\n del curr_flux_time, curr_flux_bounds\n print(\"Data written\")\n\n # if len(final_list) == final_list.maxlen:\n # # no more data to be added to the cube\n # # time to save it and free the memory\n # finished_cube = final_list.popleft()\n # release_time = release_times.popleft()", "def invert_simple(forward, meas, geom):\n\n 
surface = forward.surface\n RT = forward.RT\n instrument = forward.instrument\n\n vswir_present = False\n if any(forward.surface.wl < 2600):\n vswir_present = True \n\n tir_present = False\n if any(forward.surface.wl > 2600):\n tir_present = True \n\n # First step is to get the atmosphere. We start from the initial state\n # and estimate atmospheric terms using traditional heuristics.\n x = forward.init.copy()\n x_surface, x_RT, x_instrument = forward.unpack(x)\n\n if vswir_present:\n x[forward.idx_RT] = heuristic_atmosphere(RT, instrument, \n x_RT, x_instrument, meas, geom)\n\n # Now, with atmosphere fixed, we can invert the radiance algebraically\n # via Lambertian approximations to get reflectance\n x_surface, x_RT, x_instrument = forward.unpack(x)\n rfl_est, Ls_est, coeffs = invert_algebraic(surface, RT,\n instrument, x_surface, x_RT,\n x_instrument, meas, geom)\n\n # Condition thermal part on the VSWIR portion. Only works for\n # Multicomponent surfaces. Finds the cluster nearest the VSWIR heuristic\n # inversion and uses it for the TIR suface initialization.\n if tir_present:\n tir_idx = np.where(forward.surface.wl > 3000)[0]\n\n if vswir_present:\n x_surface_temp = x_surface.copy()\n x_surface_temp[:len(rfl_est)] = rfl_est\n mu = forward.surface.xa(x_surface_temp, geom)\n rfl_est[tir_idx] = mu[tir_idx]\n else:\n rfl_est = 0.03 * np.ones(len(forward.surface.wl))\n\n # Now we have an estimated reflectance. Fit the surface parameters.\n x_surface[forward.idx_surface] = forward.surface.fit_params(rfl_est, geom)\n\n # Find temperature of emissive surfaces\n if tir_present:\n\n # Estimate the total radiance at sensor, leaving out surface emission\n # Radiate transfer calculations could take place at high spectral resolution\n # so we upsample the surface reflectance\n rfl_hi = forward.upsample(forward.surface.wl, rfl_est)\n rhoatm, sphalb, transm, solar_irr, coszen, transup = coeffs\n\n L_atm = RT.get_L_atm(x_RT, geom)\n L_down_transmitted = RT.get_L_down_transmitted(x_RT, geom)\n L_total_without_surface_emission = \\\n L_atm + L_down_transmitted * rfl_hi / (1. 
- sphalb * rfl_hi)\n\n # These tend to have high transmission factors; the emissivity of most\n # materials is nearly 1 for these bands, so they are good for\n # initializing the surface temperature.\n clearest_wavelengths = [10125., 10390.00, 10690.00]\n\n # This is fragile if other instruments have different wavelength\n # spacing or range\n clearest_indices = [np.argmin(np.absolute(RT.wl - w))\n for w in clearest_wavelengths]\n\n # Error function for nonlinear temperature fit\n def err(z):\n T = z\n emissivity = forward.surface.emissivity_for_surface_T_init\n Ls_est, d = emissive_radiance(emissivity, T,\n forward.surface.wl[clearest_indices])\n resid = transup[clearest_indices] * Ls_est + \\\n L_total_without_surface_emission[clearest_indices] - \\\n meas[clearest_indices]\n return sum(resid**2)\n\n # Fit temperature, set bounds, and set the initial values\n idx_T = forward.surface.surf_temp_ind\n Tinit = np.array([forward.surface.init[idx_T]])\n Tbest = minimize(err, Tinit).x\n T = max(forward.surface.bounds[idx_T][0]+eps,\n min(Tbest, forward.surface.bounds[idx_T][1]-eps))\n x_surface[idx_T] = Tbest\n forward.surface.init[idx_T] = T\n\n # Update the full state vector\n x[forward.idx_surface] = x_surface\n\n # We record these initial values in the geometry object - the only\n # \"stateful\" part of the retrieval\n geom.x_surf_init = x[forward.idx_surface]\n geom.x_RT_init = x[forward.idx_RT]\n\n return x", "def InitialCondition():\n maxX = getX(C.N + 1,C.N+1,C.alpha_max)\n y0 = np.zeros(maxX,dtype=complex)\n for i in range(0, C.N+2):\n for j in range(0, C.N+2):\n for alpha in [1]:\n\n X = getX(i, j, alpha)\n\n y0[X] = 1./2./C.N * (1-delta(i, C.N+1))*(1-delta(j, C.N+1))+1./2*delta(i, C.N+1)*delta(j, C.N+1) +\\\n 1./2./(C.N)**0.5 * ((1-delta(i, C.N+1)) *\n delta(j, C.N+1)+(1-delta(j, C.N+1))*delta(i, C.N+1))", "def prepare_fg(\n self, times, wavelength, spectra, stellar, intensities, telluric, area=None\n ):\n\n if area is None:\n orb = Orbit(self.star, self.planet)\n area = orb.stellar_surface_covered_by_planet(times)\n\n model = stellar * telluric\n\n # Normalize the profile of the observations\n profile = np.nanmean(spectra, axis=1)\n model_profile = np.nanmean(model, axis=1)\n norm = profile / model_profile\n\n # Normalize the spectrum\n # model = stellar * telluric * norm[:, None]\n # profile = np.median(spectra, axis=0)\n # model_profile = np.median(model, axis=0)\n\n # nm = np.nanmedian(profile / model_profile)\n # norm *= nm\n\n # model = stellar * telluric * norm[:, None]\n # diff = spectra - model\n\n # model = np.nanmedian(spectra, axis=0)\n\n # f = -(\n # # np.nan_to_num(intensities) *\n # self.area_atmosphere\n # / self.area_planet\n # * area[:, None]\n # # * np.nan_to_num(telluric, nan=1)\n # * norm[:, None]\n # )\n # f = np.nan_to_num(intensities) * np.nan_to_num(telluric, nan=1) * norm[:, None]\n area *= self.area_atmosphere / self.area_planet\n f = -np.nan_to_num(intensities, nan=1) * area[:, None]\n if hasattr(f, \"to_value\"):\n f = f.to_value(1)\n\n # g = spectra - stellar * telluric * norm[:, None]\n # if self.n_sysrem is not None:\n # g = sysrem(g, self.n_sysrem)\n\n g = spectra\n if self.n_sysrem is not None:\n # Use SVD directly instead of Sysrem\n g = sysrem(spectra, self.n_sysrem)\n # u, s, vh = np.linalg.svd(spectra, full_matrices=False)\n # s[: self.n_sysrem] = 0\n # s[80:] = 0\n # ic = (u * s) @ vh\n # g = ic\n else:\n # g = spectra - stellar * telluric * norm[:, None]\n gen = np.random.default_rng()\n tmp = sysrem(spectra, 5)\n g = gen.normal(\n 
loc=np.nanmean(tmp), scale=np.nanstd(tmp), size=spectra.shape\n )\n # g *= np.nanstd() # std of random is 1 (in theory)\n\n # norm = np.nanstd(g, axis=0)\n # f /= norm\n # g /= norm\n\n # plt.imshow(g, aspect=\"auto\", origin=\"lower\")\n # plt.xlabel(\"Wavelength\")\n # plt.ylabel(\"Time\")\n # plt.title(f\"N_Sysrem: {self.n_sysrem}\")\n # plt.savefig(f\"spectra_sysrem_{self.n_sysrem}.png\")\n\n return wavelength, f, g", "def simulated_reflectivity(pressure, temperature, vapor_mixing_ratio, liquid_mixing_ratio, snow_mixing_ratio=None,\n graupel_mixing_ratio=None, use_varint=False, use_liqskin=False):\n # Set values for constants with variable intercept\n R1 = 1e-15\n RON = 8e6\n RON2 = 1e10\n SON = 2e7\n GON = 5e7\n RON_MIN = 8e6\n RON_QR0 = 0.00010\n RON_DELQR0 = 0.25*RON_QR0\n RON_CONST1R = (RON2-RON_MIN)*0.5\n RON_CONST2R = (RON2+RON_MIN)*0.5\n\n # set constant intercepts\n rno_l = 8e6\n rno_s = 2e7\n rno_g = 4e6\n\n qvapor = da.clip(vapor_mixing_ratio, 0., None)\n qliquid = da.clip(liquid_mixing_ratio, 0., None)\n\n # If qgraupel but not qsnow, set qgraupel = qsnow\n if snow_mixing_ratio is None:\n if graupel_mixing_ratio is None:\n qsnow = da.zeros_like(qliquid)\n qgraupel = da.zeros_like(qliquid)\n else:\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n qsnow = da.zeros_like(graupel_mixing_ratio)\n qsnow[temperature <= 273.15] = qgraupel[temperature <= 273.15]\n else:\n qsnow = da.clip(snow_mixing_ratio, 0., None)\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n\n # density for liquid, snow, and graupel (kg m-3)\n rho_l = 1000. # liquid\n rho_i = 100. # snow\n rho_g = 400. # graupel\n\n # constant evaluation of gamma distribution\n gamma = 720.\n\n # Alpha constant\n alpha = 0.224\n\n # constant multiplication factors\n factor_l = gamma * 1e18 * (1./(np.pi*rho_l))**1.75\n s = gamma * 1e18 * (1./(np.pi*rho_i))**1.75 * (rho_i/rho_l)**2 * alpha\n g = gamma * 1e18 * (1./(np.pi*rho_g))**1.75 * (rho_g/rho_l)**2 * alpha\n\n # calculate virtual temperature\n virtual_t = virtual_temperature(temperature, qvapor)\n\n # dry gas constant\n Rd = 287.\n rho_air = pressure/(Rd*virtual_t)\n\n # adjust for brightband if use_liqskin=True\n if use_liqskin:\n raise NotImplementedError('Liquid skin correction not implemented')\n # factor_s = da.full_like(temperature, s)\n # factor_g = da.full_like(temperature, g)\n # try:\n # factor_s[temperature >= 273.15] = factor_s[temperature >= 273.15] / da.array([alpha])\n # factor_g[temperature >= 273.15] = factor_g[temperature >= 273.15] / da.array([alpha])\n # except ValueError:\n # factor_s = s\n # factor_g = g\n else:\n factor_s = s\n factor_g = g\n\n # calculate variable intercept if use_varint=True\n if use_varint:\n raise NotImplementedError('Variable intercepts not yet implemented')\n # temp_c = da.clip(temperature-273.15, temperature.min(), -0.001)\n # sonv = MIN(2.0D8, 2.0D6*EXP(-0.12D0*temp_c))\n #\n # gonv = gon\n # IF (qgr(i,j,k) .GT. R1) THEN\n # gonv = 2.38D0 * (PI*RHO_G/(rhoair*qgr(i,j,k)))**0.92D0\n # gonv = MAX(1.D4, MIN(gonv,GON))\n # END IF\n #\n # ronv = RON2\n # IF (qra(i,j,k) .GT. 
R1) THEN\n # ronv = RON_CONST1R*TANH((RON_QR0 - qra(i,j,k))/RON_DELQR0) + RON_CONST2R\n # END IF\n else:\n ronv = rno_l\n sonv = rno_s\n gonv = rno_g\n\n # Total equivalent reflectivity factor (z_e, in mm^6 m^-3) is\n # the sum of z_e for each hydrometeor species:\n z_e = (((factor_l*(rho_air*qliquid)**1.75)/(ronv**.75)) +\n ((factor_s*(rho_air*qsnow)**1.75)/(sonv**.75)) +\n ((factor_g*(rho_air*qgraupel)**1.75)/(gonv**.75)))\n\n # Adjust small values of Z_e so that dBZ is no lower than -30\n z_e = da.clip(z_e, .001, None)\n\n # Convert to dBZ\n dbz = 10.*da.log10(z_e)\n return dbz", "def test_double_ended_wls_fix_alpha_fix_gamma_estimate_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 500\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 100)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n rst = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n rast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real) - 1)\n )\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n alpha -= alpha[0] # the first x-index is where to start counting\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"rst\": ([\"x\", \"time\"], rst),\n \"rast\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.5 * cable_len)],\n \"warm\": [slice(0.5 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_double_ended(\n sections=sections,\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method=\"wls\",\n solver=\"sparse\",\n fix_gamma=(gamma, 0.0),\n fix_alpha=(alpha, np.zeros_like(alpha)),\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)\n assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=18)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=11)\n assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=11)\n assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=11)\n\n pass", "def contrast_curve_core(\n star_data,\n plate_scale,\n fwhm=1,\n radius_size=None,\n center=None,\n):\n\n # make copy of data array\n data = star_data.copy()\n\n# data = np.abs(data) #DO NOT DO THIS!!!! 
It's making the standard deviation too small later.\n\n ################## establish center ########\n\n x, y = np.indices((data.shape))\n\n if type(center) == type(None):\n center = np.array(\n [(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0]\n )\n\n if type(radius_size) == type(None):\n radius_size = fwhm\n\n ########## set up radial coordinate system ########\n\n radii = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2)\n radii = radii.astype(np.int64)\n\n ones = np.ones_like(data)\n\n number_of_a = int(radii.max() / radius_size)\n\n pie_edges = np.arange(0, 390, 30)\n\n ######## set up aperture array ##########\n center_ap = CircularAperture([center[0], center[1]], radius_size)\n\n all_apers, all_apers_areas, all_masks = (\n [center_ap],\n [center_ap.area],\n [center_ap.to_mask(method=\"exact\")],\n )\n\n all_data, all_weights = [all_masks[0].multiply(data)], [\n all_masks[0].multiply(ones)\n ]\n\n all_stds = [twoD_weighted_std(all_data[0], all_weights[0])]\n\n ######## construct the apertures of the annuli #######\n sigma_clip = SigmaClip(sigma=3.0)\n bkgrms = StdBackgroundRMS(sigma_clip)\n\n medians = np.zeros((number_of_a, len(pie_edges) - 1))\n stds = np.zeros((number_of_a, len(pie_edges) - 1))\n seps = np.zeros(number_of_a)\n for j in range(int(number_of_a)):\n r_in = j * radius_size + fwhm\n r_out = j * radius_size + radius_size + fwhm\n seps[j] = (r_in+r_out)/2.*plate_scale\n\n # terminate if completely outside 10 arcseconds\n if (r_in * plate_scale) > 10:\n break\n\n # create aperture\n aper = CircularAnnulus(\n [center[0], center[1]],\n r_in=r_in,\n r_out=r_out,\n )\n\n # multiply the data by the aperture mask and store it\n all_apers.append(aper)\n all_apers_areas.append(aper.area)\n mask = aper.to_mask(method=\"exact\")\n all_masks.append(mask)\n mask_data = mask.multiply(data)\n\n mask_weight = mask.multiply(ones)\n\n for i, pie_edge_near in enumerate(pie_edges[:-1]):\n pie_edge_far = pie_edges[i + 1]\n mask_data_new = mask_data.copy()\n mask_data_new = check_boundaries(\n mask_data_new, pie_edge_near, pie_edge_far\n )\n medians[j, i] = np.nanmedian(mask_data_new)\n mask_data_masked = mask_data_new[~np.isnan(mask_data_new)]\n\n mean, std = meanclip(mask_data_masked, 3, converge_num=0.2)\n stds[j, i] = std\n\n #Return only the medians and stds for distances within the desired range\n seps = seps[0:j]\n medians = medians[0:j,:]\n stds = stds[0:j,:]\n return seps, medians, stds", "def drfl_dsurfaceb(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))", "def distribute(self, date_time, air_temp, vapor_pressure=None,\n dew_point=None, cloud_factor=None):\n\n self._logger.debug('%s Distributing thermal' % date_time)\n\n # calculate clear sky thermal\n if self.clear_sky_method == 'marks1979':\n cth = np.zeros_like(air_temp, dtype=np.float64)\n envphys_c.ctopotherm(\n air_temp, dew_point,\n self.dem,\n self.sky_view_factor,\n cth,\n self.config['marks1979_nthreads'])\n\n elif self.clear_sky_method == 'dilley1998':\n cth = clear_sky.Dilly1998(air_temp, vapor_pressure/1000)\n\n elif self.clear_sky_method == 'prata1996':\n cth = clear_sky.Prata1996(air_temp, vapor_pressure/1000)\n\n elif self.clear_sky_method == 'angstrom1918':\n cth = clear_sky.Angstrom1918(air_temp, vapor_pressure/1000)\n\n # terrain factor correction\n if (self.sky_view_factor is not None) and \\\n (self.clear_sky_method != 'marks1979'):\n # apply (emiss * skvfac) + (1.0 - skvfac) to the longwave\n cth = cth * self.sky_view_factor + (1.0 - self.sky_view_factor) * \\\n STEF_BOLTZ * air_temp**4\n\n # 
make output variable\n self.thermal_clear = cth.copy()\n\n # correct for the cloud factor\n # ratio of measured/modeled solar indicates the thermal correction\n if self.correct_cloud:\n if self.cloud_method == 'garen2005':\n cth = cloud.Garen2005(cth,\n cloud_factor)\n\n elif self.cloud_method == 'unsworth1975':\n cth = cloud.Unsworth1975(cth,\n air_temp,\n cloud_factor)\n\n elif self.cloud_method == 'kimball1982':\n cth = cloud.Kimball1982(cth,\n air_temp,\n vapor_pressure/1000,\n cloud_factor)\n\n elif self.cloud_method == 'crawford1999':\n cth = cloud.Crawford1999(cth,\n air_temp,\n cloud_factor)\n\n # make output variable\n self.thermal_cloud = cth.copy()\n\n # correct for vegetation\n if self.correct_veg:\n cth = vegetation.thermal_correct_canopy(cth,\n air_temp,\n self.veg_tau,\n self.veg_height)\n\n # make output variable\n self.thermal_veg = cth.copy()\n\n self.thermal = utils.set_min_max(cth, self.min, self.max)", "def main():\n strikes, dips, normals, slip = generate_normal_ss_data(330, 60, n=500, porp=1)\n #strikes, dips, normals, slip = generate_normal_data(330, 60, n=500, porp=10)\n sigma = invert_plane_stress(normals, slip)\n plot(sigma, strikes, dips)\n plt.show()", "def solid_surface_density(M, a, delta_a):\n sigma_solid = (M*gen.Mearth*1e3)/(2.*np.pi*(a*gen.AU)*(delta_a*gen.AU))\n return sigma_solid", "def calc_saturation_curves(self):\n HEOS = CP.AbstractState(self.additional_backend, self.fluid)\n PCSAFT = CP.AbstractState(self.backend, self.fluid)\n self.dictL, self.dictV = {}, {}\n for Q, dic in zip([0, 1], [self.dictL, self.dictV]):\n # rhomolar, smolar, hmolar, T, p, umolar = [], [], [], [], [], []\n rhomolar, T, p = [], [], []\n for _T in np.logspace(np.log10(HEOS.keyed_output(CP.iT_triple)), np.log10(HEOS.keyed_output(CP.iT_critical)), 500):\n try:\n PCSAFT.update(CP.QT_INPUTS, Q, _T)\n # print('T', PCSAFT.T())\n # print('p', PCSAFT.p())\n # print('rhomolar', PCSAFT.rhomolar())\n if (PCSAFT.p() < 0): raise ValueError('P is negative:' + str(PCSAFT.p()))\n PCSAFT.T(), PCSAFT.p(), PCSAFT.rhomolar()\n # PCSAFT.hmolar(), PCSAFT.smolar(), PCSAFT.umolar()\n\n T.append(PCSAFT.T())\n p.append(PCSAFT.p())\n rhomolar.append(PCSAFT.rhomolar())\n # hmolar.append(PCSAFT.hmolar())\n # smolar.append(PCSAFT.smolar())\n # umolar.append(PCSAFT.umolar())\n except ValueError as VE:\n myprint(1, 'satT error:', VE, '; T:', '{T:0.16g}'.format(T=_T), 'T/Tc:', _T / HEOS.keyed_output(CP.iT_critical))\n\n dic.update(dict(T=np.array(T),\n P=np.array(p),\n Dmolar=np.array(rhomolar)))\n # Hmolar=np.array(hmolar),\n # Smolar=np.array(smolar)))\n # Umolar=np.array(umolar)))", "def P_AI_Rocky(in_dict):\n # START\n fs = 16\n plt.rc('font', size=fs)\n fig = plt.figure(figsize=(14,12))\n ds = nc.Dataset(in_dict['fn'])\n\n # PLOT CODE\n aa = [-122.8, -122.54, 47.92, 48.22]\n import cmocean\n cmap = cmocean.cm.balance\n # cmap = 'RdYlBu_r'\n\n from warnings import filterwarnings\n filterwarnings('ignore') # skip some warning messages\n \n # plot Code\n \n # calculate divergence and vorticity\n uu = ds['u'][0, -1, :, :]\n vv = ds['v'][0, -1, :, :]\n u = zfun.fillit(uu)\n v = zfun.fillit(vv)\n u[np.isnan(u)] = 0\n v[np.isnan(v)] = 0\n \n G = zrfun.get_basic_info(in_dict['fn'], only_G=True)\n \n dive = ((np.diff(u, axis=1)/G['DX'][:, 1:-1])[1:-1, :]\n + (np.diff(v, axis = 0)/G['DY'][1:-1, :])[:, 1:-1])\n #dive[G['mask_rho'][1:-1,1:-1]==False] = np.nan\n \n vort = np.diff(v, axis=1)/G['DX'][1:,1:] - np.diff(u, axis=0)/G['DY'][1:,1:]\n #vort[G['mask_rho'][1:,1:]==False] = np.nan\n \n scl = 2e-3\n \n # panel 
1\n ax = fig.add_subplot(121)\n # cs = plt.pcolormesh(G['lon_psi'], G['lat_psi'], dive/scl, cmap=cmap,\n # vmin=-1, vmax=1)\n cs = plt.pcolormesh(G['lon_rho'][1:-1,1:-1], G['lat_rho'][1:-1,1:-1], dive/scl, cmap=cmap,\n vmin=-1, vmax=1, shading='gouraud')\n tstr = (r'Surface Divergence (%0.1e $s^{-1}$)' % (scl))\n #pfun.add_bathy_contours(ax, ds, txt=True)\n pfun.add_coast(ax)\n ax.axis(aa)\n pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_ylabel('Latitude')\n ax.set_title(tstr)\n pfun.add_info(ax, in_dict['fn'])\n ax.set_xticks([-122.8, -122.7, -122.6])\n ax.set_yticks([48, 48.1, 48.2])\n #\n # panel 2\n ax = fig.add_subplot(122)\n # cs = plt.pcolormesh(G['lon_rho'], G['lat_rho'], vort/scl, cmap=cmap,\n # vmin=-1, vmax=1)\n cs = plt.pcolormesh(G['lon_psi'], G['lat_psi'], vort/scl, cmap=cmap,\n vmin=-1, vmax=1, shading='gouraud')\n tstr = (r'Surface Vorticity (%0.1e $s^{-1}$)' % (scl))\n ax.set_xticks([-122.8, -122.7, -122.6])\n ax.set_yticks([])\n #fig.colorbar(cs)\n \n # Inset colorbar\n from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n cbaxes = inset_axes(ax, width=\"4%\", height=\"40%\", loc='lower left')\n fig.colorbar(cs, cax=cbaxes, orientation='vertical')\n \n #pfun.add_bathy_contours(ax, ds)\n pfun.add_coast(ax)\n ax.axis(aa)\n pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_title(tstr) \n \n #fig.tight_layout()\n # FINISH\n ds.close()\n if len(in_dict['fn_out']) > 0:\n plt.savefig(in_dict['fn_out'])\n plt.close()\n else:\n plt.show()\n plt.rcdefaults()", "def govardovskii2000_template(\n wavelengths: np.ndarray,\n alpha_max: Union[float, np.ndarray],\n A_alpha: Union[float, np.ndarray] = 69.7,\n a_alpha1: Union[float, np.ndarray] = 0.8795,\n a_alpha2: Union[float, np.ndarray] = 0.0459,\n a_alpha3: Union[float, np.ndarray] = 300.0,\n a_alpha4: Union[float, np.ndarray] = 11940.0,\n B_alpha: Union[float, np.ndarray] = 28.0,\n b_alpha: Union[float, np.ndarray] = 0.922,\n C_alpha: Union[float, np.ndarray] = -14.9,\n c_alpha: Union[float, np.ndarray] = 1.104,\n D_alpha: Union[float, np.ndarray] = 0.674,\n A_beta: Union[float, np.ndarray] = 0.26,\n beta_max1: Union[float, np.ndarray] = 189.0,\n beta_max2: Union[float, np.ndarray] = 0.315,\n d_beta1: Union[float, np.ndarray] = -40.5,\n d_beta2: Union[float, np.ndarray] = 0.195,\n) -> np.ndarray:\n x_alpha = (wavelengths / alpha_max) ** -1\n a_alpha = a_alpha1 + a_alpha2 * np.exp(-((alpha_max - a_alpha3) ** 2) / a_alpha4)\n\n alpha_band = (\n np.exp(A_alpha * (a_alpha - x_alpha))\n + np.exp(B_alpha * (b_alpha - x_alpha))\n + np.exp(C_alpha * (c_alpha - x_alpha))\n + D_alpha\n ) ** -1\n\n beta_max = beta_max1 + beta_max2 * alpha_max\n d_beta = d_beta1 + d_beta2 * alpha_max\n beta_band = np.exp(-(((wavelengths - beta_max) / d_beta) ** 2))\n\n return alpha_band + A_beta * beta_band", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. 
J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)", "def get_sky(plate, mjd, output_path, verbose=False):\n tag = f'PLATE {plate:05d} MJD {mjd:05d} PATH {output_path}'\n if verbose:\n print('Starting {}'.format(tag))\n # Initialize output data.\n last_nexp = None\n plugmaps = []\n wlens = {'b': [], 'r': []}\n wdisps = {'b': [], 'r': []}\n fluxes = {'b': [], 'r': []}\n ivars = {'b': [], 'r': []}\n flats = {'b': [], 'r': []}\n rdnoises = {'b': [], 'r': []}\n masks = {'b': [], 'r': []}\n obskeys = ('EXPOSURE', 'TAI-BEG', 'EXPTIME', 'AZ', 'ALT', 'AIRMASS',\n 'PRESSURE', 'AIRTEMP',\n 'RDNOISE0', 'RDNOISE1', 'RDNOISE2', 'RDNOISE3')\n obsvals = {key: [] for key in obskeys}\n # Size of each amplifier in raw image pixels along (wlen, tracex) axes.\n ampsize = {'b': (2056, 2048), 'r': (2064, 2057)}\n # ampx[band] tabulates whether each wavelength index is readout by\n # amplifier 0/2 (=0) or 1/3 (=1).\n ampx = {'b': 1 * (np.arange(4112) >= 2056),\n 'r': 1 * (np.arange(4128) >= 2064)}\n # amplifer[band] is a function that takes a traceset as input an returns an\n # array that tabulates whether each wavelength index is readout by\n # amplifier 0-3.\n amplifier = {'b': lambda x: 2 * (x >= 2048) + ampx['b'],\n 'r': lambda x: 2 * (x >= 2057) + ampx['r']}\n # Scaling such that RMS = rdnoise_scale * RDNOISEn * neff.\n rdnoise_scale = (4 * np.pi) ** 0.25\n # Conversion from constant log-lambda pixels to wavelength ratio.\n wdisp_const = 1e-4 * np.log(10)\n # Allowed pixel mask bits.\n valid_mask = (1 << 32) - 1\n # Slices of valid data to save. These trim pixels at each end where\n # IVAR=0 or other serious pixel mask bits are often set.\n valid_slices = {'b': slice(767, 3299), 'r': slice(483, 3668) }\n # Initialize data access.\n finder = bossdata.path.Finder()\n mirror = bossdata.remote.Manager()\n # Loop over spectrographs.\n expected_fibers = []\n for specidx in 1, 2:\n # Load the list of science exposures used for this spectrograph's coadd.\n fiber = 500 * (specidx - 1) + 1\n spec_name = finder.get_spec_path(plate, mjd, fiber=fiber, lite=True)\n exposures = bossdata.spec.SpecFile(mirror.get(spec_name)).exposures\n for band in 'b', 'r':\n camera = '{}{}'.format(band, specidx)\n use = valid_slices[band]\n # Loop over science exposures for this camera.\n nexp = exposures.num_by_camera[camera]\n if not (last_nexp is None or nexp == last_nexp):\n print(f'Different nexp for {camera} {tag}')\n return None\n last_nexp = nexp\n for expidx in range(nexp):\n # Load this camera's spFrame file.\n name = exposures.get_exposure_name(expidx, camera, 'spFrame')\n path = mirror.get(finder.get_plate_path(plate, name))\n spFrame = bossdata.plate.FrameFile(path, calibrated=False)\n # Lookup this spectrograph's sky fibers.\n sky_name = binary_type('SKY ', 'ascii')\n fiberidx = np.where(\n spFrame.plug_map['OBJTYPE'] == sky_name)[0]\n if expidx == 0 and band == 'b':\n # Save plugmap metadata.\n plugmaps.append(spFrame.plug_map[\n ['FIBERID','RA','DEC','XFOCAL','YFOCAL']][fiberidx])\n if specidx == 2:\n plugmap = astropy.table.vstack(plugmaps)\n if specidx == 1 and band == 'b':\n # Record observation metadata.\n for key in obskeys:\n try:\n value = spFrame.header[key]\n except KeyError:\n value = -999 # invalid value for int/float types\n obsvals[key].append(value)\n # Load the sky fiber data.\n fibers = spFrame.plug_map['FIBERID'][fiberidx].data\n assert np.all(fiberidx == 
spFrame.get_fiber_offsets([fibers]))\n if expidx == 0 and band == 'b':\n expected_fibers.append(fibers)\n if verbose:\n print('Found {} sky fibers on spec{}: {}.'.format(\n len(fibers), specidx,\n ','.join([str(f) for f in fibers])))\n else:\n if not np.all(fibers == expected_fibers[specidx - 1]):\n print('Did not get expected fibers for {} exp {}'\n .format(camera, expidx))\n data = spFrame.get_valid_data(\n fibers, include_sky=True, include_wdisp=True, use_ivar=True,\n pixel_quality_mask=valid_mask)\n if verbose:\n print('Reading {} for exposure {} / {}...'\n .format(camera, expidx + 1, nexp))\n assert data.shape == (len(fibers), 2 * ampsize[band][0])\n mask = spFrame.get_pixel_masks(fibers)\n masks[band].append(mask[:, use])\n # Identify pixels with valid data.\n valid = ~data['ivar'].mask\n bad_fibers = ~np.any(valid, axis=1)\n if verbose and np.any(bad_fibers):\n print(' bad fibers: {}'.format(fibers[bad_fibers]))\n ivar = data['ivar'].data\n assert np.all(ivar[valid] > 0)\n ivars[band].append(ivar[:, use])\n # Load the superflat and trace vectors for sky fibers.\n superflat = spFrame.get_superflat(fibers)\n tracex = spFrame.hdulist[7].read()[fiberidx]\n # Load fiberflat and neff vectors from this camera's spFlat.\n name = exposures.get_exposure_name(expidx, camera, 'spFlat')\n path = mirror.get(finder.get_plate_path(plate, name))\n with fits.open(path) as spFlat:\n fiberflat = spFlat[0].data[fiberidx]\n neff = bossdata.plate.TraceSet(spFlat[3]).get_y()[fiberidx]\n if np.any(neff[valid] <= 0):\n print(f'WARNING: neff <= 0 for {camera} {expidx} {tag}')\n # Lookup the per-amplifier readnoise values.\n readnoises = np.array([\n spFrame.header['RDNOISE{}'.format(amp)]\n for amp in range(4)], dtype=np.float32)\n # Determine which amplifier (0-3) each pixel along the trace is\n # read out by and scale to RMS readnoise per wavelength pixel.\n amp = amplifier[band](tracex)\n rdnoise = rdnoise_scale * readnoises[amp] * neff\n rdnoises[band].append(rdnoise[:, use].astype(np.float32))\n # Combine the superflat and fiberflat.\n flat = superflat * fiberflat\n assert np.all(flat[valid] > 0)\n flats[band].append(flat[:, use])\n # Save wavelength solutions in angstroms.\n wlen = data['wavelength'].data\n wlens[band].append(wlen[:, use])\n # Save wavelength dispersions in angstroms.\n wdisp = data['wdisp'].data\n assert np.all(wdisp[valid] > 0)\n wdisp = wlen * np.expm1(wdisp_const * wdisp)\n wdisps[band].append(wdisp[:, use])\n # Save the combined flat-fielded sky models + residuals,\n # which might be negative due to readnoise.\n flux = data['flux'].data + data['sky'].data\n fluxes[band].append(flux[:, use])\n # Build observation metadata table.\n obslist = astropy.table.Table()\n for key in obskeys:\n obslist[key] = obsvals[key]\n # Build the output HDU list.\n hdus = fits.HDUList()\n cards = dict(PLATE=plate, MJD=mjd, NFIBERS=len(plugmap), NEXP=nexp)\n hdus.append(fits.PrimaryHDU(header=fits.Header(cards)))\n hdus.append(fits.table_to_hdu(obslist))\n hdus[-1].name = 'OBSLIST'\n hdus.append(fits.table_to_hdu(plugmap))\n hdus[-1].name = 'PLUGMAP'\n for band in 'b', 'r':\n Band = band.upper()\n # Combine arrays for each band and save an an image HDU.\n hdus.append(fits.ImageHDU(np.vstack(wlens[band]),\n name='{}WLEN'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(wdisps[band]),\n name='{}WDISP'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(rdnoises[band]),\n name='{}RDNOISE'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(flats[band]),\n name='{}FLAT'.format(Band)))\n 
hdus.append(fits.ImageHDU(np.vstack(fluxes[band]),\n name='{}FLUX'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(ivars[band]),\n name='{}IVAR'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(masks[band]),\n name='{}MASK'.format(Band)))\n name = os.path.join(output_path, 'sky-{}-{}.fits'.format(plate, mjd))\n hdus.writeto(name, overwrite=True)\n print('Completed {}'.format(tag))\n return obslist", "def spec_helm_decomp(k,Cu,Cv,GM=False):\n dk = k[1]-k[0]\n s = np.log(k)\n\n Fphi = np.zeros_like(Cu)\n Fpsi = np.zeros_like(Cu)\n Cphi = np.zeros_like(Cu)\n Cpsi = np.zeros_like(Cu)\n\n # assume GM for decomposing into wave and vortex\n if GM:\n gm = np.load(\"/Users/crocha/Projects/dp_spectra/GM/gm_omega_star.npz\")\n f2omg2 = gm['rgm']\n ks = gm['k']*1.e3\n\n for i in range(s.size-1):\n\n ds = np.diff(s[i:])\n\n sh = sinh(s[i]-s[i:])\n ch = cosh(s[i]-s[i:])\n\n # the function to integrate\n Fp = Cu[i:]*sh + Cv[i:]*ch\n Fs = Cv[i:]*sh + Cu[i:]*ch\n\n # integrate using Simpson's rule\n Fpsi[i] = integrate.simps(Fs,s[i:])\n Fphi[i] = integrate.simps(Fp,s[i:])\n\n # zero out unphysical values\n Fpsi[Fpsi < 0.] = 0.\n Fphi[Fphi < 0.] = 0.\n\n # compute rotational and divergent components\n Cpsi = Fpsi - Fphi + Cu\n Cphi = Fphi - Fpsi + Cv\n\n if GM:\n\n f2omg2i = np.interp(k,ks,f2omg2)\n\n Cv_w = f2omg2i*Fphi - Fpsi + Cv\n Cv_v = Cv - Cv_w\n \n kdkromg = diff_central(ks, f2omg2)\n kdkromg = np.interp(k,ks[1:-1],kdkromg)\n\n dFphi = diff_central(k, Fphi)\n #dFphi = np.gradient(Fphi,k)\n dFphi = np.interp(k,k[1:-1],dFphi.real)\n E_w = Fphi - k*dFphi\n\n Cu_w = -k*kdkromg*Fphi + f2omg2i*(-Fpsi+Cv) + Fphi\n Cu_v = Cu - Cu_w\n\n Cb_w = E_w - (Cu_w + Cv_w)/2.\n\n return Cpsi,Cphi, Cu_w,Cv_w, Cu_v,Cv_v, E_w, Cb_w\n\n else:\n return Cpsi,Cphi", "def test_double_ended_ols_wls_fix_gamma_estimate_synthetic():\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 500\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 100)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real) / \\\n (np.exp(-gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(-gamma / temp_real) - 1)\n rst = C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \\\n np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \\\n np.exp(gamma / temp_real) / (np.exp(-gamma / temp_real) - 1)\n rast = C_m * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * np.exp(\n -dalpha_m * (-x[:, None] + cable_len)) / \\\n (np.exp(-gamma / temp_real) - 1)\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'rst': (['x', 'time'], rst),\n 'rast': (['x', 'time'], rast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'userAcquisitionTimeBW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '1'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n 
# OLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n method='ols',\n solver='sparse',\n fix_gamma=(gamma, 0.))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method='wls',\n solver='sparse',\n tmpw_mc_size=5,\n fix_gamma=(gamma, 0.))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=7)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n pass", "def cooled_surface_temp(T:np.ndarray) -> float:\n \n return T.dot(cs_temp_weights)", "def spring_particle(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate, noise_std, seed):\n num_particles = NUM_PARTS\n collater = {}\n\n def diffeq_hyper(t, q, k, m, nparts):\n num_particles = nparts\n vels = q[2 * num_particles:]\n xs = q[:2 * num_particles]\n xs = xs.reshape(-1, 2)\n forces = np.zeros(xs.shape)\n new_k = np.repeat(k, num_particles) * np.tile(k, num_particles)\n new_k = np.repeat(new_k, 2).reshape(-1, 2)\n dx = np.repeat(xs, num_particles, axis=0) - np.tile(xs, (num_particles, 1))\n resu = -new_k * dx\n forces = np.add.reduceat(resu, np.arange(0, nparts * nparts, nparts)).ravel()\n\n return np.concatenate([vels / np.repeat(m, 2), forces]).ravel()\n\n def hamiltonian(vec, m, k, num_particles):\n num_particles = num_particles\n x = vec[:num_particles * 2]\n p = vec[2 * num_particles:]\n xs = x.reshape(-1, 2)\n ps = p.reshape(-1, 2)\n U1 = 0\n K = 0\n for i in range(num_particles):\n for j in range(i + 1, num_particles):\n U1 += .5 * k[i] * k[j] * ((xs[i] - xs[j]) ** 2).sum()\n K += 0.5 * ((ps[i] ** 2).sum()) / m[i]\n return K, U1\n\n theta = []\n dtheta = []\n energy = []\n mass_arr = []\n ks_arr = []\n lagrangian = []\n np.random.seed(seed)\n\n for traj in range(num_trajectories):\n ks = np.ones(NUM_PARTS)#np.random.uniform(.5, 1, size=(NUM_PARTS))\n positions = np.random.uniform(-1, 1, size=(NUM_PARTS, 2))\n velocities = np.random.uniform(-3, 3, size=(NUM_PARTS, 2))\n masses = np.ones(NUM_PARTS)#np.random.uniform(0.1, 1, size=NUM_PARTS)\n momentum = np.multiply(velocities, np.repeat(masses, 2).reshape(-1, 2))\n q = np.concatenate([positions, momentum]).ravel()\n qnrk = rk(lambda t, y: diffeq_hyper(t, y, ks, masses, num_particles), (0, T_max), q,\n t_eval=np.arange(0, T_max, dt),\n rtol=1e-12, atol=1e-12, method='DOP853')\n accum = qnrk.y.T\n ssr = int(sub_sample_rate / dt)\n accum = accum[::ssr]\n daccum = np.array([diffeq_hyper(0, accum[i], ks, masses, num_particles) for i in range(accum.shape[0])])\n energies = []\n lags = []\n for i in range(accum.shape[0]):\n ktmp, utmp = hamiltonian(accum[i], masses, ks, NUM_PARTS)\n energies.append(ktmp + utmp)\n lags.append(ktmp - utmp)\n\n accum += np.random.randn(*accum.shape) * noise_std\n 
daccum += np.random.randn(*daccum.shape) * noise_std\n\n theta.append(accum)\n dtheta.append(daccum)\n energy.append(energies)\n mass_arr.append(masses)\n ks_arr.append(ks)\n lagrangian.append(lags)\n\n collater['x'] = np.concatenate(theta)\n collater['dx'] = np.concatenate(dtheta)\n collater['energy'] = np.concatenate(energy)\n collater['lagrangian'] = np.concatenate(lagrangian)\n\n collater['mass'] = mass_arr\n collater['ks'] = ks_arr\n\n f = open(name + \".pkl\", \"wb\")\n pickle.dump(collater, f)\n f.close()\n\n return collater", "def velocity_dispersion_from(\r\n self, redshift_0: float, redshift_1: float, einstein_radius: float\r\n ) -> float:\r\n const = constants.c.to(\"kpc / s\")\r\n\r\n angular_diameter_distance_to_redshift_0_kpc = (\r\n self.angular_diameter_distance_to_earth_in_kpc_from(redshift=redshift_1)\r\n )\r\n\r\n angular_diameter_distance_to_redshift_1_kpc = (\r\n self.angular_diameter_distance_to_earth_in_kpc_from(redshift=redshift_1)\r\n )\r\n\r\n angular_diameter_distance_between_redshifts_kpc = (\r\n self.angular_diameter_distance_between_redshifts_in_kpc_from(\r\n redshift_0=redshift_0, redshift_1=redshift_1\r\n )\r\n )\r\n\r\n kpc_per_arcsec = self.kpc_per_arcsec_from(redshift=redshift_0)\r\n\r\n einstein_radius_kpc = einstein_radius * kpc_per_arcsec\r\n\r\n velocity_dispersion_kpc = const * np.sqrt(\r\n (einstein_radius_kpc * angular_diameter_distance_to_redshift_1_kpc)\r\n / (\r\n 4\r\n * np.pi\r\n * angular_diameter_distance_to_redshift_0_kpc\r\n * angular_diameter_distance_between_redshifts_kpc\r\n )\r\n )\r\n\r\n return velocity_dispersion_kpc.to(\"km/s\").value", "def scale_fixed_M2V(seed=425, th=150, fmass=1, fb=1, fv=1, rfig=False):\n \n # impact parameters\n M = 1e8*u.Msun\n B0 = 19.85*u.kpc\n V0 = 220*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta0 = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.05*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 3000\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n Bs = 20*u.kpc\n xr = Bs + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n \n # generate stream model\n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V0.si.value, theta0.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq.ra.deg[::10], xeq.dec.deg[::10])\n xi, eta 
= myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V0.si.value, theta0.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n farray = np.array([0.3, 0.5, 1, 2, 3])\n #farray = np.array([0.5, 1, 2])\n #farray = np.array([0.5, 1])\n \n rasterized = False\n if rfig:\n rasterized = True\n \n plt.close()\n fig, ax = plt.subplots(1,1,figsize=(12,12), sharex=True, squeeze=False)\n \n for e, f in enumerate(farray):\n fsqrt = np.sqrt(f)\n par_perturb = np.array([f*fmass*M.si.value, 0., 0., 0.])\n #B = B0\n \n dB = (B0 - Bs)*fb\n B = dB + Bs\n \n vpar = Vh + np.cos(theta0.rad)*V0\n vperp = np.sin(theta0.rad)*V0\n \n vpar_scaled = vpar*f\n vperp_scaled = vperp*f\n \n V = np.sqrt((vpar_scaled-Vh)**2 + vperp_scaled**2)\n theta = coord.Angle(np.arctan2(vperp_scaled, vpar_scaled-Vh))\n \n #fi = np.abs(V*T/(dB/f)).decompose()\n fi = np.abs(dB/(vperp_scaled)).to(u.Myr)\n #print(fi)\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n \n color = '{:f}'.format(0.65 - 0.65*(e+1)/(np.size(farray)) + 0.35)\n ms = 1.5*(e+2)\n zorder = np.size(farray)-e\n label = 'f={:g}, $t_{{imp}}$={:.1f}'.format(f, fi)\n #print(e, p, color)\n \n plt.sca(ax[0][0])\n plt.plot(xi.wrap_at(wangle), eta, 'o', mec='none', color=color, ms=ms, zorder=zorder, label=label, rasterized=rasterized)\n \n #for i in range(3):\n #plt.sca(ax[i+1])\n #vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n #plt.plot(xi.wrap_at(wangle), veq[i]-vexp, 'o', mec='none', color=color, ms=ms, zorder=zorder, rasterized=rasterized)\n \n # label axes\n plt.sca(ax[0][0])\n plt.ylabel('$\\phi_1$ [deg]')\n plt.ylim(-10,10)\n plt.xlim(65,135)\n #plt.gca().set_aspect('equal')\n plt.legend(fontsize='x-small', loc=2)\n plt.title('f M, f V | M = {:g} | V = {:g} | $\\\\theta$ = {:.0f}'.format(fmass*M, V.to(u.km/u.s), theta.to(u.deg)), fontsize='medium')\n \n #vlabel = ['$\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]','$\\mu_{\\delta}$ [mas yr$^{-1}$]', '$V_r$ [km 
s$^{-1}$]']\n #ylims = [[-0.5, 0.5], [-0.5, 0.5], [-25,25]]\n #ylims = [[-1,1], [-1,1], [-50,50]]\n #for i in range(3):\n #plt.sca(ax[i+1])\n #plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n #plt.ylim(*ylims[i])\n\n plt.xlabel('$\\phi_2$ [deg]')\n \n plt.tight_layout()\n \n if rfig:\n return fig\n else:\n plt.savefig('../plots/scale_MV_th{:03d}_{:.1f}_{:.1f}.png'.format(th, fmass, fv))", "def get_hc_external(self, weather, surface, h_surface, terrain):\r\n roughness = surface.construction[0].roughness_unit # Change back to this line...left as below to match Na's\r\n if roughness == \"VeryRough\":\r\n D = 11.58\r\n E = 5.894\r\n F = 0\r\n elif roughness == \"Rough\":\r\n D = 12.49\r\n E = 4.065\r\n F = 0.028\r\n elif roughness == \"MediumRough\":\r\n D = 10.79\r\n E = 4.192\r\n F = 0.0\r\n elif roughness == \"MediumSmooth\":\r\n D = 8.23\r\n E = 4.0\r\n F = -0.057\r\n elif roughness == \"Smooth\":\r\n D = 10.22\r\n E = 3.1\r\n F = 0.0\r\n elif roughness == \"VerySmooth\":\r\n D = 8.23\r\n E = 3.33\r\n F = -0.036\r\n else:\r\n D = 8.23\r\n E = 4.0\r\n F = -0.057\r\n print \"No Roughness Value Found so Set Default Values of 8.23,4.0,-0.057\"\r\n\r\n wind_speed_temp = weather[\"wind_speed\"]\r\n # Terrain Lookup Table\r\n if terrain == 'Flat or Open Countryside':\r\n sigma = 270\r\n a = 0.14\r\n elif terrain == 'Rough or Wooded Country':\r\n sigma = 370\r\n a = 0.22\r\n elif terrain == 'Towns and City Scapes':\r\n sigma = 460\r\n a = 0.33\r\n elif terrain == 'Ocean Front Areas':\r\n sigma = 210\r\n a = 0.10\r\n elif terrain == 'Urban, Industrial, or Forest':\r\n sigma = 370\r\n a = 0.22\r\n else:\r\n sigma = 370\r\n a = 0.22\r\n print \"No Terrain Type Found so Set Default Values of 370,0.22\"\r\n terrain_sigma = sigma\r\n terrain_cof = a\r\n\r\n # Adjust the wind speed...Stable air above human inhabited areas:\r\n #wind_speed = wind_speed_temp * ((h_surface / 10) ** 0.5) # This was the line used to get wind_speed before terrain was added\r\n # Wind speed corrected for terrain differences;\r\n wind_speed = wind_speed_temp * ((270/10) ** 0.14) * (h_surface/terrain_sigma) ** terrain_cof\r\n #print wind_speed\r\n # Calculate the hc_external\r\n # hc_external= D+E*Wind_speed+F*Wind_speed^2\r\n hc_external = D + (E * wind_speed) + (F * wind_speed ** 2)\r\n\r\n # depending on the direction of the wind adjust the hc_external...as of versions 3 and 4 this part seems omitted\r\n #x = abs(wind_speed_dir - azimuth)\r\n #if x > 100:\r\n # if x < 260:\r\n # hc_external *= 0.5\r\n #print \"hc_external : \", hc_external, D, E, F\r\n\r\n return round(hc_external, 5)", "def get_focal_point(patches, shell_point, num_rays=20):\n focal_point = Point3D(0.0, 0.0, 0.0)\n for patch in patches:\n #create a bunch of parallel rays coming from the eye\n ray_vector = normalize(shell_point)\n \n ##TODO: remove me\n #ray_vector = normalize(patch.shell_point)\n \n ray_rotation = numpy.zeros((3, 3))\n optics.rotation_matrix.R_2vect(ray_rotation, PRINCIPAL_RAY, ray_vector)\n rays = []\n for x in numpy.linspace(-LIGHT_RADIUS, LIGHT_RADIUS, num_rays*2+1):\n for y in numpy.linspace(-LIGHT_RADIUS, LIGHT_RADIUS, num_rays*2+1):\n start_point = ray_rotation.dot(Point3D(x, y, 0.0))\n rays.append(Ray(start_point, start_point + ray_vector))\n \n #find the point such that the spot size is minimized on the screen.\n #can average the normal of the reflected rays to get approximately where the screen goes\n #then iteratively try different distances until we've minimized the spot size there\n focal_point = Point3D(0.0, 0.0, 0.0)\n 
reflected_rays = [ray for ray in patch.reflect_rays_no_bounds(rays) if ray != None]\n approximate_screen_normal = sum([normalize(ray.start - ray.end) for ray in reflected_rays]) / len(reflected_rays)\n if optics.debug.PATCH_FOCAL_REFLECTIONS:\n #TODO: all rays don't come from the origin. draw all rays from their actual start points, and draw non-reflected rays going past the surface\n #also, only draw the part of the surface that is real and should be reflected from\n axes = matplotlib.pyplot.subplot(111, projection='3d')\n size = 5\n num_points = 10\n x, y = numpy.meshgrid(numpy.linspace(-size, size, num_points), numpy.linspace(-size, size, num_points))\n axes.scatter(x, y, patch.poly.get_z_for_plot(x, y), c='r', marker='o').set_label('patch')\n for ray in reflected_rays:\n debug_dist = 2*numpy.linalg.norm(ORIGIN - ray.start)\n rays_to_draw = numpy.array([\n patch.poly_space.point_to_space(ORIGIN),\n patch.poly_space.point_to_space(ray.start),\n patch.poly_space.point_to_space(debug_dist * normalize(ray.end-ray.start) + ray.start)\n ])\n axes.plot(rays_to_draw[:, 0], rays_to_draw[:, 1], rays_to_draw[:, 2], label=\"ray\")\n axes.set_xlabel('X')\n axes.set_ylabel('Y')\n axes.set_zlabel('Z')\n matplotlib.pyplot.legend()\n matplotlib.pyplot.show()\n def calculate_spot_size(distance):\n \"\"\"\n :returns: average distance from the central point for the plane at this distance\n \"\"\"\n screen_plane = Plane(distance * approximate_screen_normal * -1.0 + shell_point, approximate_screen_normal)\n points = []\n for ray in reflected_rays:\n points.append(screen_plane.intersect_line(ray.start, ray.end))\n average_point = sum(points) / len(points)\n errors = [numpy.linalg.norm(p - average_point) for p in points]\n if optics.debug.PATCH_FOCAL_SPOT_SIZE:\n #use coordinate space to move everything to the xy plane\n space = CoordinateSpace(screen_plane._point, screen_plane._normal)\n transformed_points = numpy.array([space.point_to_space(p) for p in points])\n matplotlib.pyplot.plot(transformed_points[:, 0], transformed_points[:, 1], \"r\", linestyle='None', marker='o', label=\"rays at %s\" % (distance))\n matplotlib.pyplot.legend()\n matplotlib.pyplot.show()\n #keep a fixed scale to x and y so that each graph can be compared with the previous\n #should probably print the errors as well\n print errors\n print sum(errors) / len(errors)\n return sum(errors) / len(errors)\n previous_distance = numpy.linalg.norm(patch.shell_point - patch.screen_point)\n min_dist = previous_distance * 0.9\n max_dist = previous_distance * 1.1\n num_iterations = 20\n tolerance = 0.0001\n best_dist = scipy.optimize.fminbound(calculate_spot_size, min_dist, max_dist, maxfun=num_iterations, xtol=tolerance, full_output=False, disp=0)\n focal_point += best_dist * approximate_screen_normal * -1.0 + shell_point\n return focal_point / len(patches)", "def expansionConservationHotHeightDefined(self, mat: str, isotope: str):\n hotHeight = 1.0\n\n circle1 = Circle(\"circle\", mat, self.tCold, self.tWarm, self.coldOuterDiameter)\n circle2 = Circle(\"circle\", mat, self.tCold, self.tHot, self.coldOuterDiameter)\n\n # mass density is proportional to Fe number density and derived from\n # all the number densities and atomic masses\n self.assertAlmostEqual(\n circle1.p.numberDensities[isotope] / circle2.p.numberDensities[isotope],\n circle1.density() / circle2.density(),\n )\n\n # the colder one has more because it is the same cold outer diameter\n # but it would be taller at the same temperature\n mass1 = circle1.density() * circle1.getArea() * 
hotHeight\n mass2 = circle2.density() * circle2.getArea() * hotHeight\n self.assertGreater(mass1, mass2)\n\n # they are off by factor of thermal exp\n self.assertAlmostEqual(\n mass1 * circle1.getThermalExpansionFactor(),\n mass2 * circle2.getThermalExpansionFactor(),\n )\n\n # material.pseudoDensity is the 2D density of a material\n # material.density is true density and not equal in this case\n for circle in [circle1, circle2]:\n # 2D density is not equal after application of coldMatAxialExpansionFactor\n # which happens during construction\n self.assertNotAlmostEqual(\n circle.density(),\n circle.material.pseudoDensity(Tc=circle.temperatureInC),\n )\n # 2D density is off by the material thermal exp factor\n percent = circle.material.linearExpansionPercent(Tc=circle.temperatureInC)\n thermalExpansionFactorFromColdMatTemp = 1 + percent / 100\n self.assertAlmostEqual(\n circle.density() * thermalExpansionFactorFromColdMatTemp,\n circle.material.pseudoDensity(Tc=circle.temperatureInC),\n )\n self.assertAlmostEqual(\n circle.density(),\n circle.material.density(Tc=circle.temperatureInC),\n )\n\n # brief 2D expansion with set temp to show mass is conserved\n # hot height would come from block value\n warmMass = circle1.density() * circle1.getArea() * hotHeight\n circle1.setTemperature(self.tHot)\n hotMass = circle1.density() * circle1.getArea() * hotHeight\n self.assertAlmostEqual(warmMass, hotMass)\n circle1.setTemperature(self.tWarm)\n\n # Change temp to circle 2 temp to show equal to circle2\n # and then change back to show recoverable to original values\n oldArea = circle1.getArea()\n initialDens = circle1.density()\n\n # when block.setHeight is called (which effectively changes component height)\n # component.setNumberDensity is called (for solid isotopes) to adjust the number\n # density so that now the 2D expansion will be approximated/expanded around\n # the hot temp which is akin to these adjustments\n heightFactor = circle1.getHeightFactor(self.tHot)\n circle1.adjustDensityForHeightExpansion(self.tHot) # apply temp at new height\n circle1.setTemperature(self.tHot)\n\n # now its density is same as hot component\n self.assertAlmostEqual(\n circle1.density(),\n circle2.density(),\n )\n\n # show that mass is conserved after expansion\n circle1NewHotHeight = hotHeight * heightFactor\n self.assertAlmostEqual(\n mass1, circle1.density() * circle1.getArea() * circle1NewHotHeight\n )\n\n self.assertAlmostEqual(\n circle1.density(),\n circle1.material.density(Tc=circle1.temperatureInC),\n )\n # change back to old temp\n circle1.adjustDensityForHeightExpansion(self.tWarm)\n circle1.setTemperature(self.tWarm)\n\n # check for consistency\n self.assertAlmostEqual(initialDens, circle1.density())\n self.assertAlmostEqual(oldArea, circle1.getArea())\n self.assertAlmostEqual(mass1, circle1.density() * circle1.getArea() * hotHeight)", "def coeff_display_M202(Nstar=1,seeing=[0.9,0.,0.],npix=npix,zenith=0,filter='r', theta=0., phi=0,corrector='corrector',x=0.,y=0.,z=0.,zernike_max_order=20,regular=False):\n hdu = genImgVallCCD(Nstar=Nstar,seeing=seeing,npix=npix,zenith=zenith,filter=filter, theta=theta,phi=phi, corrector=corrector,x=x,y=y,z=z,regular=regular)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n img = hdui.data[i][4:].reshape(npix,npix)\n img = 
rebin(img,(40,40))\n M20,M22,M31,M33=complexMoments(data=img,sigma=4.)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data) \n betaAll=[]\n betaErrAll=[]\n R2adjAll=[]\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,2].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n for i in range(3,6):\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].imag,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n betaAll = np.array(betaAll)\n betaErrAll = np.array(betaErrAll)\n R2adjAll = np.array(R2adjAll)\n ind = np.arange(len(betaAll[0]))\n momname = ('M20','M22.Real','M22.imag','M31.real','M31.imag','M33.real','M33.imag')\n fmtarr = ['bo-','ro-','go-','co-','mo-','yo-','ko-']\n pl.figure(figsize=(17,7))\n for i in range(3):\n pl.subplot(4,1,i+1)\n pl.errorbar(ind[1:],betaAll[i][1:],yerr = betaErrAll[i][1:],fmt=fmtarr[i])\n if i == 0:\n pl.title('x: '+str(hdu[0].header['x'])+' y: '+str(hdu[0].header['y'])+' z: '+str(hdu[0].header['z'])+' tilt: '+str(hdu[0].header['theta'])+' fwhm: '+str(hdu[0].header['s_fwhm'])+' e1: '+str(hdu[0].header['e1'])+' e2: '+str(hdu[0].header['e2']))\n pl.grid()\n pl.xlim(-1,len(betaAll[i])+1)\n pl.ylim(min(betaAll[i][1:])-0.5,max(betaAll[i][1:])+0.5)\n #pl.ylim(-0.1,0.1)\n pl.xticks(ind,('','','','','','','','','','','','','','','','','','','',''))\n pl.ylabel(momname[i])\n pl.xticks(ind,('Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20'),rotation=90)\n pl.xlabel('Zernike Coefficients')\n return betaAll,betaErrAll", "def scale_fixed_V2B(seed=425, th=150, fmass=1, fb=1, fv=1, rfig=False):\n \n # impact parameters\n M = 1e8*u.Msun\n B0 = 19.85*u.kpc\n V0 = 220*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 3000\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n Bs = 20*u.kpc\n xr = Bs + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n \n # generate stream model\n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V0.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = 
(np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq.ra.deg[::10], xeq.dec.deg[::10])\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V0.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n farray = np.array([0.3, 0.5, 1, 2, 3])\n \n rasterized = False\n if rfig:\n rasterized = True\n \n plt.close()\n fig, ax = plt.subplots(4,1,figsize=(12,12), sharex=True)\n \n for e, f in enumerate(farray):\n fsqrt = np.sqrt(f)\n par_perturb = np.array([fmass*M.si.value, 0., 0., 0.])\n #B = B0\n \n dB = (B0 - Bs)*fb\n B = dB*f + Bs\n \n V = fv*V0/fsqrt\n \n #fi = np.abs(V*T/(dB/f)).decompose()\n fi = np.abs(dB/V).to(u.Myr)\n #print(fi)\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n \n color = '{:f}'.format(0.65 - 0.65*(e+1)/(np.size(farray)) + 0.35)\n ms = 1.5*(e+2)\n zorder = np.size(farray)-e\n label = 'f={:g}, $t_{{imp}}$={:.1f}'.format(f, fi)\n #print(e, p, color)\n \n plt.sca(ax[0])\n plt.plot(xi.wrap_at(wangle), eta, 'o', mec='none', color=color, ms=ms, zorder=zorder, label=label, rasterized=rasterized)\n \n for i in range(3):\n plt.sca(ax[i+1])\n vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n plt.plot(xi.wrap_at(wangle), veq[i]-vexp, 'o', mec='none', color=color, ms=ms, zorder=zorder, rasterized=rasterized)\n \n # label axes\n plt.sca(ax[0])\n plt.ylabel('$\\phi_1$ [deg]')\n plt.ylim(-10,10)\n plt.xlim(65,135)\n #plt.gca().set_aspect('equal')\n plt.legend(fontsize='x-small', loc=2)\n plt.title('f M, f B | M = {:g} | B = {:g} | $\\\\theta$ = {:.0f}'.format(fmass*M, dB.to(u.pc), theta), fontsize='medium')\n \n 
vlabel = ['$\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]','$\\mu_{\\delta}$ [mas yr$^{-1}$]', '$V_r$ [km s$^{-1}$]']\n ylims = [[-0.5, 0.5], [-0.5, 0.5], [-25,25]]\n ylims = [[-1,1], [-1,1], [-50,50]]\n for i in range(3):\n plt.sca(ax[i+1])\n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n\n plt.xlabel('$\\phi_2$ [deg]')\n \n plt.tight_layout()\n \n if rfig:\n return fig\n else:\n plt.savefig('../plots/scale_VB_th{:03d}_{:.1f}_{:.1f}.png'.format(th, fv, fb))", "def pwl(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n Bvec_complete = []\n Sol_complete = []\n model_complete = []\n meas_complete = []\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n numAZ = int(360./azSpacing)\n print(\"numAZ\",numAZ)\n pwl_All = np.zeros((numAZ,numZD))\n pwlSig_All = np.zeros((numAZ,numZD))\n #pwl_All = np.zeros((numZD,numAZ))\n #pwlSig_All = np.zeros((numZD,numAZ))\n\n for j in range(0,numAZ):\n # Find only those value within this azimuth bin:\n if(j - azSpacing/2. < 0) :\n criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) )\n ind = np.array(np.where(criterion))[0]\n azData =data[ind,:]\n numd = np.shape(azData)[0]\n if numd < 2:\n continue\n\n # Neq is acting like a constrain on the model a small value 0.001\n # let the model vary by 1000 mm\n # will let it vary more. a large value -> 1 will force the model to be closer to 0\n Neq = np.eye(numZD,dtype=float) * 0.001\n Apart = np.zeros((numd,numZD))\n #aiz = j* int(np.floor(360./zenSpacing))\n \n for i in range(0,numd):\n iz = int(np.floor(azData[i,2]/zenSpacing)) #+ aiz\n Apart[i,iz] = (1.-(azData[i,2]-float(iz)*zenSpacing)/zenSpacing)\n Apart[i,iz+1] = (azData[i,2]-float(iz)*zenSpacing)/zenSpacing\n #Apart_1 = (1.-(azData[i,2]-float(iz)*zenSpacing)/zenSpacing)\n #Apart_2 = (azData[i,2]-float(iz)*zenSpacing)/zenSpacing\n\n prechi = np.dot(azData[:,3].T,azData[:,3])\n\n Neq = np.add(Neq, np.dot(Apart.T,Apart) )\n Bvec = np.dot(Apart.T,azData[:,3])\n for val in Bvec:\n Bvec_complete.append(val)\n Cov = np.linalg.pinv(Neq)\n Sol = np.dot(Cov,Bvec)\n\n for val in Sol:\n Sol_complete.append(val)\n\n postchi = prechi - np.dot(Bvec.T,Sol)\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n \n model = np.dot(Apart,Sol)\n\n for d in range(0,numd):\n meas_complete.append(azData[d,3])\n model_complete.append(model[d])\n\n pwl_All[j,:] = Sol \n pwlSig_All[j,:] = pwlsig\n\n #print(\"Sol:\",Sol)\n #print(\"PWL:\",pwl_All[j,:])\n\n #pwl_All[:,j] = Sol \n #print(\"Sol:\",np.shape(Sol),np.shape(pwl_All))\n #pwlSig_All[:,j] = pwlsig\n del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind\n\n # Calculate the AIC and BIC values...\n f = loglikelihood(np.array(meas_complete),np.array(model_complete))\n numd = np.size(meas_complete)\n dof = numd - np.shape(Sol_complete)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n\n prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))\n postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))\n\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n stats = {}\n stats['prechi'] = np.sqrt(prechi/numd)\n stats['postchi'] = np.sqrt(postchi/numd)\n stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)\n stats['aic'] = aic\n 
stats['bic'] = bic\n\n return pwl_All, pwlSig_All, stats", "def rho(SA, CT, p):\n\n SA = np.maximum(SA, 0)\n\n xs = np.sqrt(sfac * SA + soffset)\n ys = CT * 0.025\n z = p * 1e-4\n\n specific_volume = (v000\n + xs * (v100 + xs * (v200 + xs * (v300 + xs * (v400 + xs * (v500\n + xs * v600)))))\n + ys * (v010\n + xs * (v110 + xs * (v210 + xs * (v310 + xs * (v410 + xs * v510))))\n + ys * (v020 + xs * (v120 + xs * (v220 + xs * (v320 + xs * v420)))\n + ys * (v030 + xs * (v130 + xs * (v230 + xs * v330))\n + ys * (v040 + xs * (v140 + xs * v240)\n + ys * (v050 + xs * v150 + ys * v060)))))\n + z * (v001\n + xs * (v101 + xs * (v201 + xs * (v301 + xs * (v401 + xs * v501))))\n + ys * (v011 + xs * (v111 + xs * (v211 + xs * (v311 + xs * v411)))\n + ys * (v021 + xs * (v121 + xs * (v221 + xs * v321))\n + ys * (v031 + xs * (v131 + xs * v231)\n + ys * (v041 + xs * v141 + ys * v051))))\n + z * (v002\n + xs * (v102 + xs * (v202 + xs * (v302 + xs * v402)))\n + ys * (v012 + xs * (v112 + xs * (v212 + xs * v312))\n + ys * (v022 + xs * (v122 + xs * v222)\n + ys * (v032 + xs * v132 + ys * v042)))\n + z * (v003\n + xs * (v103 + xs * v203)\n + ys * (v013 + xs * v113 + ys * v023)\n + z * (v004 + xs * v104 + ys * v014\n + z * (v005 + z * v006))))))\n\n return 1. / specific_volume", "def update_pressure(self):\n m_multipliers = np.ones(self.mesh.get_number_of_cells())\n\n\n rhs_current = np.zeros(self.mfd.get_number_of_dof()) \n rhs_current += self.rhs_mfd\n\n\n for cell_index in range(self.mesh.get_number_of_cells()):\n density = -self.ref_pressure\n density += self.current_pressure[cell_index]\n density *= self.compressibility\n density += 1.\n density *= self.ref_density\n\n # We multiply by the inverse of \\frac{\\rho}{\\mu}\n m_multipliers[cell_index] = self.viscosity/density\n\n c_entry = self.compressibility\n c_entry *= self.porosities[cell_index]\n c_entry /= self.delta_t\n c_entry *= self.mesh.get_cell_volume(cell_index)\n\n rhs_current[self.mesh.get_number_of_faces()+\n cell_index] += c_entry*self.current_pressure[cell_index]\n\n self.lhs_coo.data[self.c_start+cell_index] = c_entry\n\n for [index, cell_index] in enumerate(self.rate_wells):\n rhs_current[self.mesh.get_number_of_faces()+cell_index] += \\\n self.rate_wells_rate[index]\n\n self.mfd.update_m(self.lhs_coo.data[:self.m_x_coo_length], m_multipliers)\n\n solution = dsolve.spsolve(self.lhs_coo.tocsr(), rhs_current)\n self.prev_pressure = self.current_pressure\n self.current_pressure = solution[self.mesh.get_number_of_faces():]\n self.current_velocity = solution[:self.mesh.get_number_of_faces()]", "def plotblackbody(_zband, _yband, _jband, _hband, _kband, _parallax, _perr):\n # Set pyplot style to be consistent within the program\n plt.style.use('seaborn-whitegrid')\n # Import raw data to plot Hertzsprung-Russell diagram\n _hrdata = inithr('hr.dat')\n # Determine distance in parsecs\n _distance = 1 / np.tan(_parallax * 10**-3)\n _derr = (_perr * 10**-3) / ((_parallax * 10**-3)**2)\n # Create single data array with all bands\n _bands = [_zband, _yband, _jband, _hband, _kband]\n _lambda = [0.9, 1.02, 1.22, 1.63, 2.2]\n # Set up empty arrays for each star\n _largestar = np.zeros((1, 2))\n _smallstar = np.zeros((1, 2))\n\n # Determine the spectral flux density from the large star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # The large star uses the maximum flux value (smallest magnitude)\n _largestar = np.append(_largestar, np.array([_lambda[i], (magtoflux(_min, 
i))], ndmin=2), axis=0)\n i += 1\n # Delete first empty row of the array\n _largestar = np.delete(_largestar, 0, axis=0)\n\n # Determine the spectral flux density from the small star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # Smaller star flux value is combined value minus the large star\n _smallstar = np.append(_smallstar, np.array([_lambda[i], (magtoflux(_max, i) -\n magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete the first empty row of the array\n _smallstar = np.delete(_smallstar, 0, axis=0)\n\n # Determine the luminosity and effective temperature of each star\n _luma, _lumaerr, _wiena = getwientemp(_largestar, _distance, _derr, 1)\n _lumb, _lumberr, _wienb = getwientemp(_smallstar, _distance, _derr, 2)\n\n # Calculate luminosities in solar units\n _solluma = _luma / (3.828*10**26)\n _sollumb = _lumb / (3.828*10**26)\n _lumaerr = _lumaerr / (3.828*10**26)\n _lumberr = _lumberr / (3.828*10**26)\n\n # Calculate masses using the mass/luminosity relation in solar mass units\n # N.B. only works as an approximation for main sequence stars, giants and dwarfs are not sutiable for this\n # approximation\n _solmassa = np.power(_solluma, 1/3.5)\n _solmassaerr = ((_solmassa * (1/3.5) * _lumaerr) / _solluma)**2\n _solmassb = np.power(_sollumb, 1/3.5)\n _solmassberr = ((_solmassb * (1 / 3.5) * _lumberr) / _sollumb) ** 2\n\n # Calculate stellar radius in solar radii using the relationship between luminosity, surface area and temperature\n _solrada = np.sqrt(_solluma / np.power(_wiena / 5778, 4))\n _solradb = np.sqrt(_sollumb / np.power(_wienb / 5778, 4))\n _solradaerr = ((_solrada * 0.5 * _lumaerr) / _solluma)**2\n _solradberr = ((_solradb * 0.5 * _lumberr) / _sollumb)**2\n\n # Output determined values to the screen and write to file\n print('Values for the large star:')\n print('Effective temperature: ' + str(round_sig(_wiena)))\n print('Solar luminosities: ' + str(round_sig(_solluma)) + ', error: ' + str(round_sig(_lumaerr)))\n print('Solar radii: ' + str(round_sig(_solrada)) + ', error: ' + str(round_sig(_solradaerr)))\n print('Solar masses: ' + str(round_sig(_solmassa)) + ', error: ' + str(round_sig(_solmassaerr)))\n print('-----------------------------------------------------')\n print('Values for the small star:')\n print('Effective temperature: ' + str(round_sig(_wienb)))\n print('Solar luminosities: ' + str(round_sig(_sollumb)) + ', error: ' + str(round_sig(_lumberr)))\n print('Solar radii: ' + str(round_sig(_solradb)) + ', error: ' + str(round_sig(_solradberr)))\n print('Solar masses: ' + str(round_sig(_solmassb)) + ', error: ' + str(round_sig(_solmassberr)))\n\n # Convert from luminosity to magnitude in solar units\n _luma = -2.5 * np.log10(_luma / (3.0128 * 10**28))\n _lumb = -2.5 * np.log10(_lumb / (3.0128 * 10**28))\n\n # Plot Hertzsprung-Russell diagram using provided array\n plt.scatter(_hrdata[:, 1], _hrdata[:, 0], s=0.5)\n # Plot determined values for each star\n plt.scatter(_wiena, _luma, s=16, c='red', label='Larger Star')\n plt.scatter(_wienb, _lumb, s=16, c='green', label='Smaller Star')\n # Set the x and y axis limits to sensible values\n plt.legend()\n plt.xlim(3000, 10000)\n plt.ylim(-10, 20)\n # Invert both axes as convention\n plt.gca().invert_xaxis()\n plt.gca().invert_yaxis()\n # Save figure to current folder\n plt.savefig('hr.png')\n # Display to screen\n plt.show()", "def find_metallicity_distribution(redshifts, min_logZ_COMPAS, max_logZ_COMPAS,\n mu0=0.035, 
muz=-0.23, sigma_0=0.39, sigma_z=0.0, alpha =0.0,\n min_logZ =-12.0, max_logZ =0.0, step_logZ = 0.01): \n ##################################\n # Log-Linear redshift dependence of sigma\n sigma = sigma_0* 10**(sigma_z*redshifts)\n \n ##################################\n # Follow Langer & Norman 2006 in assuming that mean metallicities evolve in z as:\n mean_metallicities = mu0 * 10**(muz * redshifts) \n \n # Now we re-write the expected value of the log-skew-normal to retrieve mu\n beta = alpha/(np.sqrt(1 + (alpha)**2))\n PHI = NormDist.cdf(beta * sigma) \n mu_metallicities = np.log(mean_metallicities/2. * 1./(np.exp(0.5*sigma**2) * PHI ) ) \n\n ##################################\n # create a range of metallicities (the x-values, or random variables)\n log_metallicities = np.arange(min_logZ, max_logZ + step_logZ, step_logZ)\n metallicities = np.exp(log_metallicities)\n\n\n ##################################\n # probabilities of log-skew-normal (without the factor of 1/Z since this is dp/dlogZ not dp/dZ)\n dPdlogZ = 2./(sigma[:,np.newaxis]) * NormDist.pdf((log_metallicities - mu_metallicities[:,np.newaxis])/sigma[:,np.newaxis]) * NormDist.cdf(alpha * (log_metallicities - mu_metallicities[:,np.newaxis])/sigma[:,np.newaxis] )\n\n ##################################\n # normalise the distribution over all metallicities; this choice of normalisation assumes that metallicities outside the COMPAS range have yields of zero\n norm = dPdlogZ.sum(axis=-1) * step_logZ\n dPdlogZ = dPdlogZ /norm[:,np.newaxis]\n\n ##################################\n # assume a flat in log distribution in sampled metallicity to find probability of drawing Z in COMPAS\n p_draw_metallicity = 1 / (max_logZ_COMPAS - min_logZ_COMPAS)\n \n return dPdlogZ, metallicities, p_draw_metallicity" ]
[ "0.6823951", "0.68156433", "0.64645", "0.62532675", "0.5977594", "0.5888927", "0.5858084", "0.5850966", "0.5778458", "0.5767043", "0.5753279", "0.5737354", "0.5723255", "0.5714657", "0.57088953", "0.5705945", "0.56355387", "0.56164163", "0.561608", "0.56118447", "0.5599761", "0.5590967", "0.55508405", "0.5537181", "0.55344886", "0.55212325", "0.55204123", "0.55125654", "0.55106556", "0.55104506", "0.5502348", "0.549432", "0.5489594", "0.5481778", "0.5480972", "0.54730856", "0.54723644", "0.5470736", "0.54617864", "0.54612345", "0.54589766", "0.54563797", "0.54483247", "0.5432313", "0.5431978", "0.5412071", "0.54118717", "0.5411295", "0.5404955", "0.53949845", "0.538309", "0.5355145", "0.53549194", "0.5351914", "0.53510827", "0.53498095", "0.53405094", "0.53391707", "0.5337755", "0.5337485", "0.53251255", "0.5322845", "0.53223085", "0.5319866", "0.531541", "0.53142065", "0.5308977", "0.53061444", "0.5301485", "0.5292237", "0.5288209", "0.52842253", "0.52799654", "0.5278964", "0.5268154", "0.5267537", "0.5264818", "0.5264683", "0.5252801", "0.52524686", "0.52524114", "0.5248835", "0.52457625", "0.5239873", "0.5238028", "0.52317595", "0.52234656", "0.5214218", "0.52119714", "0.52110606", "0.5210366", "0.5208881", "0.5207216", "0.5206482", "0.52026397", "0.5200289", "0.51998776", "0.5199765", "0.5191052", "0.5190776" ]
0.7354267
0
Test of cooking the same product twice. Test passed if second cooking of same product raise ValueError
def test_cook_twice(cook_not_busy, product_for_cook):
    cook_not_busy.cook_dish(product_for_cook)
    with pytest.raises(ValueError):
        cook_not_busy.cook_dish(product_for_cook)
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False", "def test_buyTicket_AlreadySold():\n assert not testUser2.buyTicket(testTicket1)\n assert testTicket1 in testUser1.inventory\n assert testTicket1 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500", "def test_create_same_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Eggs',\n 'description': '''\n Bird and reptile eggs consist of a protective eggshell,\n albumen (egg white), and vitellus (egg yolk),\n contained within various thin membranes.\n The most commonly consumed eggs are chicken eggs.\n Other poultry eggs including those of duck and quail\n also are eaten.\n '''\n }\n product_count_before = models.Product.objects.count()\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.count(), product_count_before)", "def test_DECISION_repeat_conflict(self, commit):\n self.assertRaises(AssertionError, lambda:\n self.node.fake_message(Decision(slot=1, proposal=PROPOSAL2)))", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())", "def test_6(self):\n toothpaste = Store.Product(11, \"toothpaste\", \"dental\", 2, 4)\n milk = Store.Product(12, \"milk\", \"dairy\", 2, 3)\n eggs = Store.Product(14, \"eggs\", \"dairy\", 2, 2)\n apple_juice = Store.Product(13, \"apple juice\", \"drink\", 1, 1)\n\n s = Store.Store()\n s.add_product(toothpaste)\n s.add_product(milk)\n s.add_product(eggs)\n s.add_product(apple_juice)\n\n henry = Store.Customer(\"henry\", \"mrh\", False)\n s.add_member(henry)\n\n s.add_product_to_member_cart(11, \"mrh\")\n s.add_product_to_member_cart(12, \"mrh\")\n s.add_product_to_member_cart(14, \"mrh\")\n self.assertAlmostEqual(s.check_out_member(\"mrh\"), 6.42, \"not the correct checkout amount\")", "def test_save_item_with_duplicate_value_on_unique_field_raises(\n test_store, andy, pandy, candy\n):\n\n person = next(test_store.get_by(name=\"Andy\"))\n person.name = \"Pandy\"\n\n with pytest.raises(NotUniqueException):\n test_store.save(person)\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items", "def checker(product):\n for item in INSTOCK:\n if item == product:\n return True\n return False", "def test_cart_creation_duplicate_name(self):\n cart_name = 'cart name'\n self.cart_item_manager.create_cart('123', cart_name, False)\n self.cart_item_manager.create_cart('124', cart_name, False)\n with self.assertRaises(DuplicateItemError):\n self.cart_item_manager.create_cart('123', cart_name, False)", "def test_market_1_2(self):\n\n def check_1_2(buyers: List[float], sellers: List[float], expected_num_of_deals: int,\n expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 2]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_2(buyers=[9], sellers=[-4, -3],\n expected_num_of_deals=0, expected_prices=[9, -4.5])\n check_1_2(buyers=[9, 8, 7, 6], sellers=[-6, -5, -4, -3, -2, -1],\n expected_num_of_deals=1, 
expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-6, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n\n # PRICE CROSSES ZERO AT FIRST PHASE\n check_1_2(buyers=list(range(20)), sellers=[-3, -2, -1],\n expected_num_of_deals=1, expected_prices=[18, -9])", "def test_update_cart_name_duplicate(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.create_cart(user_id, 'Cart2', False)\n with self.assertRaises(DuplicateItemError):\n self.cart_item_manager.update_cart(user_id, cart_id, {'CartName': 'Cart2'})", "def test_duplicate_entries(self):", "def test_check_for_existing_reaction_keeps_identical_reactions_with_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=True)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=True)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertFalse(found, 'check_for_existing_reaction failed to identify duplicate template reactions')", "def test_allow_multiples(self):\r\n o1 = self.b1.get(self.key)\r\n o2 = self.b2.get(self.key)\r\n\r\n o1.set_data(\"object-1\")\r\n o1.store()\r\n o2.set_data(\"object-2\")\r\n o2.store()\r\n\r\n conflicted = self.b1.get(self.key)\r\n siblings = filter(bool, (s.get_data() for s in conflicted.get_siblings()))\r\n self.assertEqual(len(siblings), 2)", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_multiple_creates_do_not_increase_products(self):\n for i in xrange(0, 10):\n modified_po = copy.deepcopy(base_purchase_order)\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.post('/api/v1/purchase-order/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 201, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['status'], 'AWAITING APPROVAL')\n\n item1 = po_data['items'][0]\n #self.assertEqual(item1['supply']['id'], 1)\n self.assertEqual(item1['status'], u'Ordered')\n\n item2 = po_data['items'][1]\n #self.assertEqual(item1['supply']['id'], 2)\n self.assertEqual(item1['status'], u'Ordered')\n \n #Test database values\n po = PurchaseOrder.objects.get(pk=resp.data['id'])\n self.assertEqual(po.status, 'AWAITING APPROVAL')\n for item in po.items.all():\n self.assertEqual(item.status, u\"Ordered\")\n \n supplier = Supplier.objects.get(pk=1)\n\n supply = 
Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)\n\n supply = Supply.objects.get(pk=2)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)", "def check_duplicate(self, state):\n pass", "def test_unique_together(self):\n form = PriceForm({'price': '6.00', 'quantity': '1'})\n self.assertTrue(form.is_valid())\n form.save()\n form = PriceForm({'price': '6.00', 'quantity': '1'})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 1)\n self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])", "def test_add_with_not_right_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"-14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_not_repeat_combination(self):\n self.assertTrue(\"-Yao Ming Zydrunas Ilgauskas\", show_players_sumheigh_is_input(177))\n self.assertFalse(show_players_sumheigh_is_input(177), \"-Zydrunas Ilgauskas Yao Ming\")", "def test_check_for_existing_reaction_eliminates_identical_reactions_without_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=False)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=False)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to eliminate reactions without duplicate tag')", "def test_validate_duplicate_wire(self, circuit):\n with pytest.raises(ValueError, match=\"Wire ID 0 is specified more than once.\"):\n circuit._validate_wire_ids(wire_ids=[0, 0])", "def test_add_item_with_duplicate_value_on_unique_field_raises(\n test_store, andy, pandy, candy\n):\n\n person_with_duplicate_name = Person(name=\"Andy\", age=80)\n\n with pytest.raises(NotUniqueException):\n test_store.add(person_with_duplicate_name)\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items", "def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)", "def test_cart_creation_duplicate_default_will_not_create_new_cart(self):\n test_user_id = '123'\n cart_id_1 = self.cart_item_manager.create_cart(test_user_id, 'Cart1', True)\n cart_id_2 = self.cart_item_manager.create_cart(test_user_id, 'Cart3', True)\n self.assertEqual(cart_id_1, cart_id_2)\n self.assertEqual(1, len(self.cart_item_manager.get_user_carts(test_user_id)))", "def test_get_similar_recipes(self):\n pass", "def 
test_check_for_duplicates_with_duplicates(self):\n quotes = [api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author2\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author3\", \"Publication\", [\"tag1, tag2\"])]\n\n with self.assertRaisesRegexp(Exception, \"a duplicate quote was found on line 2 of 'stdin'. \"\n \"Quote: \\\"This is an added quote.\\\".\"):\n\n api._check_for_duplicates(quotes, \"stdin\")", "def products_made(self, product) -> bool:\n return self.product_idx(product) is not None", "def _check_sn_uniqueness(self):\n if self.product_tracking == 'serial' and self.lot_producing_id:\n sml = self.env['stock.move.line'].search_count([\n ('lot_id', '=', self.lot_producing_id.id),\n ('location_id.usage', '=', 'production'),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ])\n if sml:\n raise UserError(_('This serial number for product %s has already been produced', self.product_id.name))\n\n for move in self.move_finished_ids:\n if move.has_tracking != 'serial' or move.product_id == self.product_id:\n continue\n for move_line in move.move_line_ids:\n domain = [\n ('lot_id', '=', move_line.lot_id.id),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ]\n message = _('The serial number %(number)s used for byproduct %(product_name)s has already been produced',\n number=move_line.lot_id.name,\n product_name=move_line.product_id.name)\n co_prod_move_lines = self.move_finished_ids.move_line_ids.filtered(lambda ml: ml.product_id != self.product_id)\n domain_unbuild = domain + [\n ('production_id', '=', False),\n ('location_dest_id.usage', '=', 'production')\n ]\n\n # Check presence of same sn in previous productions\n duplicates = self.env['stock.move.line'].search_count(domain + [\n ('location_id.usage', '=', 'production')\n ])\n if duplicates:\n # Maybe some move lines have been compensated by unbuild\n duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [\n ('move_id.unbuild_id', '!=', False)\n ])\n if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):\n raise UserError(message)\n # Check presence of same sn in current production\n duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line\n if duplicates:\n raise UserError(message)\n\n for move in self.move_raw_ids:\n if move.has_tracking != 'serial':\n continue\n for move_line in move.move_line_ids:\n if float_is_zero(move_line.qty_done, precision_rounding=move_line.product_uom_id.rounding):\n continue\n domain = [\n ('lot_id', '=', move_line.lot_id.id),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ]\n message = _('The serial number %(number)s used for component %(component)s has already been consumed',\n number=move_line.lot_id.name,\n component=move_line.product_id.name)\n co_prod_move_lines = self.move_raw_ids.move_line_ids\n domain_unbuild = domain + [\n ('production_id', '=', False),\n ('location_id.usage', '=', 'production')\n ]\n\n # Check presence of same sn in previous productions\n duplicates = self.env['stock.move.line'].search_count(domain + [\n ('location_dest_id.usage', '=', 'production')\n ])\n if duplicates:\n # Maybe some move lines have been compensated by unbuild\n duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [\n ('move_id.unbuild_id', '!=', False)\n ])\n if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):\n raise 
UserError(message)\n # Check presence of same sn in current production\n duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line\n if duplicates:\n raise UserError(message)", "def test_single_quant_assign_correct_quant(self):\n Quant = self.env[\"stock.quant\"]\n\n # Create a bunch of identical quants in the same location\n quants = Quant.browse()\n for i in range(5):\n quants |= self.create_quant(self.apple.id, self.test_stock_location_01.id, 10)\n self.assertEqual(len(quants), 5)\n\n quant = quants[2]\n pick = quant.create_picking(self.picking_type_pick, confirm=True, assign=True)\n self.assertEqual(pick.state, \"assigned\")\n self.assertEqual(quant.reserved_quantity, 10)", "def test_only_two_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [0], True, False))\n\n self.assertFalse( f( 1, 0, [], True, False))\n self.assertFalse( f( 1, 0, [1], True, False))\n self.assertTrue( f( 1, 0, [2], True, False))\n self.assertFalse( f( 1, 0, [3], True, False))\n self.assertFalse( f( 1, 0, [4], True, False))\n\n self.assertTrue( f( 1, 1, [], True, False))\n self.assertFalse( f( 1, 1, [2], True, False))\n\n self.assertFalse( f( 2, 0, [2], True, False))\n self.assertFalse( f( 2, 0, [3], True, False))\n self.assertTrue( f( 2, 0, [4], True, False))\n self.assertFalse( f( 2, 0, [5], True, False))\n \n self.assertTrue( f( 2, 1, [2], True, False))\n self.assertFalse( f( 2, 1, [3], True, False))\n self.assertFalse( f( 2, 1, [4], True, False))\n\n self.assertTrue( f(13, 26, [], True, False))\n self.assertTrue( f(13, 0, [26], True, False))\n self.assertTrue( f(13, 14, [12], True, False))\n self.assertTrue( f(13, 13, [10], True, False))\n self.assertFalse( f(13, 15, [11], True, False))\n\n self.assertFalse( f( 6, 1, [2,4,6], True, False))\n self.assertTrue( f( 7, 1, [2,4,6], True, False))\n self.assertFalse( f( 8, 1, [2,4,6], True, False))", "def test_create_id_dupe(self):\r\n user = random.getrandbits(32)\r\n courses = modulestore().get_courses()\r\n with self.assertRaises(DuplicateCourseError):\r\n dupe_course_key = courses[0].location.course_key\r\n modulestore().create_course(dupe_course_key.org, dupe_course_key.offering, user)", "def test_compute_correlation_paired_incompatible_samples(self):\r\n self.assertRaises(ValueError, _compute_correlation,\r\n self.taxa_summary1, self.taxa_summary3, 'paired',\r\n 'spearman', 'high', 9, 0.22222)", "def test_is_valid_return_only_good_products(self):\n self.assertTrue(ProductValidator().is_valid(self.good_product))\n self.assertFalse(ProductValidator().is_valid(self.bad_product))", "def testduplicate(self):\n a = AttributeAbility(['ST',], 3)\n self.assertTrue(a.duplicate(a))\n self.assertTrue(a.duplicate(AttributeAbility(['ST',], 3)))\n self.assertTrue(a.duplicate(AttributeAbility(['ST',], 5)))\n self.assertFalse(a.duplicate(AttributeAbility(['DX',], 5)))", "def is_satisfied(self, item: Product) -> bool:\n return item.size == self.size", "def test_confirm_duplicated_consent(self):\n # We create the flow request\n res = self._add_flow_request(flow_request=self.flow_request)\n confirm_id = res.json()['confirm_id']\n process_id = res.json()['process_id']\n callback_url = 'http://127.0.0.1/'\n\n # Then we login as mouse since the mock is configured to return 400 with \"mouse\" login\n self.client.login(username='mouse', password='duck')\n # Then we confirm the request.\n res = self.client.get('/v1/flow_requests/confirm/?confirm_id={}&callback_url={}&action=add'.format(\n 
confirm_id, callback_url))\n self.assertRedirects(res, \"{}?process_id={}&success=false&error={}\".format(callback_url, process_id,\n ERRORS_MESSAGE['ALL_CONSENTS_ALREADY_CREATED']),\n fetch_redirect_response=False)", "def test_identical(self):\n write this test!", "def test_single_quant(self):\n pick = self.quant_1.create_picking(self.picking_type_pick)\n # Confirm made in state draft\n self.assertEqual(pick.state, \"draft\")\n # Confirm quant location used if non specified\n self.assertEqual(pick.location_id, self.test_stock_location_01)\n # Confirm default dest location used if non specified\n self.assertEqual(pick.location_dest_id, self.picking_type_pick.default_location_dest_id)\n # Confirm correct picking type id associated\n self.assertEqual(pick.picking_type_id, self.picking_type_pick)\n # Check default priority is 0 = 'Normal'\n self.assertEqual(pick.priority, \"0\")\n # Check picking has correct products associated to it\n self.assertEqual(pick.product_id, self.apple)\n # Check picking has correct quantities associated to it\n self.assertEqual(pick.move_lines.product_id, self.apple)\n self.assertEqual(pick.move_lines.product_qty, 10)", "def test_cannot_sell_more_than_stock(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":15\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Only 10 NY_denims available right now!')\n self.assertEqual(resp.status_code, 400)", "def test_add_with_end_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-1\", \n \"3\", \"2020-12-1\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_is_consistent(self):\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Bob\", \"12345\")\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Mary\", \"012345\")\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Sue\", \"12345\") # identical to Bob\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Sue\", \"123\") # prefix of Bob\n self.assertTrue(self.phonebook.is_consistent())", "def test_raises(self):\n no_replicates = 25\n try:\n replicate(experiment3, no_replicates)\n except RuntimeError as err:\n self.assertEqual(err, FAKE_ERROR)\n else:\n assert False", "def test_roll_once(self):\n\n self.assertIn(self.new_die.roll(), self.possible_values, \"Rolled value was not in possible die values\")", "def test_check_for_existing_reaction_eliminates_identical_reactions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n 
spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction')", "def test_post_duplicate_question(self):\n self.post_question(self.valid_question2)\n\n\n response = self.post_question(self.valid_question2)\n self.assertEqual(response.status_code, 400)", "def testProtractedNSESanityChecks(self):\n self.assertGreater(self.c3.get_species_richness(1), self.c2.get_species_richness(1))\n self.assertLess(self.c4.get_species_richness(1), self.c3.get_species_richness(1))", "def test_repeated_calls_different_quotes(self):\n quoteSet = set()\n for i in range(5):\n quoteSet.add(getRandomJoke()[\"joke\"])\n self.assertEqual(len(quoteSet) > 1, True)", "def test_cant_create_order_twice(self):\n\t\to2 = BuyInfluenceOrder(\n\t\t\tplayer=self.p\n\t\t)\n\n\t\tself.assertRaises(OrderNotAvailable, o2.clean)", "def check_duplicate(triple: str, result: List[str]) -> bool:\n fields = triple.strip().split(', ')\n assert len(fields) == 13\n assert fields[9] == 'BERT'\n psuedo_triple = fields[:11]\n psuedo_triple[9] = 'RELEVANCE'\n return ', '.join(psuedo_triple) in result", "def test_duplicate_equality(self):\r\n def duplicate_and_verify(source_usage_key, parent_usage_key):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key)\r\n self.assertTrue(check_equality(source_usage_key, usage_key), \"Duplicated item differs from original\")\r\n\r\n def check_equality(source_usage_key, duplicate_usage_key):\r\n original_item = self.get_item_from_modulestore(source_usage_key, draft=True)\r\n duplicated_item = self.get_item_from_modulestore(duplicate_usage_key, draft=True)\r\n\r\n self.assertNotEqual(\r\n original_item.location,\r\n duplicated_item.location,\r\n \"Location of duplicate should be different from original\"\r\n )\r\n # Set the location and display name to be the same so we can make sure the rest of the duplicate is equal.\r\n duplicated_item.location = original_item.location\r\n duplicated_item.display_name = original_item.display_name\r\n\r\n # Children will also be duplicated, so for the purposes of testing equality, we will set\r\n # the children to the original after recursively checking the children.\r\n if original_item.has_children:\r\n self.assertEqual(\r\n len(original_item.children),\r\n len(duplicated_item.children),\r\n \"Duplicated item differs in number of children\"\r\n )\r\n for i in xrange(len(original_item.children)):\r\n if not check_equality(original_item.children[i], duplicated_item.children[i]):\r\n return False\r\n duplicated_item.children = original_item.children\r\n\r\n return original_item == duplicated_item\r\n\r\n duplicate_and_verify(self.problem_usage_key, self.seq_usage_key)\r\n duplicate_and_verify(self.html_usage_key, self.seq_usage_key)\r\n duplicate_and_verify(self.seq_usage_key, self.chapter_usage_key)\r\n 
duplicate_and_verify(self.chapter_usage_key, self.usage_key)", "def test_gather_success(self):\n gathered_items = self.Quant._gather(self.apple, self.test_stock_location_01)\n # Check the number of apple quants returned is correct\n self.assertEqual(len(gathered_items), 3)\n # Check that the products are all of expected type\n self.assertEqual(gathered_items.product_id, self.apple)\n\n # Unfold the returned quants\n _q1, second_quant, _q2 = gathered_items\n # Check when quant_ids is set in the context\n gathered_items_subset = self.Quant.with_context(quant_ids=[second_quant.id])._gather(\n self.apple, self.test_stock_location_01\n )\n self.assertEqual(len(gathered_items_subset), 1)\n self.assertEqual(gathered_items_subset.product_id, self.apple)\n self.assertEqual(gathered_items_subset, second_quant)", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def test_check_ticket_2(self):\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set([1]), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def test_no_oppose_different_sectors(self):\n battle = self.battle\n self.bob.sector = 7\n\n s1 = battle.create_skirmish(self.alice, 2)\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n\n with self.assertRaises(db.WrongSectorException):\n s1.react(self.bob, 2)\n\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n self.assertEqual(4, 
self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))", "def is_satisfied(self, item: Product) -> bool:\n return item.colour == self.colour", "def test_book_uniqueness(self):\n good_book = Book(key=\"bookkey/999999\",title=\"Romeo and Juliet\", author=\"Shakespeare\", description=\"Two star crossed romantics\", \n subjects=\"Romance, Fiction\")\n db.session.add(good_book)\n db.session.commit()\n\n invalid_book = Book(key=\"bookkey/999999\",title=\"Romeo and Juliet\", author=\"Shakespeare\", description=\"Two star crossed romantics\", \n subjects=\"Romance, Fiction\")\n #if we try to commit another book with the same key, it will raise error\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.add(invalid_book)\n db.session.commit()\n #with exception need to rollback commit\n db.session.rollback()", "def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)", "def test_creation_of_duplicate_service_in_store(self):\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id 
+ '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n response3 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response3.status, \"409 CONFLICT\")\n self.assertIn(\"Sorry. Live at the yard already exists in this store.\", str(response3.data))", "def test_teacher_check_homework_raises_homework_repeat_error_if_same_solution_was_already_submitted():\n with pytest.raises(HomeworkRepeatError):\n opp_teacher.check_homework(result_1)\n advanced_python_teacher.check_homework(result_1)\n Teacher.reset_results(oop_hw)", "def test_cook_set_free(cook_busy, product_for_cook):\n cook_busy.set_free(True)\n # if product needs to be cooked\n assert product_for_cook.get_need_cook_status() is True\n cook_busy.cook_dish(product_for_cook)\n assert product_for_cook.get_need_cook_status() is False", "def test_multiple_quants(self):\n # Get all quants in test package\n quants = self.quant_1 | self.quant_2\n pick = quants.create_picking(self.picking_type_pick)\n # Check picking has correct location\n self.assertEqual(pick.location_id, self.stock_location)\n # Check picking has correct products and quantities associated to it\n self.assertEqual(pick.move_lines.product_id, quants.product_id)\n self.assertEqual(pick.move_lines.mapped(\"product_qty\"), [10.0, 10.0])", "def test_product_not_available_by_stock(self):\n product = ProductFactory(stock_amount=2)\n\n for i in range(2):\n opr = OrderProductRelationFactory(product=product)\n order = opr.order\n order.paid = True\n order.save()\n\n self.assertEqual(product.left_in_stock, 0)\n self.assertFalse(product.is_stock_available)\n self.assertFalse(product.is_available())", "def test03_unique_stakeholdercategory(self):\n city1 = CaseStudyFactory(name='City1')\n city2 = CaseStudyFactory(name='City1')\n stakeholdercat1 = StakeholderCategoryFactory(\n casestudy=city1, name='Cat1')\n stakeholdercat2 = StakeholderCategoryFactory(\n casestudy=city1, name='Cat2')\n stakeholdercat3 = StakeholderCategoryFactory(\n casestudy=city2, name='Cat1')\n\n with self.assertRaisesMessage(\n ValidationError,\n 'StakeholderCategory Cat1 already exists in casestudy City1',\n ) as err:\n stakeholdercat3 = StakeholderCategoryFactory(\n casestudy=city2, name='Cat1')\n\n stakeholder_categories = city1.stakeholder_categories\n self.assertSetEqual(stakeholder_categories, {stakeholdercat1,\n stakeholdercat2})", "def test_single_quant_confirm(self):\n pick = self.quant_1.create_picking(self.picking_type_pick, confirm=True)\n # Check it is confirmed\n self.assertEqual(pick.state, \"confirmed\")", "def confirm_harvest_pickle(before, after):\n assert after.strategy.balanceOf > before.strategy.balanceOf\n assert after.strategy.pickleBalance == 0\n assert after.strategy.stakingRewards.stakedPickle == 0\n if before.sett.pricePerFullShare:\n assert after.sett.pricePerFullShare > before.sett.pricePerFullShare", "def _test_validate_c_tag_on_same_s_tag(self):\n s = Mock()\n s.id = 123\n s.c_tag = 111\n s.s_tag = 222\n s.onu_device = \"BRCM1234\"\n\n with self.assertRaises(Exception) as e:\n self.rcord_subscriber.save()\n\n self.assertEqual(e.exception.message, \"The c_tag you specified (111) has already been used by Subscriber with id 123 and the same s_tag: 222\")\n self.models_decl.RCORDSubscriber_decl.save.assert_not_called()", "def testduplicate(self):\n self.assertTrue(WeaponAbility('Guided').duplicate(\n WeaponAbility('Guided')))\n self.assertFalse(WeaponAbility('Guided').duplicate(\n 
WeaponAbility('Changling')))\n self.assertTrue(WeaponAbility('Animated', range=1).duplicate(\n WeaponAbility('Animated', range=3)))\n self.assertTrue(WeaponAbility('Defender', size=1).duplicate(\n WeaponAbility('Defender', size=3)))\n fire = MentalAbility('Fireball')\n ice = MentalAbility('Iceball')\n self.assertTrue(WeaponAbility('Enhanced', abilities=[ice,]).duplicate(\n WeaponAbility('Enhanced', abilities=[fire,])))", "def test_warning_on_duplicate_category(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=2).save()\n response = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertRedirects(response, '/categories/')\n response2 = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertContains(response2, \"already exists\")", "def test_reproducible(self):\n model_1 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_1.train(epochs=2)\n\n model_2 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_2.train(epochs=2)\n self.assertTrue(np.allclose(model_1.kv.syn0, model_2.kv.syn0))", "def test_raise_duplicate_candidate_error(self):\n\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Per\")\n candidate3 = pyrankvote.Candidate(\"Aase\")\n\n def tester(_):\n pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2, candidate3])\n\n msg = \"Candidate 1 and 2 is equal and should raise duplicate candidate error\"\n self.assertRaises(pyrankvote.models.DuplicateCandidatesError, tester, msg)\n\n # TEST THE OPPOSITE\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Maria\")\n candidate3 = pyrankvote.Candidate(\"Aase\")\n\n # This should NOT raise an error\n pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2, candidate3])", "def test_hand_has_two_pair(hand, card_list, expected):\n hand.add_cards(card_list)\n assert hand.has_two_pair() == expected", "def testPassedAlready(self):\n _pass_move = self._pass_move()\n bid_move = self._move()\n context = self._context()\n bfpc = BiddingForPrivateCompany()\n\n self.assertTrue(bfpc.run(_pass_move, context), bfpc.errors())\n self.assertEqual(_pass_move.move_type, BidType.PASS)\n self.assertEqual(len(context.private_companies[1].passed_by), 1)\n\n self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())\n self.assertIn(\"You can only keep bidding until you've passed once.\", bfpc.errors())", "def test_duplicate_cards(hand, card_list):\n with pytest.raises(AssertionError):\n hand.add_cards(card_list)", "def test_no_support_different_sectors(self):\n battle = self.battle\n self.carol.sector = 7\n\n s1 = battle.create_skirmish(self.alice, 2)\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n with self.assertRaises(db.WrongSectorException):\n s1.react(self.carol, 2, hinder=False)\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def test_lots_of_coins_given(self):\n item, change, _ = give_item_and_change('apple', '1.00 0.5 0.2 0.1 0.1 0.05 0.02 0.02 0.01')\n self.assertEqual(item, 'apple')\n self.assertEqual(change, [1.0, 0.5, 0.05, 0.02])", "def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n 
'/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)", "def test_two_and_three_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [], True, True))\n\n self.assertFalse( f( 1, 0, [], True, True))\n self.assertFalse( f( 1, 0, [1], True, True))\n self.assertTrue( f( 1, 0, [2], True, True))\n self.assertTrue( f( 1, 0, [3], True, True))\n self.assertFalse( f( 1, 0, [4], True, True))\n self.assertTrue( f( 1, 1, [], True, True))\n self.assertTrue( f( 1, 2, [], True, True))\n self.assertTrue( f( 1, 3, [], True, True))\n self.assertFalse( f( 1, 4, [], True, True))\n\n self.assertFalse( f( 1, 1, [2], True, True))\n self.assertFalse( f( 1, 1, [3], True, True))\n self.assertFalse( f( 1, 2, [2], True, True))\n self.assertFalse( f( 1, 3, [2], True, True))\n self.assertFalse( f( 1, 3, [3], True, True))\n\n self.assertTrue( f( 2, 1, [2], True, True))\n self.assertTrue( f( 2, 1, [3], True, True))\n self.assertTrue( f( 2, 0, [4], True, True))\n self.assertTrue( f( 2, 0, [5], True, True))\n self.assertTrue( f( 2, 0, [6], True, True))\n self.assertTrue( f( 2, 4, [], True, True))\n self.assertTrue( f( 2, 5, [], True, True))\n self.assertTrue( f( 2, 6, [], True, True))\n \n self.assertTrue( f(13, 26, [], True, True))\n self.assertTrue( f(13, 39, [], True, True))\n self.assertTrue( f(13, 0, [26], True, True))\n self.assertTrue( f(13, 14, [12], True, True))\n self.assertTrue( f(13, 13, [10], True, True))\n self.assertTrue( f(13, 15, [11], True, True))\n self.assertFalse( f(13, 40, [], True, True))\n self.assertFalse( f(13, 11, [3], True, True))\n\n self.assertFalse( f(4, 1, [2,3,6], True, True))\n self.assertTrue( f(5, 1, [2,3,6], True, True))\n self.assertTrue( f(6, 1, [2,3,6], True, True))\n self.assertFalse( f(7, 1, [2,3,6], True, True))", "def testEquality(self):\n pass", "def test_meal_name_already_exists(self):\n\n with self.client:\n self.add_meal(\"fries\", 10000)\n response = self.add_meal(\"fries\", 10000)\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'), \"Meal name already exists\")\n self.assertEqual(response.status_code, 409)", "def test_create_dup(self):\n obj = self.provision_single_asset()\n p = self.post('widget', 409, params={'name': u'Testing'})\n assert 'duplicate value already' in p['message']", "def test_check_duplication_entry_at_restoring_two_chain(self):\n ref_entity = Entity.objects.create(name=\"ReferredEntity\", created_user=self._user)\n ref_entries = [\n Entry.objects.create(name=\"ref-%d\" % i, created_user=self._user, schema=ref_entity)\n for i in range(3)\n ]\n ref_entity_2 = Entity.objects.create(name=\"ReferredEntity2\", created_user=self._user)\n ref_entries_2 = [\n Entry.objects.create(name=\"ref2-%d\" % i, created_user=self._user, schema=ref_entity_2)\n for i in range(3)\n ]\n\n # initialize EntityAttrs\n attr_info = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], 
\"value\": ref_entries[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries[1:],\n },\n }\n attr_info_2 = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries_2[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries_2[1:],\n },\n }\n for attr_name, info in attr_info.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=self._entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity)\n\n self._entity.attrs.add(attr)\n\n for attr_name, info in attr_info_2.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=ref_entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity_2)\n\n ref_entity.attrs.add(attr)\n\n # initialize target entry\n entry = Entry.objects.create(name=\"entry\", schema=self._entity, created_user=self._user)\n entry.complement_attrs(self._user)\n\n for attr_name, info in attr_info.items():\n attr = entry.attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n ref_entries[0].complement_attrs(self._user)\n for attr_name, info in attr_info_2.items():\n attr = ref_entries[0].attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n # delete target entry at first\n entry.delete()\n # sync referral entries from database\n [x.refresh_from_db() for x in ref_entries]\n [x.refresh_from_db() for x in ref_entries_2]\n\n self.assertFalse(ref_entries_2[1].is_active)\n\n # create same name entry\n Entry.objects.create(name=\"ref2-1\", created_user=self._user, schema=ref_entity_2)\n\n # check duplicate entry\n ret = entry.check_duplication_entry_at_restoring(entry_chain=[])\n self.assertTrue(ret)", "def test_consecutive_queries_yield_different_individual_items(test_store):\n queried = next(test_store.get_by(name=\"Andy\"))\n other = next(test_store.get_by(name=\"Andy\"))\n\n assert queried is not other\n assert queried == other", "def check_inputs(self, item_data):\n if not item_data[0] in self.data['pizza']:\n print('Error: ' + item_data[0] + ' pizza does not exist.')\n return False\n\n if not item_data[1] in self.data['pizza'][item_data[0]]:\n print('Error: ' + item_data[1] + ' size does not exist for '\n + item_data[0] + ' pizza.')\n return False\n\n for topping in item_data[2]:\n if not topping in self.data['topping']:\n print('Error: Pizza topping ' + topping + ' does not exist.')\n return False\n return True", "def test_multi_same(nothing_list):\n result = multi_same_list(nothing_list)\n assert result[1][2] == 0\n assert result[0][2] == 0", "def test_present_in_both_db(self):\n for i in range(5):\n price = find_cheapest_price(\"Star Wars: Episode VI - Return of the Jedi\")\n if price is \"69.5\":\n break\n time.sleep(1)\n self.assertTrue(price == \"69.5\")", "def test_shoppingcart_must_not_update_if_closed(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the closed 
shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n data[\"is_closed\"] = True\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then check for fail in update shoppingcart\n self.url = reverse(\"update-shoppingcart\")\n data[\"id\"] = id_cart\n response = self.client.post(self.url, data, **self.auth_headers)\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_give_correct_change(self):\n item, change, _ = give_item_and_change('coke', 1)\n self.assertEqual(item, 'coke')\n self.assertEqual(change, [.20, .05, .02])", "def test_add_duplicate_book(self):\n create_admin()\n response = self.client().post('/api/v1/login', json=self.test_admin)\n json_data = json.loads(response.data)\n access_token = json_data.get('access_token')\n self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n response = self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"Book already exists\")\n self.assertEqual(response.status_code, 409)", "def _check_is_client_duped(client, client_id):\n try:\n other_client = CLIENTS[client_id]\n except KeyError:\n return\n \n if other_client is not client:\n raise RuntimeError(\n f'Creating the same client multiple times is not allowed; {client!r} already exists:, {other_client!r}.'\n )", "def test_eq_false_id(self):\n other = PrepSample('1.SKD8.640184', self.prep_template)\n self.assertFalse(self.tester == other)", "def test_extra_chocolates_multiple(self):\n _inpc = ChocolateFeast(6,2,2)\n self.assertEquals(5,_inpc.get_total_chocolates())", "def expect_duplicate(self):\n # Reset everything for this record\n self._expect_duplicate = False\n self.__dupcntr = 0\n self.__maxdup = 0\n # Get the probability to generate duplicate for next record\n if self.fake.random.random() < self.duplicate_cfg[\"Prob_duplicate\"]:\n self._expect_duplicate = True\n self.__maxdup = self.random_select_ndups()\n else:\n self._expect_duplicate = False\n self.__maxdup = 0\n\n self.__logger.debug(\"expect_duplicate ndups: %d\", self.__maxdup)", "def test_double_corrupt(pid: int, otId: int) -> bool:\n box_mon = BoxMon()\n box_mon.personality = pid\n box_mon.otId = otId\n box_mon.sub(0).type0.species = 308\n box_mon.sub(0).type0.experience = 2195\n box_mon.sub(0).type0.friendship = 70\n sub1 = box_mon.sub(1).type1\n sub1.moves[0] = 33\n sub1.moves[1] = 253\n sub1.moves[2] = 185\n sub1.pp[0] = 35\n sub1.pp[1] = 10\n sub1.pp[2] = 20\n sub2 = box_mon.sub(2).type2\n sub2.attackEV = 22\n sub2.hpEV = 8\n sub3 = box_mon.sub(3).type3\n sub3.metLocation = 28\n sub3.metLevel = 14\n sub3.metGame = 3\n sub3.pokeBall = 2\n sub3.otGender = 1\n sub3.unk = 977594907\n box_mon.checksum = box_mon.calc_checksum()\n sum1 = box_mon.checksum\n box_mon.encrypt()\n box_mon.personality |= 0x40000000\n box_mon.decrypt()\n sum2 = box_mon.calc_checksum()\n box_mon.encrypt()\n box_mon.otId |= 0x40000000\n box_mon.decrypt()\n sum3 = box_mon.calc_checksum()\n if sum1 == sum2 == sum3 and box_mon.sub(3).type3.isEgg == 0:\n box_mon.encrypt()\n return True\n return False", "def testduplicate(self):\n self.assertTrue(AmuletAbility('Control 
Dragon').duplicate(\n AmuletAbility('Control Dragon')))\n self.assertFalse(AmuletAbility('Control Dragon').duplicate(\n AmuletAbility('Control NPC')))\n self.assertTrue(AmuletAbility('Proof', element='Fire').duplicate(\n AmuletAbility('Proof', element='Fire')))\n self.assertFalse(AmuletAbility('Proof', element='Fire').duplicate(\n AmuletAbility('Proof', element='Water')))\n self.assertTrue(AmuletAbility('Attribute', attr='ST').duplicate(\n AmuletAbility('Attribute', attr='ST')))\n self.assertFalse(AmuletAbility('Attribute', attr='ST').duplicate(\n AmuletAbility('Attribute', attr='DX')))\n self.assertTrue(AmuletAbility('Skepticism', size=3).duplicate(\n AmuletAbility('Skepticism', size=3)))\n self.assertTrue(AmuletAbility('Skepticism', size=3).duplicate(\n AmuletAbility('Skepticism', size=5)))", "def test_is_product_entangled_state():\n ent_vec = max_entangled(3)\n np.testing.assert_equal(is_product_vector(ent_vec), False)" ]
[ "0.6690166", "0.63332933", "0.62514263", "0.61649024", "0.6153124", "0.605767", "0.6029322", "0.60229874", "0.6018796", "0.6007936", "0.5988192", "0.5973974", "0.5963615", "0.5908742", "0.58811826", "0.58582234", "0.585461", "0.5827044", "0.5807381", "0.58039653", "0.579343", "0.57912743", "0.5782794", "0.5782669", "0.5776542", "0.57610244", "0.5741526", "0.57412314", "0.57355934", "0.573267", "0.572312", "0.57103074", "0.56956005", "0.56811833", "0.56780577", "0.5677357", "0.5664046", "0.5661502", "0.56525564", "0.56419647", "0.56336665", "0.5626593", "0.56168795", "0.56055987", "0.56040925", "0.56030595", "0.5586377", "0.5570261", "0.55653244", "0.5552833", "0.55506665", "0.5538205", "0.55348957", "0.5528606", "0.5527455", "0.55268836", "0.5520179", "0.5520179", "0.5520179", "0.55138606", "0.5513529", "0.55094135", "0.54932934", "0.5485915", "0.5478565", "0.5474596", "0.54691243", "0.54662955", "0.5457378", "0.5448217", "0.5446862", "0.54446375", "0.5440086", "0.5435308", "0.5433268", "0.54311883", "0.5429177", "0.5427966", "0.54270405", "0.54258883", "0.5420095", "0.541043", "0.539998", "0.53991693", "0.5393312", "0.5390631", "0.53897274", "0.5389379", "0.53865707", "0.53809655", "0.53763366", "0.5370619", "0.5363661", "0.5362907", "0.5355402", "0.5355362", "0.5352605", "0.5350437", "0.5349947", "0.53495175" ]
0.7491278
0
Signs a transaction (in the format of build_tx) with the given node, and returns the decoderawtransaction-type result again.
def sign (self, node, tx):
    signed = node.signrawtransactionwithwallet (tx["hex"])
    res = node.decoderawtransaction (signed["hex"])
    res.update (signed)
    return res
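A minimal, self-contained sketch of what the helper above returns, using a stub node in place of a real bitcoind RPC connection; the StubNode and Signer names and the stubbed return values are assumptions made purely for illustration.

class StubNode:
    # Stand-in for a test node: mimics the two RPCs the helper calls.
    def signrawtransactionwithwallet(self, hex_):
        return {"hex": hex_ + "00", "complete": True}

    def decoderawtransaction(self, hex_):
        return {"txid": "ab" * 32, "vin": [], "vout": []}

class Signer:
    def sign(self, node, tx):
        # Same merge as the helper above: decoded fields plus the signing output.
        signed = node.signrawtransactionwithwallet(tx["hex"])
        res = node.decoderawtransaction(signed["hex"])
        res.update(signed)
        return res

res = Signer().sign(StubNode(), {"hex": "0100"})
assert res["complete"] is True and res["hex"] == "010000" and "txid" in res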
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sign_transaction():\n data = request.get_json()\n\n try:\n tx = Transaction.from_dict(data)\n except TypeError:\n response = dict(message='Improper transaction json provided.')\n status_code = 400\n return jsonify(response), status_code\n\n signature = tx.sign(node.wallet.private_key_rsa)\n response = dict(signature=signature)\n return jsonify(response), 200", "def sign_transaction(self, transaction, prvkey):\n return self.web3.eth.account.sign_transaction(transaction, prvkey)", "def signrawtransaction(self, given_transaction):\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to signrawtransaction.\")\n\n transaction = CMutableTransaction.deserialize(given_bytes)\n transaction_hex = b2x(transaction.serialize())\n return {\"hex\": transaction_hex}", "def sign_transaction(self, transaction):\n try:\n address = transaction.from_address\n private_key = self.addresses[address]['private_key']\n transaction.sign_transaction(private_key)\n except Exception as ex:\n print(\"Error signing transaction from address: \" + address + \" \" + str(ex))", "def sign_transaction(self):\n private_key=RSA.importKey(binascii.unhexlify(self.sender_private_key))\n signer=PKCS1_v1_5.new(private_key)\n h=SHA.new(str(self.to_dict()).encode('utf8'))\n return binascii.hexlify(signer.sign(h)).decode('ascii')", "def sign_tx(self, network, txn, inputs, change, use_ae_signatures=False):\n # 1st message contains txn and number of inputs we are going to send.\n # Reply ok if that corresponds to the expected number of inputs (n).\n base_id = 100 * random.randint(1000, 9999)\n params = {'network': network,\n 'txn': txn,\n 'num_inputs': len(inputs),\n 'use_ae_signatures': use_ae_signatures,\n 'change': change}\n\n reply = self._jadeRpc('sign_tx', params, str(base_id))\n assert reply\n\n # Send inputs and receive signatures\n return self._send_tx_inputs(base_id, inputs, use_ae_signatures)", "def sign_raw_transaction(hexstring):\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"signrawtransaction\", hexstring])\n signed_tx = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return signed_tx", "def _sign_ledger_tx(self, tx_message: TransactionMessage) -> Any:\n if tx_message.ledger_id == OFF_CHAIN:\n crypto_object = self.wallet.crypto_objects.get(\"ethereum\")\n # TODO: replace with default_ledger when recover_hash function is available for FETCHAI\n else:\n crypto_object = self.wallet.crypto_objects.get(tx_message.ledger_id)\n tx = tx_message.signing_payload.get(\"tx\")\n tx_signed = crypto_object.sign_transaction(tx)\n return tx_signed", "def sign_tx(self, tx):\n if self.privkey:\n log.info('signing tx', tx=tx, account=self)\n tx.sign(self.privkey)\n else:\n raise ValueError('Locked account cannot sign tx')", "def send_transaction(self, signd_txn):\n return self.web3.eth.send_raw_transaction(signd_txn.rawTransaction).hex()", "def sign_tx(self, tx: payloads.Transaction, password: str, magic: Optional[int] = None) -> None:\n if magic is None:\n magic = settings.network.magic\n\n self._validate_tx(tx)\n\n message = magic.to_bytes(4, byteorder=\"little\", signed=False) + tx.hash().to_array()\n signature = self.sign(message, password)\n\n invocation_script = vm.ScriptBuilder().emit_push(signature).to_array()\n # mypy can't infer that the is_watchonly check ensures public_key has a value\n verification_script = 
contracts.Contract.create_signature_redeemscript(self.public_key) # type: ignore\n tx.witnesses.insert(0, payloads.Witness(invocation_script, verification_script))", "def getrawtransaction(self, txid, verbose=True):\n if verbose:\n return TransactionInfo(**self.proxy.getrawtransaction(txid, 1))\n return self.proxy.getrawtransaction(txid, 0)", "def sign_transaction(self, private_key):\n\n to_be_hashed = (str(self.timestamp) +\n str(self.sender_address) +\n str(self.recipient_address) +\n str(self.amount) +\n # str(self.transaction_inputs) +\n # str(self.transaction_outputs) +\n str(self.transaction_id))\n\n # Create a hash value of the whole message\n sha_hash = SHA256.new(to_be_hashed.encode())\n\n # Import private key\n key = RSA.importKey(private_key)\n\n # print(sha_hash)\n\n # Construct an instance of the crypto object\n cipher = PKCS1_v1_5.new(key)\n\n # Create and return the signature\n self.transaction_signature = cipher.sign(sha_hash)", "def submit_transaction():\n data = request.get_json()\n\n # Create candidate transaction object\n try:\n tx = Transaction.from_dict(data['transaction'])\n except (KeyError, TypeError):\n response = dict(message='Improper transaction json provided.')\n status_code = 400\n return jsonify(response), status_code\n\n statuses = []\n # Broadcast if needed and turn off broadcasting for other nodes\n if request.args.get('broadcast', type=int, default=0):\n for node_ in node.network:\n if not node_['id'] == node.node_id:\n response = requests.post(\n node_['ip'] + '/transactions/submit?broadcast=0',\n json=dict(\n transaction=data['transaction'],\n signature=data['signature']\n )\n )\n statuses.append(response.status_code)\n\n if not response.status_code == 200:\n response = dict(message='Transaction rejected by the network.')\n return jsonify(response), 202\n\n # Validate transaction as-is\n val_result = validate_transaction_document(tx)\n if isinstance(val_result, str):\n response = dict(message=val_result)\n status_code = 400\n return jsonify(response), status_code\n\n # Verify signature\n # defined in backend/utils\n sign_result = verify_signature(tx, data['signature'])\n if isinstance(sign_result, str):\n response = dict(message=sign_result)\n status_code = 400\n return jsonify(response), status_code\n\n # Add transaction to local blockchain\n node.blkchain.add_transaction(tx)\n myurl = node.network[node.node_id]['ip']\n url = myurl + '/blockchain/mine_block'\n mine_resp = requests.get(url=url)\n if mine_resp.status_code == 200:\n block_dict = mine_resp.json()\n add_resp = requests.post(url=myurl + '/blockchain/add_block?\\\n broadcast=1', json=block_dict)\n # run consensus \n requests.get(url=myurl+'/blockchain/consensus')\n\n response = dict(message='Transaction added.')\n\n return jsonify(response), 200", "def post_transaction():\n tx_dict = encode_transaction(\"gautham=awesome\") \n print(tx_dict)\n\n tendermint_host = 'localhost'\n tendermint_port = 26657\n endpoint = 'http://{}:{}/'.format(tendermint_host, tendermint_port)\n\n payload = {\n 'method': 'broadcast_tx_commit',\n 'jsonrpc': '2.0',\n #'params': [encode_transaction(tx_dict)],\n 'params': [tx_dict],\n 'id': str(uuid4())\n }\n # TODO: handle connection errors!\n print(payload)\n return requests.post(endpoint, json=payload)", "def decoderawtransaction_asm_sighashtype(self):\n\n self.log.info(\"- various mainnet txs\")\n # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.\n tx = 
'0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])\n\n # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.\n # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc\n # verify that we have not altered scriptPubKey decoding.\n tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])\n assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])\n assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])\n assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])\n txSave = tx_from_hex(tx)\n\n self.log.info(\"- tx not passing DER signature checks\")\n # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type\n tx = 
'01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])\n\n self.log.info(\"- tx passing DER signature checks\")\n # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks\n tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])\n assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])\n\n # some more full transaction tests of varying specific scriptSigs. 
used instead of\n # tests in decodescript_script_sig because the decodescript RPC is specifically\n # for working on scriptPubKeys (argh!).\n push_signature = txSave.vin[0].scriptSig.hex()[2:(0x48*2+4)]\n signature = push_signature[2:]\n der_signature = signature[:-2]\n signature_sighash_decoded = der_signature + '[ALL]'\n signature_2 = der_signature + '82'\n push_signature_2 = '48' + signature_2\n signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'\n\n self.log.info(\"- P2PK scriptSig\")\n txSave.vin[0].scriptSig = bytes.fromhex(push_signature)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n # make sure that the sighash decodes come out correctly for a more complex / lesser used case.\n txSave.vin[0].scriptSig = bytes.fromhex(push_signature_2)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n self.log.info(\"- multisig scriptSig\")\n txSave.vin[0].scriptSig = bytes.fromhex('00' + push_signature + push_signature_2)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n self.log.info(\"- scriptSig that contains more than push operations\")\n # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.\n txSave.vin[0].scriptSig = bytes.fromhex('6a143011020701010101010101020601010101010101')\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])", "def send_tx(coin, account, recipient, amount):\n if coin =='eth':\n txn = create_tx(coin, account, recipient, amount)\n signed_txn = w3.eth.account.signTransaction(txn)\n result = w3.eth.sendRawTransaction(signed_txn.rawTransaction)\n print(result.hex())\n return result.hex()\n\n else:\n tx_btctest= create_tx(coin, account, recipient, amount)\n sign_tx_btctest = account.sign_transaction(tx_btctest)\n from bit.network import NetworkAPI\n NetworkAPI.broadcast_tx_testnet(sign_tx_btctest) \n return sign_tx_btctest", "def send_raw_transaction(signed_tx):\n try:\n txid = subprocess.check_output([\"litecoin-cli\", \"sendrawtransaction\", signed_tx])\n except:\n sys.exit(1)\n return txid.strip()", "def sign_with(self, account: Account) -> CosignatureSignedTransaction:\n\n transaction_info = self.transaction.transaction_info\n if transaction_info is None:\n raise ValueError('Transaction info not present.')\n parent_hash = typing.cast(TransactionInfo, transaction_info).hash\n if parent_hash is None:\n raise ValueError('Transaction info to cosign has no hash.')\n\n signature = util.hexlify(account.sign_data(parent_hash))\n signer = account.public_key\n return CosignatureSignedTransaction(parent_hash, signature, signer)", "def signed_transaction(self) -> CustomSignedTransaction:\n enforce(\n self.is_set(\"signed_transaction\"),\n \"'signed_transaction' content is not set.\",\n )\n return cast(CustomSignedTransaction, self.get(\"signed_transaction\"))", "def sendrawtransaction(self, given_transaction):\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = 
given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to sendrawtransaction.\")\n transaction = CMutableTransaction.deserialize(given_bytes)\n return b2lx(transaction.GetHash())", "def signrawtransaction(self, hexstring, previous_transactions=None, private_keys=None):\n return dict(self.proxy.signrawtransaction(hexstring, previous_transactions, private_keys))", "def submit_and_store_transaction(self, signed_transaction_data):\n return self._call_account_method(\n 'submitAndStoreTransaction', {\n 'signedTransactionData': signed_transaction_data\n }\n )", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)", "def test_new_transaction_return_type(self):\n transaction = self.blockchain.new_transaction(self.sender, self.recipient, self.amount)\n self.assertIsInstance(transaction, int)", "def sign_liquid_tx(self, network, txn, inputs, commitments, change, use_ae_signatures=False,\n asset_info=None):\n # 1st message contains txn and number of inputs we are going to send.\n # Reply ok if that corresponds to the expected number of inputs (n).\n base_id = 100 * random.randint(1000, 9999)\n params = {'network': network,\n 'txn': txn,\n 'num_inputs': len(inputs),\n 'trusted_commitments': commitments,\n 'use_ae_signatures': use_ae_signatures,\n 'change': change,\n 'asset_info': asset_info}\n\n reply = self._jadeRpc('sign_liquid_tx', params, str(base_id))\n assert reply\n\n # Send inputs and receive signatures\n return self._send_tx_inputs(base_id, inputs, use_ae_signatures)", "def create_transaction():\n data = request.get_json()\n response = None\n status_code = None\n\n # Proposed transaction document validity checks\n if balance() < (data['amount']):\n response = dict(message='Your balance is not enough to complete transaction')\n status_code = 400\n elif not (\n any(node_['public_key'] == data['sender_address'] for node_ in node.network) and\n any(node_['public_key'] == data['recipient_address'] for node_ in node.network) and\n isinstance((data['amount']), (int, float))\n ):\n response = dict(message='Please make sure the proposed transaction is valid.')\n status_code = 400\n\n if response and status_code:\n return jsonify(response), status_code\n\n transaction_id = str(uuid4())\n\n # Use as many utxos as necessary to create the new transaction inputs\n sender_address = data['sender_address']\n sum_ = 0\n tx_inputs = []\n for utxo in node.blkchain.utxos[sender_address]:\n if sum_ >= (data['amount']):\n break\n elif not node.blkchain.transaction_unconfirmed(utxo):\n sum_ += utxo.amount\n tx_inputs.append(TransactionInput.from_output(utxo))\n\n # Create 2 transaction outputs, one for the transfer and one for the sender's change\n tx_outputs = [\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['recipient_address'],\n amount=(data['amount'])\n ),\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['sender_address'],\n amount=sum_ - (data['amount'])\n )\n ]\n\n # Actual transaction object:\n tx = Transaction(\n sender_address=data['sender_address'],\n recipient_address=data['recipient_address'],\n amount=(data['amount']),\n transaction_inputs=tx_inputs,\n transaction_outputs=tx_outputs,\n transaction_id=transaction_id\n )\n\n response = tx.to_dict()\n return jsonify(response), 200", "def sign(self, payload):\n raise NotImplementedError", "def sign_trx(self, 
signture):\n self.trx_signature = signture", "def signSign(self):\r\n if \"signature\" in self: # or \"signatures\" in self ?\r\n self.pop(\"id\", False)\r\n try:\r\n self[\"signSignature\"] = dposlib.core.crypto.getSignature(\r\n self, self._secondPrivateKey,\r\n exclude_second_sig=True,\r\n )\r\n except AttributeError:\r\n raise Exception(\"no second private Key available\")\r\n else:\r\n raise Exception(\"transaction not signed\")", "def gettransaction(self, txid):\n return TransactionInfo(**self.proxy.gettransaction(txid))", "def sign(self, data: bytes, password: str) -> bytes:\n if self.is_watchonly:\n raise ValueError(\"Cannot sign transaction using a watch only account\")\n # mypy can't infer that the is_watchonly check ensures encrypted_key has a value\n private_key = self.private_key_from_nep2(self.encrypted_key.decode(\"utf-8\"), password) # type: ignore\n return cryptography.sign(data, private_key)", "def transaction():\n data = jsonpickle.decode(request.get_data())\n address = data[\"address\"]\n amount = int(data[\"amount\"])\n keyname = data[\"keyname\"]\n\n pkplus, pkminus = wallet.keys(keyname)\n\n my_balance = p2p.query(\"/balance\", address=pkplus)[\"balance\"]\n if my_balance < amount:\n abort(404, description=\"Not enough funds.\")\n\n my_utxo = p2p.query(\"/find-utxos\", address=pkplus, amount=amount)[\"utxos\"]\n rem = sum(utxo.amount for utxo in my_utxo) - amount\n address_amount = [(address, amount)]\n\n assert rem >= 0\n\n if rem > 0:\n address_amount.append((pkplus, rem))\n\n tx = build_transaction(my_utxo, address_amount, pkminus)\n try:\n p2p.broadcast(\"/transaction-pool\", transaction=tx)\n return SUCCESSFUL_PATCH\n except UnsuccessfulPatch:\n payload = jsonpickle.encode(\n {\"message\": \"Transaction wasn't accepted by the network.\"})\n return payload, 420, {\"ContentType\": \"application/json\"}", "def get_tx_signature(tx, private_key, btc_address, hashcode=SIGHASH_ALL):\n tx_obj = deserialize(tx)\n index = None\n\n for tx_in in tx_obj['ins']:\n prev_tx_hash = tx_in['outpoint']['hash']\n prev_tx_info = get_tx_info(prev_tx_hash)\n if btc_address in prev_tx_info['to']:\n index = prev_tx_info['to'].index(btc_address)\n\n if index is not None:\n signing_tx = signature_form(tx, index, mk_pubkey_script(btc_address), hashcode)\n signature = ecdsa_tx_sign(signing_tx, private_key, hashcode)\n response = signature, index\n else:\n response = \"Error, no input tx to sign\", -1\n\n return response", "def hash_transaction(transaction: SignedRawTransaction) -> str:\n hashable_transaction = transaction.SerializeToString()\n return Verification.hash_bytes_256(hashable_transaction)", "def ecdsa_tx_sign(txhash, priv):\n rawsig = ecdsa_raw_sign(txhash, priv)\n return der_encode_sig(*rawsig)", "def transaction_action(self):\n # trigger scene signal\n self.scene().node_transaction.emit(self.metadata)", "def _sign_tx_hash(self, tx_message: TransactionMessage) -> str:\n if tx_message.ledger_id == OFF_CHAIN:\n crypto_object = self.wallet.crypto_objects.get(\"ethereum\")\n # TODO: replace with default_ledger when recover_hash function is available for FETCHAI\n else:\n crypto_object = self.wallet.crypto_objects.get(tx_message.ledger_id)\n tx_hash = tx_message.signing_payload.get(\"tx_hash\")\n is_deprecated_mode = tx_message.signing_payload.get(\"is_deprecated_mode\", False)\n tx_signature = crypto_object.sign_message(tx_hash, is_deprecated_mode)\n return tx_signature", "def send_unsigned_transaction(self, tx: Dict[str, Any], private_key: Optional[str] = None,\n public_key: 
Optional[str] = None, retry: bool = False,\n block_identifier: Optional[str] = 'pending') -> bytes:\n if private_key:\n address = self.private_key_to_address(private_key)\n elif public_key:\n address = public_key\n else:\n logger.error('No ethereum account provided. Need a public_key or private_key')\n raise ValueError('Ethereum account was not configured or unlocked in the node')\n\n if tx.get('nonce') is None:\n tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)\n\n number_errors = 5\n while number_errors >= 0:\n try:\n if private_key:\n signed_tx = self.w3.eth.account.sign_transaction(tx, private_key=private_key)\n logger.debug('Sending %d wei from %s to %s', tx['value'], address, tx['to'])\n try:\n return self.send_raw_transaction(signed_tx.rawTransaction)\n except TransactionAlreadyImported as e:\n # Sometimes Parity 2.2.11 fails with Transaction already imported, even if it's not, but it's\n # processed\n tx_hash = signed_tx.hash\n logger.error('Transaction with tx-hash=%s already imported: %s' % (tx_hash.hex(), str(e)))\n return tx_hash\n elif public_key:\n tx['from'] = address\n return self.send_transaction(tx)\n except ReplacementTransactionUnderpriced as e:\n if not retry or not number_errors:\n raise e\n current_nonce = tx['nonce']\n tx['nonce'] = max(current_nonce + 1, self.get_nonce_for_account(address,\n block_identifier=block_identifier))\n logger.error('Tx with nonce=%d was already sent for address=%s, retrying with nonce=%s',\n current_nonce, address, tx['nonce'])\n except InvalidNonce as e:\n if not retry or not number_errors:\n raise e\n logger.error('address=%s Tx with invalid nonce=%d, retrying recovering nonce again',\n address, tx['nonce'])\n tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)\n number_errors -= 1", "def sign_transaction_essence(self, prepared_transaction_data):\n return self._call_account_method(\n 'signTransactionEssence', {\n 'preparedTransactionData': prepared_transaction_data\n }\n )", "def sign(module):\n results = {\n \"changed\": False,\n \"results\": [],\n \"changes\": []\n }\n if not module.params['passphrase'] and not module.params['key']:\n module.fail_json(rc=1, msg='Error: Both passphrase and key are '\n 'required when signing an rpm')\n else:\n if module.params['macros']:\n for macro, value in module.params['macros'].items():\n rpm.addMacro(macro, value)\n for package in module.params['rpms']:\n pyread, cwrite = os.pipe()\n cwrite = os.fdopen(cwrite, 'w')\n rpm.setLogFile(cwrite)\n result = rpm.addSign(\n '{rpm}'.format(rpm=package),\n module.params['passphrase'], module.params['key']\n )\n cwrite.close()\n pyread = os.fdopen(pyread)\n msg = pyread.readline()\n pyread.close()\n\n if not result:\n module.fail_json(rc=1, msg='Error: Failed to sign {rpm}, {msg}'.format(rpm=package, msg=msg))\n\n if not msg:\n results['changes'].append('{}'.format(package))\n results['results'].append('{} was signed'.format(package))\n if not results['changed']:\n results['changed'] = True\n else:\n results['results'].append('{} skipped, already signed'.format(package))\n module.exit_json(\n changed=results['changed'],\n results=results['results'],\n changes=dict(signed=results['changes'])\n )", "def createrawtransaction(self, inputs, outputs):\n return self.proxy.createrawtransaction(inputs, outputs)", "def new_transaction():\n\n data = request.get_json()\n\n if not data:\n return \"No transation data passed\", 400\n\n required = ['sender', 'recipient', 'amount']\n\n if not 
(list(data.keys()) == required):\n return 'Missing Value', 400\n \n block_index = blockchain.add_transaction(data['sender'], data['recipient'], data['amount'])\n response = {'message':f'Adding the transaction to block at index: {block_index}'}\n\n return jsonify(response), 201", "def sign(self, msg: Dict) -> Dict:\n ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm,\n f.SIGS.nm])\n bsig = self.naclSigner.signature(ser)\n sig = base58.b58encode(bsig).decode(\"utf-8\")\n return sig", "def submit_transaction(self, sender_address, recipient_address, stock, quanitity, signature):\n print(\"self.transactions=\", len(self.transactions))\n\n transaction = OrderedDict({\n 'sender_address': sender_address,\n 'recipient_address': recipient_address,\n 'stock': stock,\n 'quantity': quanitity\n })\n\n verified = self.verify_signature(sender_address, signature, transaction)\n if verified:\n self.transactions.append(transaction)\n print('Added tranasaction successfully (len={})'.format(len(self.transactions)))\n self.mine()\n return len(self.chain) + 1\n else:\n raise Exception(\"Failed to add transaction to blockchain\")", "def test_sign(self):\n self.signer.Sign(b'notadb')\n self.assertTrue(True)", "def get_transaction(tx):\n global INVOKE_COUNTER\n INVOKE_COUNTER = INVOKE_COUNTER + 1\n if INVOKE_COUNTER % 3 == 0:\n return \"\"\n else:\n raise_connection_error()", "def rawTxInSignature(tx, idx, subScript, hashType, key):\n sigHash = calcSignatureHash(subScript, hashType, tx, idx, None)\n sig = signRFC6979(key, sigHash).serialize()\n return sig + ByteArray(hashType)", "def coinbase_transaction(self):\n return self.txns[0]", "def fundrawtransaction(self, given_transaction, *args, **kwargs):\n # just use any txid here\n vintxid = lx(\"99264749804159db1e342a0c8aa3279f6ef4031872051a1e52fb302e51061bef\")\n\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to fundrawtransaction.\")\n\n # this is also a clever way to not cause a side-effect in this function\n transaction = CMutableTransaction.deserialize(given_bytes)\n\n for vout_counter in range(0, self._num_fundrawtransaction_inputs):\n txin = CMutableTxIn(COutPoint(vintxid, vout_counter))\n transaction.vin.append(txin)\n\n # also allocate a single output (for change)\n txout = make_txout()\n transaction.vout.append(txout)\n\n transaction_hex = b2x(transaction.serialize())\n\n return {\"hex\": transaction_hex, \"fee\": 5000000}", "def signTxOutput(\n netParams, tx, idx, pkScript, hashType, keysource, previousScript, sigType\n):\n\n sigScript, scriptClass, addresses, nrequired = sign(\n netParams, tx, idx, pkScript, hashType, keysource, sigType\n )\n\n isStakeType = (\n scriptClass == StakeSubmissionTy\n or scriptClass == StakeSubChangeTy\n or scriptClass == StakeGenTy\n or scriptClass == StakeRevocationTy\n )\n if isStakeType:\n scriptClass = getStakeOutSubclass(pkScript)\n\n if scriptClass == ScriptHashTy:\n raise NotImplementedError(\"ScriptHashTy signing unimplemented\")\n # # TODO keep the sub addressed and pass down to merge.\n # realSigScript, _, _, _ = sign(\n # privKey, netParams, tx, idx, sigScript, hashType, sigType)\n\n # Append the p2sh script as the last push in the script.\n # script = ByteArray(b'')\n # script += realSigScript\n # script += addData(sigScript)\n\n # sigScript = script\n # # TODO keep a copy of the script for 
merging.\n\n # Merge scripts. with any previous data, if any.\n mergedScript = mergeScripts(\n netParams,\n tx,\n idx,\n pkScript,\n scriptClass,\n addresses,\n nrequired,\n sigScript,\n previousScript,\n )\n return mergedScript", "def algo_transaction(sender, private_key, receiver, amount):\n params = ALGODCLIENT.suggested_params()\n txn = PaymentTxn(sender, params, receiver, amount, None)\n signed_tx = txn.sign(private_key)\n ALGODCLIENT.send_transaction(signed_tx)\n return True", "def generate_transaction(recipient_id: int, amount: float, mute: bool = False) -> bool:\n logging.debug(\"Transaction requested: %f NBC to node %d\", amount, recipient_id)\n sender = wallet.get_public_key().dumpb()\n recipient = wallet.get_public_key(recipient_id).dumpb()\n r = util.get_db()\n inputs: List[TransactionInput] = []\n input_amount = 0.0\n with r.lock(\"blockchain:tx_pool:lock\"), \\\n r.lock(\"blockchain:utxo-tx:lock\"):\n for ib, ob in r.hgetall(\"blockchain:utxo-tx\").items():\n o = TransactionOutput.loadb(ob)\n if o.recipient == sender:\n inputs.append(TransactionInput.loadb(ib))\n input_amount += o.amount\n if input_amount >= amount:\n t = Transaction(recipient=recipient,\n amount=amount,\n inputs=inputs,\n input_amount=input_amount)\n # Add to transaction pool\n r.hset(\"blockchain:tx_pool\", t.id, t.dumpb())\n # \"Add to wallet if mine\"\n r.hdel(\"blockchain:utxo-tx\", *(i.dumpb() for i in t.inputs))\n r.hmset(\"blockchain:utxo-tx\", {TransactionInput(t.id, o.index).dumpb(): \\\n o.dumpb() for o in t.outputs})\n break\n else:\n # Not enough UTXOs\n logging.error(\"Cannot send %f NBC to node %d (not enough coins)\", amount, recipient_id)\n return False\n\n logging.debug(\"Generated transaction %s\", util.bintos(t.id))\n _check_for_new_block()\n if not mute:\n logging.debug(\"Broadcasting transaction %s\", util.bintos(t.id))\n chatter.broadcast_transaction(t, util.get_peer_ids())\n return True", "def create_transaction(inputs: list, outputs: dict) -> ((str, int), str):\n\ttry:\n\t\tc = Bitcoin(testnet=bitcoin_is_testnet)\n\t\touts = []\n\t\tfor outk, outv in outputs.items():\n\t\t\touts += [{'value': outv, 'address': outk}]\n\t\ttx = c.mktx(inputs, outs)\n\t\ttx_serialize = serialize(tx)\n\n\t\t# Signing each input to predict the transaction size\n\t\tpriv = sha256('a big long brainwallet password')\n\t\ttx_signed = tx.copy()\n\t\tfor i in range(len(inputs)):\n\t\t\ttx_signed = c.sign(tx_signed, i, priv)\n\n\t\t# The serialization uses one char per nibble so in order the get the number of bytes it's necessary to\n\t\t# divide the size of the string serialization by 2\n\t\treturn (str(tx_serialize), len(str(serialize(tx_signed))) // 2), None\n\texcept Exception as e:\n\t\t# It should be logging using the default log\n\t\tprint(f\"There was a problem trying to create the transaction: {e}\")\n\t\treturn (None, None), \"There was a problem trying to create the transaction\"", "def call_backend_sign(self, account: str, message: bytes) -> str:\n provider = self._providers[0]\n if isinstance(provider, EthereumTesterProvider):\n address = to_canonical_address(account)\n sig_key = provider.ethereum_tester.backend._key_lookup[address]\n signed_message = sig_key.sign_msg(message)\n return signed_message\n else:\n return self.w3.eth.sign(account, data=message) # Technically deprecated...", "def push_tx(tx, network='testnet', fee=False):\n\n if network in ['testnet', 'main']:\n if network is 'testnet':\n if fee:\n url = 'http://tbtc.blockr.io/api/v1/tx/push'\n else:\n url = 
'https://api.blockcypher.com/v1/btc/test3/txs/push'\n elif network is 'main':\n if fee:\n url = 'http://btc.blockr.io/api/v1/tx/push'\n else:\n url = 'https://api.blockcypher.com/v1/btc/main/txs/push'\n\n if fee:\n data = {'hex': tx}\n else:\n data = {'tx': tx}\n\n response = post(url, data=json.dumps(data))\n else:\n response = 'Bad network'\n\n r_code = response.status_code\n r_reason = response.reason\n\n if r_code is 200:\n # blockr server\n pushed_tx = json.loads(response.content)\n tx_hash = str(pushed_tx['data'])\n elif r_code is 201:\n # blockcyper server\n pushed_tx = json.loads(response.content)\n tx_hash = str(pushed_tx['tx']['hash'])\n else:\n tx_hash = None\n\n return r_code, r_reason, tx_hash", "def _get_transaction(self, hash_bytes: bytes) -> BaseTransaction:\n raise NotImplementedError", "def sign(self, encoded):\n signature = self._hmac.copy()\n signature.update(encoded)\n return signature.hexdigest().encode('utf-8')", "def raw_get_transaction(cls, txid):\n r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)\n r.raise_for_status() # pragma: no cover\n return r.json()", "def sign(self, data: bytes) -> bytes:\n return self._signing_key.sign(data).signature", "def buildAsk (self, node, name, value, price):\n\n nameData = node.name_show (name)\n namePrevOut = node.gettxout (nameData[\"txid\"], nameData[\"vout\"])\n nameValue = namePrevOut[\"value\"]\n addr = node.getnewaddress ()\n\n tx = CTransaction ()\n nameOut = COutPoint (int (nameData[\"txid\"], 16), nameData[\"vout\"])\n tx.vin.append (CTxIn (nameOut))\n tx.vout.append (self.buildNameUpdate (name, value, addr, nameValue + price))\n\n txHex = tx.serialize ().hex ()\n\n signed = node.signrawtransactionwithwallet (txHex, [],\n \"SINGLE|ANYONECANPAY\")\n assert signed[\"complete\"]\n return signed[\"hex\"]", "def send_tx(self, tx):\n if sys.version_info >= (3, 0):\n tx = tx.encode('ascii')\n tx_b64 = base64.b64encode(tx)\n self.__rpc_client.call(\"Babble.SubmitTx\", [tx_b64], expect_reply=True)", "def _store_transaction(account, transaction):\n tr_tx = transaction['tx']\n meta = transaction.get('meta', {})\n\n if meta.get('TransactionResult') != 'tesSUCCESS':\n return\n\n amount = meta.get('delivered_amount') or tr_tx.get('Amount', {})\n\n is_unprocessed = (\n tr_tx['TransactionType'] == 'Payment' and\n tr_tx['Destination'] == account and\n isinstance(amount, dict) and\n not Transaction.objects.filter(hash=tr_tx['hash'])\n )\n if is_unprocessed:\n logger.info(\n format_log_message(\n 'Saving transaction: %s', transaction\n )\n )\n\n transaction_object = Transaction.objects.create(\n account=tr_tx['Account'],\n hash=tr_tx['hash'],\n destination=account,\n ledger_index=tr_tx['ledger_index'],\n destination_tag=tr_tx.get('DestinationTag'),\n source_tag=tr_tx.get('SourceTag'),\n status=Transaction.RECEIVED,\n currency=amount['currency'],\n issuer=amount['issuer'],\n value=amount['value']\n )\n\n logger.info(\n format_log_message(\n \"Transaction saved: %s\", transaction_object\n )\n )", "def sign(self):\r\n self._reset()\r\n if hasattr(self, \"_privateKey\"):\r\n if \"fee\" not in self:\r\n setFees(self)\r\n if self.type == 4:\r\n missings = \\\r\n self.asset[\"multiSignature\"][\"min\"] - \\\r\n len(self.get(\"signature\", []))\r\n if missings:\r\n raise Exception(\"owner signature missing (%d)\" % missings)\r\n self[\"signature\"] = dposlib.core.crypto.getSignature(\r\n self, self._privateKey\r\n )\r\n else:\r\n raise Exception(\"orphan transaction can not sign itsef\")", "def _generate_transaction(\n 
payment: Payment,\n kind: str,\n amount: Decimal,\n *,\n id='',\n is_success=True,\n **data) -> Transaction:\n transaction = create_transaction(\n payment=payment,\n kind=kind,\n amount=amount,\n currency=data.pop('currency', payment.currency),\n gateway_response=data,\n token=id,\n is_success=is_success)\n return transaction", "def verify(blocknumber, trx, use_api):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n b = Blockchain(morphene_instance=stm)\n i = 0\n if not blocknumber:\n blocknumber = b.get_current_block_num()\n try:\n int(blocknumber)\n block = Block(blocknumber, morphene_instance=stm)\n if trx is not None:\n i = int(trx)\n trxs = [block.json_transactions[int(trx)]]\n else:\n trxs = block.json_transactions\n except Exception:\n trxs = [b.get_transaction(blocknumber)]\n blocknumber = trxs[0][\"block_num\"]\n wallet = Wallet(morphene_instance=stm)\n t = PrettyTable([\"trx\", \"Signer key\", \"Account\"])\n t.align = \"l\"\n if not use_api:\n from morphenepythonbase.signedtransactions import Signed_Transaction\n for trx in trxs:\n if not use_api:\n # trx is now identical to the output of get_transaction\n # This is just for testing porpuse\n if True:\n signed_tx = Signed_Transaction(trx.copy())\n else:\n tx = b.get_transaction(trx[\"transaction_id\"])\n signed_tx = Signed_Transaction(tx)\n public_keys = []\n for key in signed_tx.verify(chain=mph.chain_params, recover_parameter=True):\n public_keys.append(format(Base58(key, prefix=mph.prefix), mph.prefix))\n else:\n tx = TransactionBuilder(tx=trx, morphene_instance=stm)\n public_keys = tx.get_potential_signatures()\n accounts = []\n empty_public_keys = []\n for key in public_keys:\n account = wallet.getAccountFromPublicKey(key)\n if account is None:\n empty_public_keys.append(key)\n else:\n accounts.append(account)\n new_public_keys = []\n for key in public_keys:\n if key not in empty_public_keys or use_api:\n new_public_keys.append(key)\n if isinstance(new_public_keys, list) and len(new_public_keys) == 1:\n new_public_keys = new_public_keys[0]\n else:\n new_public_keys = json.dumps(new_public_keys, indent=4)\n if isinstance(accounts, list) and len(accounts) == 1:\n accounts = accounts[0]\n else:\n accounts = json.dumps(accounts, indent=4)\n t.add_row([\"%d\" % i, new_public_keys, accounts])\n i += 1\n print(t)", "def transaction_type(self) -> str:\n return self.chunks[2].decode(\"ascii\")", "def get_transaction(self, txid):\n\n return self._blocks._txns.get(txid)", "def call_contract(w3, account, func):\n tx = func.buildTransaction({\n 'nonce': w3.eth.getTransactionCount(account.address),\n 'gas': func.estimateGas()\n })\n signed_tx = w3.eth.account.signTransaction(tx, account.privateKey)\n tx_hash = w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n return tx_hash", "def send_tx(args):\n kwargs = {\n '--privkey': args.privkey,\n '--to': AMEND_ADDR,\n '--code': args.code,\n '--value': str(args.value),\n }\n args = functools.reduce(\n lambda lst, kv: lst + list(kv),\n kwargs.items(),\n [],\n )\n print(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'send_tx.py'])\n with open('../output/transaction/hash') as fobj:\n return fobj.read().strip()", "def test_modify_transaction_after_signing(mocker):\n transaction_original = Transaction(\n chain=0,\n nonce=4_294_967_295,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n )\n\n transaction = 
transaction_original.sign(PRIVATE_KEY_1)\n transaction.value = 10_000_000\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_INVALID_SIGNATURE\n ):\n transaction.validate(raise_exception=True)", "def compute_transaction_id(self):\n self.tx_id = self.get_sign_data()", "def add_transaction(self, transaction, signature, client_public_key):\r\n # Check If transaction is already in the transaciton_pool\r\n if transaction not in self.transaction_pool:\r\n # Verify With All Other Nodes\r\n if self.verify_transaction(transaction, signature, client_public_key):\r\n # Encrypt the transaction\r\n client_public_key = load_pem_public_key(client_public_key, default_backend())\r\n encrypted_transaction = client_public_key.encrypt(\r\n json.dumps(transaction).encode(),\r\n padding.OAEP(\r\n mgf = padding.MGF1(algorithm=hashes.SHA256()),\r\n algorithm = hashes.SHA256(),\r\n label = None\r\n )\r\n )\r\n\r\n self.transaction_pool.append(str(encrypted_transaction))\r\n\r\n else: return False, self.transaction_pool # Return False if Verification fails\r\n\r\n # Return True if transaction was already in transaction_pool or if verification was successful and new transaction was added\r\n return True, self.transaction_pool", "def CreateTransaction(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def mk_simple_transaction(self, from_addr, to_addr, send_value):\n transaction = dict(\n nonce=self.web3.eth.get_transaction_count(from_addr),\n gasPrice=self.web3.eth.gasPrice,\n # there must be an automated way to automatically set the gas price\n # based off of the gas strategy\n gas=100000,\n to=to_addr,\n value=self.web3.toWei(send_value, 'wei')\n )\n return transaction", "def sign(self, message):\n return Signature(self._sk.sign(message))", "def add_tx(self, txid, tx):\n outputs = tx.outputs()\n so = outputs and outputs[0][1]\n # Note: ScriptOutput here is the subclass defined in this file, not\n # address.ScriptOutput\n if not isinstance(so, ScriptOutput):\n return\n transaction_type = so.message.transaction_type\n try:\n if transaction_type == \"GENESIS\":\n self._add_genesis_or_mint_tx(so, outputs, txid, tx)\n elif transaction_type == \"MINT\":\n self._add_genesis_or_mint_tx(so, outputs, txid, tx)\n elif transaction_type == \"SEND\":\n self._add_send_tx(so, outputs, txid, tx)\n elif transaction_type == \"COMMIT\":\n return # ignore COMMIT, they don't produce any tokens\n else:\n raise InvalidOutputMessage(\"Bad transaction type\")\n except (AssertionError, ValueError, KeyError, TypeError, IndexError) as e:\n self.print_error(f\"ERROR: tx {txid}; exc =\", repr(e))", "def create_raw_transaction(amount, network_fee, from_address, to_address):\n tx_total = amount + network_fee\n tx_inputs = []\n input_total = 0\n unspent = list_unspent(from_address)\n\n # Are there enough funds in one block to cover the amount\n for block in unspent:\n if float(block[\"amount\"]) >= tx_total:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total = float(block[\"amount\"])\n tx_inputs.append(tx_input)\n break\n # If tx_inputs is empty that means we have to\n # build the transaction from multiple blocks\n if not tx_inputs:\n for block in unspent:\n if input_total >= tx_total:\n break\n else:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total += float(block[\"amount\"])\n tx_inputs.append(tx_input)\n\n # Amount left over after amount to send and 
network fees are subtracted\n # from input_total. Change is sent back to sender\n change = round((input_total - amount) - network_fee, 8)\n \n if change < dust:\n tx_output = {to_address: amount}\n else:\n tx_output = {to_address: amount, from_address: change}\n \n try:\n tx_hex_string = subprocess.check_output([\"litecoin-cli\", \"createrawtransaction\", json.dumps(tx_inputs), json.dumps(tx_output)])\n except:\n sys.exit(1)\n\n return tx_hex_string.strip()", "def get_tx(txid):\n return requests.get(BASE+f'/api/tx/{txid}').json()", "def add_transaction():\n index = blockchain.add_transaction(request.form['sender'], request.form['receiver'], request.form['amount'])\n response = {'message': \"Transaction will be added to Block #{0}\".format(index)}\n return jsonify(response), 200", "def get_payee_transaction(payee: str) -> Any:\n entry = g.ledger.attributes.payee_transaction(payee)\n return serialise(entry) if entry else None", "def sign(self, msg):\n\n if type(msg) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n return self.gpg.sign(msg)", "def sign(self, object):\n pass", "def local_push(tx, rpc_user=None, rpc_password=None):\n\n rpc_connection = AuthServiceProxy(\"http://\"+rpc_user+\":\"+rpc_password+\"@127.0.0.1:18332\")\n\n try:\n tx_hash = rpc_connection.sendrawtransaction(tx)\n code = 200\n print \"Transaction broadcast \" + tx_hash\n except JSONRPCException as e:\n print e.message\n tx_hash = None\n code = 500\n\n return tx_hash, code", "def _santander_generate_digital_sign(self,type, acquirer,time_stamp,merchant_id,reference,amount,cur,secret, result=False,\n message=False,\n pasref=False,\n authcode=False,\n\t\t\t\t\t\t\t\t\t\t\t\t order_id=False):\n assert acquirer.provider == 'santander'\n\n '''\n def get_value(key):\n if values.get(key):\n return values[key]\n return ''\n\n if inout == 'out':\n keys = ['Ds_Amount',\n 'Ds_Order',\n 'Ds_MerchantCode',\n 'Ds_Currency',\n 'Ds_Response']\n else:\n keys = ['Ds_Merchant_Amount',\n 'Ds_Merchant_Order',\n 'Ds_Merchant_MerchantCode',\n 'Ds_Merchant_Currency',\n 'Ds_Merchant_TransactionType',\n 'Ds_Merchant_MerchantURL']\n sign = ''.join('%s' % (get_value(k)) for k in keys)\n # Add the pre-shared secret key at the end of the signature\n sign = sign + acquirer.santander_secret_key\n if isinstance(sign, str):\n sign = urlparse.parse_qsl(sign)\n shasign = sha1(sign).hexdigest().upper()\n '''\n if type == 'in':\n clave1 = sha1(str(time_stamp) + '.' + str(merchant_id) + '.' + str(reference) + '.' + str(amount) + '.' + str(cur))\n clave2 = sha1(str(clave1.hexdigest()) + '.' + str(secret))\n print str(clave2.hexdigest())\n return str(clave2.hexdigest())\n if type == 'out':\n clave1 = sha1(str(time_stamp) + '.' + str(merchant_id) + '.' + str(order_id) + '.' + str(result) + '.' + str(message) + '.' + str(pasref) + '.' + str(authcode))\n clave2 = sha1(str(clave1.hexdigest()) + '.' 
+ str(secret))\n print str(clave2.hexdigest())\n return str(clave2.hexdigest())", "def test_sign_tx_fetchai(self):\n tx_hash = Web3.keccak(text=\"some_bytes\")\n\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 0},\n ledger_id=self.ledger_id,\n info=self.info,\n signing_payload={\"tx_hash\": tx_hash},\n )\n\n tx_signature = self.decision_maker._sign_tx(tx_message)\n assert tx_signature is not None", "def _transact(self, payment_method_token, amount, processor_token,\n transaction_type, endpoint, options):\n purchase_data = self._construct_options(payment_method_token, transaction_type,\n amount, options)\n # Send payload and return transaction.\n req = Request(endpoint % processor_token, purchase_data, method='post')\n req.add_header(\"Content-Type\", \"application/xml\")\n return Transaction(fetch_url(req))", "def create_god_transaction(to_pk):\n\n god_pk, god_sk = signature.generate_keys()\n tx = Transaction(god_pk, to_pk, SEED_COIN_SUPPLY)\n tx.sign(god_sk)\n return tx", "def create_tx(coin, account, recipient, amount):\n if coin ==ETH:\n gasEstimate = w3.eth.estimateGas(\n {\"from\": account.address, \"to\": recipient, \"value\": amount})\n return{\n \"to\": recipient,\n \"from\": account.address,\n \"value\": amount,\n \"gasPrice\": w3.eth.gasPrice,\n \"gas\": gasEstimate,\n \"nonce\": w3.eth.getTransactionCount(account.address)\n }\n if coin == BTCTEST:\n return PrivateKeyTestnet.prepare_transaction(account.address, [(recipient, amount, BTC)])", "def _run_transaction(operation_name, txn):\n if not txn:\n return None\n\n response = None\n try:\n response = requests.put(PoliciesOutput.CONSUL_TRANSACTION_URL, json=txn)\n except requests.exceptions.RequestException as ex:\n ctx.logger.error(\n \"RequestException - failed to {0} at {1}: {2} on txn={3}\"\n .format(operation_name, PoliciesOutput.CONSUL_TRANSACTION_URL,\n str(ex), json.dumps(txn)))\n return None\n\n if response.status_code != requests.codes.ok:\n ctx.logger.error(\n \"failed {0} for {1} {2}: text={3} txn={4}\"\n .format(response.status_code, operation_name,\n PoliciesOutput.CONSUL_TRANSACTION_URL, response.text, json.dumps(txn)))\n return None\n ctx.logger.info(\n \"response {0} for {1} {2}: text={3} txn={4}\"\n .format(response.status_code, operation_name,\n PoliciesOutput.CONSUL_TRANSACTION_URL, response.text, json.dumps(txn)))\n return True", "def sign(file, outfile):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n if file and file != \"-\":\n if not os.path.isfile(file):\n raise Exception(\"File %s does not exist!\" % file)\n with open(file) as fp:\n tx = fp.read()\n if tx.find('\\0') > 0:\n with open(file, encoding='utf-16') as fp:\n tx = fp.read()\n else:\n tx = click.get_text_stream('stdin')\n tx = ast.literal_eval(tx)\n tx = mph.sign(tx, reconstruct_tx=False)\n tx = json.dumps(tx, indent=4)\n if outfile and outfile != \"-\":\n with open(outfile, 'w') as fp:\n fp.write(tx)\n else:\n print(tx)", "def transaction(self, uuid):\r\n return tx.Transaction(self, uuid)", "def sign(data):\n return _make.sign(data)", "def database_transaction_to_rlp_transaction(transaction):\n\n nonce = 
transaction['nonce']\n value = parse_int(transaction['value'])\n gas = parse_int(transaction['gas'])\n gas_price = parse_int(transaction['gas_price'])\n\n tx = create_transaction(nonce=nonce, gasprice=gas_price, startgas=gas,\n to=transaction['to_address'], value=value,\n data=data_decoder(transaction['data']),\n v=parse_int(transaction['v']),\n r=parse_int(transaction['r']),\n s=parse_int(transaction['s']))\n\n return tx", "def transact_with(self, peer, transaction_type=None):\n if hex(id(peer)) == hex(id(self.node)):\n return\n \n if not max(peer.trust, 0):\n return None\n\n # Locate the routing table responsible for the peer we're dealing with\n router = filter(lambda x: x.node == peer, self.routers)\n if not any(router): return\n router = router[0]\n \n # Routers can be subclassed to turn their .malicious attr into a property\n # with statistical variance. E.g. to return True every 100th transaction.\n if transaction_type == None:\n transaction_type = not router.malicious\n \n peer.transact(positively=transaction_type, router=self)\n \n #log(\"[%s] %s <-- %s\" % \\\n # (\"+\" if not maliciousness else \"-\", self.node, peer))\n\n # Reinforce the network by making ourselves aware of this peers' peers\n for node in router.peers:\n if node == self.node or node in self.peers:\n continue\n self.peers.append(node.copy(router=self))\n\n # and make the peer routing table aware of our peers.\n for node in self.peers:\n if node == router.node or node in router.peers:\n continue\n router.peers.append(node.copy(router=router))\n\n # NoneType indicates an unreachable peer, True indicates a positive\n # transaction and False means the remote peer can be said to have\n # provided a malicious resource.\n return transaction_type", "def parseHexTx (self, txHex):\n\n data = bytes.fromhex (txHex)\n\n tx = CTransaction ()\n tx.deserialize (io.BytesIO (data))\n\n return tx", "def transaction_from_result(cls, result: JSON, **kwargs: Any) -> Transaction:\n res = cls.normalize_transaction_result(result)\n res.update(kwargs)\n\n return Transaction(\n hash=to_bytes(hexstr=res['hash']),\n sender=to_checksum_address(res['sender']),\n success=res['success'],\n timestamp=int(res['timestamp']),\n to=to_checksum_address(res['to']),\n value=int(res['value']),\n )", "def submit_tx_callback(submit_request: dict):\n log.info('submit_tx_callback received', submit_request=submit_request)\n submit_request = SubmitTransactionRequest(submit_request)\n\n try:\n tx_id = root_wallet.submit_transaction(submit_request.transaction)\n except PERSISTENT_ERRORS as e:\n raise PersistentError(e)\n except Exception as e:\n enqueue_payment_failed_callback(submit_request, str(e))\n raise # crash the job\n payment = Payment.from_payment_request(submit_request, submit_request.sender_address, tx_id)\n enqueue_payment_callback(submit_request.callback, payment, 'receive')", "def get_payu_transaction_id():\n hash_object = sha256(str(int(time.time() * 1000)).encode('utf-8'))\n txnid = hash_object.hexdigest().lower()[0:32]\n return txnid", "def insert_tx_signature(tx, index, signature, public_key):\n tx_obj = deserialize(tx)\n tx_obj[\"ins\"][index][\"script\"] = serialize_script([signature, public_key])\n\n return serialize(tx_obj)" ]
[ "0.6919308", "0.6778266", "0.6380237", "0.627283", "0.61029476", "0.6083491", "0.5987487", "0.58167666", "0.57365465", "0.5712428", "0.56981504", "0.56660604", "0.56095326", "0.55846405", "0.55624753", "0.5552044", "0.54918426", "0.5468147", "0.54531074", "0.54221904", "0.5362187", "0.534654", "0.53127396", "0.5284009", "0.52756524", "0.527408", "0.5249939", "0.5228976", "0.5205368", "0.5204117", "0.5184792", "0.5184099", "0.51577383", "0.5024115", "0.50207984", "0.501832", "0.5013838", "0.50050825", "0.49934402", "0.49842024", "0.49813572", "0.49469128", "0.4915934", "0.49127626", "0.49088678", "0.4904845", "0.49041983", "0.4895515", "0.48945612", "0.48927733", "0.4892498", "0.48902103", "0.48868656", "0.48464224", "0.48424318", "0.48382175", "0.48346844", "0.4827431", "0.48251456", "0.48234284", "0.4820536", "0.48191902", "0.48113778", "0.4792901", "0.4789554", "0.47783285", "0.47665325", "0.4764376", "0.47636002", "0.47605386", "0.4755534", "0.4755117", "0.47501242", "0.4744019", "0.4739814", "0.47337583", "0.4722816", "0.4718421", "0.47162744", "0.4708272", "0.46968496", "0.46907973", "0.4689589", "0.4681352", "0.46762648", "0.46747962", "0.4674683", "0.46741635", "0.46678412", "0.46651658", "0.46552607", "0.46544042", "0.4652649", "0.46491984", "0.4648751", "0.46380457", "0.4632886", "0.4631134", "0.46155992", "0.461439" ]
0.77226585
0
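Editorial note on the row above: its retrieved document is a sendrawtransaction stub that deserializes the given hex and returns b2lx(transaction.GetHash()), i.e. the transaction hash displayed in little-endian hex. A minimal, self-contained sketch of that hash computation, assuming a legacy (non-segwit) serialization and using only hashlib — the function name txid_from_raw_hex is an illustration of ours, not part of the dataset:

import hashlib

def txid_from_raw_hex(raw_tx_hex):
    # Legacy txid: double SHA-256 of the serialized transaction,
    # hex-encoded in reversed (little-endian) byte order.
    raw = bytes.fromhex(raw_tx_hex)
    digest = hashlib.sha256(hashlib.sha256(raw).digest()).digest()
    return digest[::-1].hex()

For non-segwit transactions this should match what b2lx(transaction.GetHash()) returns in the stub, without depending on python-bitcoinlib.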
Return a list of the ids of outer divs with the specified text in a child element.
def ids_of_outer_divs_with_inner_text(self, child_text):
    return self.q(css='div.outer').filter(
        lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')]
    ).attrs('id')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]", "def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n '(SELECT subject_id '\n 'FROM cvterm_relationship '\n 'WHERE object_id = %s '\n 'UNION '\n 'SELECT cr.subject_id '\n 'FROM cvterm_relationship cr '\n 'INNER JOIN children ch ON ch.subject_id = cr.object_id) '\n 'SELECT * FROM children')\n ids = connect(child_ids,id,conn)\n list_of_ids = []\n for item in ids:\n list_of_ids.append(item[0])\n return(list_of_ids)", "def findChildren(widget=None, name=\"\", text=\"\"):\n\t\t# TODO: figure out why the native QWidget.findChildren method\n\t\t# does not seem to work from PythonQt\n\t\tif not widget:\n\t\t\twidget = mainWindow()\n\t\tchildren = []\n\t\tparents = [widget]\n\t\twhile parents != []:\n\t\t\tp = parents.pop()\n\t\t\tparents += p.children()\n\t\t\tif name and p.name.find(name) >= 0:\n\t\t\t\tchildren.append(p)\n\t\t\telif text:\n\t\t\t\ttry:\n\t\t\t\t\tp.text\n\t\t\t\t\tif p.text.find(text) >= 0:\n\t\t\t\t\t\tchildren.append(p)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tpass\n\t\treturn children", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self.vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self._vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def subtype_ids(elements, subtype):\n return [i for (i, element) in enumerate(elements)\n if isinstance(element, subtype)]", "def get_ids(self, sentence):\n return [self.get_id(word) for word in sentence.strip().split(' ')]", "def scan_individual_identifiers(text: str, cpf: bool = True) -> List[str]:\n if cpf:\n regex = re.compile(r\"\\w{3}\\.\\w{3}\\.\\w{3}\\-\\w{2}\")\n else:\n regex = re.compile(r\"\\w{2}\\.\\w{3}\\.\\w{3}/\\w{4}\\-\\w{2}\")\n\n identifiers = re.findall(regex, text)\n return identifiers", "def find_by_xpath(self, element):\n for context_elements in self.within_selector:\n final_elements = context_elements.find_by_xpath(element)\n return final_elements", "def children(word, word_dict):\n res = []\n for i in range(len(word)):\n child = word[:i]+word[i+1:]\n if child in word_dict:\n res.append(child)\n return res", "def findIds(self, query):\t\t\t\t\t\t\t## Multiple Elements\n\t\ttry:\n\t\t\tassert(type(query)) == str or Pattern\n\t\t\treturn self.driver.find_elements_by_id(query)\n\t\texcept Exception as e:\n\t\t\tprint(\"Could not find ID: {}\\n\\n{}\".format(query, e))\n\t\t\treturn -1", "def _find_with_text(self, selector, text):\n stripped = text.strip()\n elements = self.selenium.find_elements_by_css_selector(selector)\n return [e for e in elements if e.text.strip() == stripped]", "def get_child_elements_by_id(self, id):\n for item in self._elements:\n if item.get_parent_id() == id:\n yield item", "def get_child_ids(cur, node):\n sql = \"\"\"\n SELECT\n id\n FROM\n nodes\n WHERE\n parent=%s\n ORDER BY\n position;\n \"\"\"\n cur.execute(sql, (str(node), ))\n for result in cur:\n yield str(result['id'])", "def div_text_list(self):\n return self.q(css='div.test').text", "def activeChildWellIds(self):\n lst=[]\n if self.isReplicateGroup():\n for tc in 
self.activeChildWells():\n lst.extend(tc.activeChildWellIds())\n else:\n if self.wellids is not None and self.wellids[0] is not None:\n wellid=self.wellids[0]\n else:\n wellid=str(self.childWellIndices()[0])\n lst.append(wellid)\n return lst", "def _findTextWithinElement(self, selector):\n parent_text = self._getStrippedText(selector) # everybody has got text I think. so this shouldn't raise IndexError\n if parent_text: return parent_text\n subelements = selector.css('*')\n texts_found = []\n for element in subelements:\n elem_text = self._getStrippedText(element)\n if \"CDATA\" in elem_text: continue # that's a part of the document not intended to be visible\n texts_found.append(elem_text)\n return ' '.join(texts_found)", "def GetExpandableIds(children, length_name):\n # I could write a list comprehension here. Would it make the code clearer?\n result = []\n for child_id, child in enumerate(children):\n if child.canExpand(length_name):\n result.append(child_id)\n return result", "def get_descendant_ids(cur, node):\n sql = \"\"\"\n SELECT\n node\n FROM\n ancestors\n WHERE\n ancestor=%s;\n \"\"\"\n cur.execute(sql, (str(node), ))\n for result in cur:\n yield str(result['node'])", "def getChildElementsByTagName(element: org.w3c.dom.Element, string: str) -> java.util.List:\n ...", "def find_by_css(self, element):\n for context_elements in self.within_selector:\n final_elements = context_elements.find_by_css(element)\n return final_elements", "def extract_data_listing(html):\n id_finder = re.compile(r'PR[\\d]+~')\n return html.find_all('div', id=id_finder)", "def children(parent, data):\n\n kids = []\n for pid in data:\n if data[pid][\"parentId1\"] == parent or data[pid][\"parentId2\"] == parent:\n kids.append(pid)\n\n return kids", "def get_ids(cls, text):\n tokens = TokenizerContainer.TOKENIZER.tokenize(text)\n token_ids = TokenizerContainer.TOKENIZER.convert_tokens_to_ids(tokens)\n input_ids = token_ids + [0] * (cls.MAX_LEN-len(token_ids))\n return tokens, input_ids", "def get_ids(self) -> List[str]:", "def find_elements_inside_element(self, parent_element: Union[WebElement, Tuple[By, str]],\n children_element_locator: Tuple[By, str], wait_time=10,\n skip_exception=False) -> List[WebElement]:\n parent_element = self.find_element(parent_element)\n for i in range(wait_time):\n by_type, value = children_element_locator\n if by_type == By.CSS_SELECTOR:\n children = parent_element.find_elements_by_css_selector(value)\n elif by_type == By.XPATH:\n children = parent_element.find_elements_by_xpath(value)\n else:\n children = parent_element.find_elements(children_element_locator)\n if len(children):\n return children\n time.sleep(1)\n else:\n if not skip_exception:\n raise TimeoutException(f'Elements was not found in {wait_time} seconds')\n return []", "def get_children(search_tag, tag_list):\n list_return = []\n\n for tag in tag_list:\n if str(tag.parent) == str(search_tag):\n list_return.append(tag)\n list_return.extend(get_children(tag, tag_list))\n return list(set(list_return)) # This will return a list of unique elements", "def getIDs():", "def occurence(main_seq,sub_seq):\n start= 0\n indices =[]\n while True:\n start = main_seq.find(sub_seq,start)\n if start > 0:\n indices.append(start)\n else:\n break\n start +=1\n return indices", "def get_child_ids(self, job_specifier, project=None, status=None):\n if project is None:\n project = self._project\n id_master = self.get_job_id(project=project, job_specifier=job_specifier)\n if id_master is None:\n return []\n else:\n if status is not None:\n 
id_lst = self._job_table[\n (self._job_table.masterid == id_master) & (self._job_table.status == status)].id.values\n else:\n id_lst = self._job_table[(self._job_table.masterid == id_master)].id.values\n return sorted(id_lst)", "def vectorize(self,text):\r\n \r\n lv_active = set()\r\n words = word_tokenize(text)\r\n for word in words:\r\n if word in self.tree:\r\n ancestors = self.tree.word_ancestors(word)\r\n lv_active.update(ancestors)\r\n \r\n return self.nl.isin(lv_active).values", "def leafs_ids(cls, tree_depth):\n return cls.nodes_at_depth(tree_depth)", "def get_order_from_tree(ids, tree_text):\r\n tree = parse_newick(tree_text, PhyloNode)\r\n ordered_ids = []\r\n for tip in tree.iterTips():\r\n if tip.Name in ids:\r\n ordered_ids.append(tip.Name)\r\n return names_to_indices(ids, ordered_ids)", "def get_pids(pid):\n\n pids=set([pid])\n for child in get_children(pid):\n pids.update(traverse_tree(child,pids))\n \n return list(pids)", "def find(self, text, term):\n\t\tlistOfResults = list()\n\n\t\tcurrentIndex = 0\n\t\ttermLength\t = len(term)\n\t\tappend\t\t = listOfResults.append\n\n\t\twhile currentIndex >= 0:\n\t\t\tcurrentIndex = text.find(term, currentIndex+1)\n\t\t\tappend((currentIndex, currentIndex+termLength))\n\n\t\t# Return listOfResults[:-1] because the last tuple contains -1 (negative one)\n\t\treturn listOfResults[:-1]", "def get_translated_ids(id):", "def getTargets(idlist):\n targets = []\n for xmlid in idlist:\n #the <text> element of the mnemonic or accelerator\n elm = findElementWithId(\"text\", xmlid)\n #the parent element of the mnemonic or accelerator\n parentTag = elm.parentNode.parentNode.tagName\n if parentTag == \"action\" or parentTag == \"container\" or parentTag == \"control\" or parentTag == \"dialog\":\n caption = getFirstChildWithTagName(elm.parentNode.parentNode, \"caption\")\n textid = getTextIdForCaption(caption)\n targets.append(textid)\n return targets", "def test_search_subnode_attribute(self):\n\n lista = []\n for (_, value) in self.parser.search(self.xml, 'node@id'):\n lista.append(value)\n self.assertEqual(lista, ['1', '2', '3'])", "def findAll(self, text):\n\n\t\tfindAllResults = list()\n\n\t\t# CALL THESE JUST ONCE BEFORE LOOP(S)\n\t\tfindInstance = self.findInstance\n\t\tappend \t\t = findAllResults.append\t\t \n\t\t# - - - - - - - - - - - - - - - - - -\n\n\t\tfor i in xrange(len(self.toWORD)):\n\n\t\t\tword = self.toWORD[i]\n\n\t\t\tif i == 0:\n\t\t\t\t#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*#\n\t\t\t\t# Skip the zeroeth index to avoid including punctuation in the findAllResults list\t\t #\n\t\t\t\t#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*#\n\t\t\t\tpass\n\n\t\t\telse:\n\t\t\t\tfor w in word:\n\n\t\t\t\t\tif len(w) > 0:\n\t\t\t\t\t\tresults = findInstance(text = text, term = w)\n\n\t\t\t\t\t\tif len(results) > 0:\n\t\t\t\t\t\t\tappend((i, results))\n\n\t\treturn findAllResults", "def find_elements_by_text(self,param={},ignore_error_handle = False):\n message = {};\n step = 'find all elements by text ' + param.get('text',None) + ' on current page';\n text = param.get('text',None);\n try:\n elements = self.driver.find_elements(by=By.NAME,value=text);\n message = self.feedback.feedback_action_ok(step);\n message['elements'] = elements;\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def _get_parents_from_structure(self, block_id, structure):\r\n items = 
[]\r\n for parent_id, value in structure['blocks'].iteritems():\r\n for child_id in value['fields'].get('children', []):\r\n if block_id == child_id:\r\n items.append(parent_id)\r\n\r\n return items", "def test_custom_ids(self):\n it = [\n \"[[Chapter]]{'id': '/base/chapter/1'} Chapter I\",\n \"This is chapter I text\",\n \"[[Article]]{'id': '/base/article/1'} Article I\",\n \"This is article I text\",\n ]\n\n descriptor = {\n 'components': ['Chapter', 'Section', 'Sub-section', 'Article'],\n 'patterns': ['Chapter', 'Section', 'Sub-section', 'Article']\n }\n\n doc = parse_iterable(it, descriptor)\n\n result = [n for n in doc.graph.nodes(data=True)]\n\n expected = [\n ('ROOT [0]', {'meta': 'root', 'level': 0, 'text': [], 'pad': False, 'id': '/root'}),\n ('Chapter [1]', {'meta': 'Chapter', 'level': 1, 'pad': False, 'text': [\"Chapter I\", 'This is chapter I text'], 'id': '/base/chapter/1'}),\n ('Article [2]', {'meta': 'Article', 'level': 4, 'pad': False, 'text': [\"Article I\", 'This is article I text'], 'id': '/base/article/1'})\n ]\n\n self.assertListEqual(result, expected)", "def get_elements(self, css=None, text=None):\n if css is None and text is None:\n raise ValueError()\n\n # Use ordered sets so we don't muck up the ordering if the caller specifies\n # two or more arguments. This is a bit over-convoluted for having only two\n # ways to query (css and text) but the pattern makes it easy to plug in\n # more ways.\n items = None\n def update(new_items):\n nonlocal items\n if items == None:\n items = OrderedSet(new_items)\n else:\n items = items & OrderedSet(new_items)\n\n if text is not None:\n update([e for e in get_elements(self, css=\"*\") if e.text == text])\n if css is not None:\n update(self.find_elements_by_css_selector(css))\n\n return items", "def _get_matching_node_ids(self, node_name):\n try:\n with closing(self.connection) as con:\n with con:\n with closing(con.cursor()) as cursor:\n cursor.execute(\"\"\"\n SELECT id\n FROM nodes\n WHERE name LIKE (?)\n \"\"\", (node_name,))\n res = cursor.fetchall()\n\n except sqlite3.OperationalError as e:\n print(\"ERROR: An error occurred when retrieving node ids: {}\".format(e))\n\n if len(res) == 0:\n print(\"ERROR: Could not find node ID for name '{0}'.\".format(node_name))\n return []\n\n elif len(res) > 1:\n print(\"Found multiple node IDs for name '{0}', returning first result.\".format(node_name))\n\n # e.g. 
[(10,), (11,)] => [10, 11]\n return [x[0] for x in res]", "def find_all_sub(self, sub):\n try:\n return [i for i in range(len(self.__dna)) if self.__dna.startswith(sub, i)]\n except ValueError:\n raise ValueError", "def get_child_ids(forum):\n forum_ids = [forum.id]\n if forum.children:\n for child in forum.children:\n forum_ids.extend(\n get_child_ids(child) # Get the children from the children\n )\n return forum_ids", "def getItemsInContainer(elm):\n items = []\n items.extend(getAllChildrenWithTagName(elm, \"action\"))\n items.extend(getAllChildrenWithTagName(elm, \"container\"))\n switches = getAllChildrenWithTagName(elm, \"switch\")\n for sw in switches:\n items.extend(getAllChildrenWithTagName(sw, \"action\"))\n items.extend(getAllChildrenWithTagName(sw, \"container\"))\n return items", "def _subnode_ids(self):\n for ticket in self:\n ticket.subnode_ids = self.search([\n ('parent_id', '=', ticket.id),\n ('type.has_children', '=', True)])", "def get_dependent_control_ids(self, control_id: str) -> List[str]:\n children: List[str] = []\n control = self.get_control(control_id)\n if control.controls:\n new_controls = self._get_all_controls_in_list(control.controls, True)\n children.extend([con.id for con in new_controls])\n return children", "def find_words(root: TrieNode):\n found = []\n\n if root.word:\n found.append(root.data)\n\n for child in root.children:\n for el in find_words(child): \n found.append(el)\n\n return found", "def query_parent(selectors, tree_item):\n return [subitem for subitem in iterate_parent(tree_item)\n if all(selectors, subitem)]", "def positions(self, searchstr: str):\n out = []\n for x in range(0, len(self.sa)):\n sub = self.sa[x]\n if searchstr == sub[0:len(searchstr)]:\n out.append(x)\n return out\n \n pass", "def lookup(root: dict, query: str, exact: bool = False) -> List[Set[int]]:\n if not query:\n return set()\n\n word_ids: List[Set[int]] = [] # ids of items that correspond to query\n for word in preprocess_words(query):\n node = root\n for c in word:\n node: Optional[dict] = node.get(c) # type: ignore\n if not node:\n # dead-end for this word\n word_ids.append(set())\n break\n else:\n word_ids.append(collect(node, exact))\n\n return word_ids", "def get_ids_from_container_name(base_container_name):\n container_prefix = base_container_name + \".\"\n lines = dockercall(\"ps\", \"-a\").splitlines()\n ids = []\n for line in lines[1:]:\n parts = line.strip().split()\n id = parts[0]\n name = parts[-1]\n if name == base_container_name or name.startswith(container_prefix):\n ids.append((name, id)) # name first, for sorting\n ids.sort()\n return {id: name for name, id in ids}", "def extract_events(text: str) -> (List[str], List[str]):\n alert_ids = re.findall(r'\"anomalous\"(?:.+\\n.+)+\"tsId\"\\s:\\s\"(.+)\"', text)\n clear_ids = re.findall(r'\"ok\"(?:.+\\n.+)+\"tsId\"\\s:\\s\"(.+)\"', text)\n return alert_ids, clear_ids", "def get_element_indices_within_rectangle(self, xmin, xmax, zmin, zmax):\n centroids = self.get_element_centroids()\n indices_list = []\n for nr, (x, z) in enumerate(centroids):\n if x >= xmin and x <= xmax and z >= zmin and z <= zmax:\n indices_list.append(nr)\n return np.array(indices_list)", "async def get_child_ids(db, post_id):\n sql = \"SELECT id FROM hive_posts WHERE parent_id = :id AND is_deleted = '0'\"\n return await db.query_col(sql, id=post_id)", "def get_children(pid):\n try:\n stdout=subprocess.check_output([\"ps\",\"--ppid\",pid,\"-o\",\"pid\"])\n except subprocess.CalledProcessError:\n stdout=[]\n\n pids=[]\n if stdout:\n 
pids=process_ps_stdout(stdout)\n\n return pids", "def getTestsIds():\n with driver.session() as s:\n ids = s.write_transaction(getTestsId)\n\n tIds = []\n for idEl in ids:\n tIds.append(idEl[\"ID(t)\"])\n\n return tIds", "def subwords(txt, sub):\n txt = txt.lower()\n txt = txt.replace('’', '\\'')\n sub = sub.lower().replace(' ', '')\n it = 0\n indices = []\n for c in sub:\n try:\n while txt[it] != c:\n it += 1\n indices.append(it)\n except (IndexError):\n print('Cannot find secret in text.')\n return []\n return indices", "def get_element_commit_ids(sysmlId):\n elements = get_elements_from_elasticsearch(sysmlId=sysmlId)\n commits = []\n for element in elements:\n commits.append(element[\"_source\"][\"_commitId\"])\n return commits", "def Find(self, children, sink):\n\n tkns = [];\n for child in children:\n key = child.word;\n if not child.word: key = child.tag;\n tkns.append(key);\n self.FindFromTokens(tkns, sink);", "def getContentTransito(self, chave):\n content = self.tree.xpath(\"string(//div[@id='%s'])\" % chave).split(' ')[0]\n return content", "def get_text_reply_ids(self):\n if not self._linked_comment:\n return []\n replies = Comment.objects.filter(replied_comment=self._linked_comment)\n return [ids[0] for ids in replies.exclude(reply_text='').order_by('id').values_list('id')]", "def find_label_element(self, label_text, container):\n element = next(iter(list(map(lambda x: self.find_first_div_parent(x), container.find_all(text=re.compile(f\"^{re.escape(label_text)}\" + r\"(\\*?)(\\s*?)$\"))))), None)\n if element is None:\n return []\n\n return element\n\n '''\n next_sibling = element.find_next_sibling(\"input\")\n # next_input = element.contents.find(\"input\")\n if next_sibling:\n # if next_input:\n return [next_sibling]\n # return [next_input]\n else:\n return []\n '''", "def getElementIds(self):\n\t\ttry:\n\t\t\treturn self.collector.ToElementIds()\n\t\texcept:\n\t\t\tself.all()\n\t\t\treturn self.collector.ToElementIds()", "def doFindAll(self, str):\n matches = []\n for value in self.doId2do.values():\n if repr(value).find(str) >= 0:\n matches.append(value)\n return matches", "def get_searchable_content(self, value):\n content = []\n\n if value:\n for child in value:\n content.extend(child.block.get_searchable_content(child.value))\n\n return content", "def members(self, x):\n root = self.find(x)\n return [i for i in range(self.n) if self.find(i) == root]", "def get_camp_ids_containing_str(marketer_id, string):\n all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0])\n return [x.get(\"id\") for x in all_campaigns if string in x[\"name\"]]", "def containing(letter, text):\n return([word for word in text if word.count(letter) >= 1])", "def load_xml_text_data(parent_element, text_list_name):\n data_from_text = []\n list_item_name = get_singular_from_plural(text_list_name)\n\n for text_element in parent_element.findall(list_item_name):\n new_data = convert_to_int_if_numeric(text_element.text)\n data_from_text.append(new_data)\n\n return data_from_text", "def find_parents(self, tagname):\n res = []\n if self._tagname == tagname:\n res = [self]\n if self._parent is not None:\n res += self._parent.find_parents(tagname)\n return res", "def findAllInstances(text, term):\n index = 0 - len(term)\n text = text.lower()\n term = term.lower()\n try:\n while True:\n index = text.index(term, index + len(term))\n yield index\n except ValueError:\n pass", "def get_electrode_indeces(electrical_series, electrode_ids):\n electrode_table_region = 
list(electrical_series.electrodes.to_dataframe().index)\n return [elect_idx for elect_idx, elect_id in enumerate(electrode_table_region) if elect_id in electrode_ids]", "def children_ids(self):\n return self._children_ids", "def get_ordered_ids(tree):\n ordered_ids = []\n ordered_ids.extend(id(node) for node in tree.gen_tips())\n ordered_ids.extend(id(node) for node in tree.gen_internal_nodes())\n return ordered_ids", "def parentsUntil(self,selector):\n nList = self.htmlDom.find(selector)\n parentsList = []\n tmpList = []\n selectedNodesList = []\n for node in self.nodeList:\n if not node.ancestorList:\n node.generateAncestorList()\n tmpList = node.ancestorList\n for selectedNode in nList.nodeList:\n try:\n index = tmpList.index( selectedNode )\n selectedNodeList = tmpList[:index]\n parentsList += self.getUniqueNodes( parentsList, selectedNodeList )\n break\n except ValueError:\n pass\n else:\n parentsList += self.getUniqueNodes( parentsList, tmpList )\n parentsList = sorted( parentsList, key = lambda x: x.pos )\n return HtmlNodeList( parentsList, self.htmlDom, self.nodeList, self )", "def position(element1, root=None):\n \n position = [] \n current = element1\n while (current.getparent() is not None) and (current is not root):\n parent = current.getparent()\n #find the index of current under parent\n index = 0\n for i in parent:\n if i is current: break\n index += 1\n position.insert(0, index + 1)\n current = parent\n \n position.insert(0, 1) # for the root element\n return position", "def search(self, term):\n results = set() # Set of elements matching search term.\n element = [] # Current element reached in search.\n def _search(m, node, i):\n # Having just matched m, search for term[i:] starting at node.\n element.append(m)\n if i == len(term):\n if node._end:\n results.add(''.join(element))\n elif term[i] == '*':\n _search('', node, i + 1)\n for k, child in node._children.items():\n _search(k, child, i)\n elif term[i] in node._children:\n _search(term[i], node._children[term[i]], i + 1)\n element.pop()\n _search('', self, 0)\n return results", "def find_all(st, sub):\n\n if not sub: return None\n if sub[0] not in st.root.trans: return None\n \n found, i, s = False, 0, st.root\n scaned = 0 # length of the scaned\n while True:\n k, p, s = s.trans[sub[i]]\n len1, len2 = p-k+1, len(sub)-i\n if len1 >= len2:\n if st.text[k:k+len2] == sub[i:]:\n found, scaned = True, scaned+len1\n break\n else:\n if st.text[k:k+len1] == sub[i:i+len1]:\n i, scaned = i+len1, scaned+len1\n else: break\n if found:\n # shift_of_suffix = len(st.text) - len(suffix)\n leaf_depthes = get_leaf_depthes(s)\n return [len(st.text)-x-scaned for x in leaf_depthes]\n\n return None", "def get_layer_ids(element):\n \n st='./gbxml:LayerId/@layerIdRef'\n return element.xpath(st,namespaces=ns)", "def subnotebook_get_titles_ids(self):\n tabs = dict()\n for tab_id in range(0, self.subnotebook.index(\"end\")):\n tabs[self.subnotebook.tab(tab_id, \"text\")] = tab_id\n logger.debug(tabs)\n return tabs", "def get_parent(self, element):\n return element.find_elements_by_class_name(\"wrap-text\")[2].get_attribute(\"innerHTML\").strip()", "def get_descendant_elements(self, xpath) -> list:\n tmp_xpath = self._chain_xpath(xpath)\n tmp_loc = (By.XPATH, tmp_xpath)\n return self._wait.until(EC.visibility_of_all_elements_located(tmp_loc))", "def getItemsInDialog(elm):\n items = []\n items.extend(getAllChildrenWithTagName(elm, \"control\"))\n return items", "def get_cpd_ids(string):\n return [x for x in string.split(\" \") if 
x.startswith(\"C\")]", "def find(self, word):\n currnode = self.root\n\n for letter in word:\n if letter not in currnode.children:\n return Set()\n currnode = currnode.children[letter]\n\n return currnode.pages", "def text_to_id(text, word_to_id_dict):\n return [word_to_id_dict[word] for word in text.split(\" \") if word in word_to_id_dict]", "def find_elements_by_partial_text(self,param,ignore_error_handle = False):\n message = {};\n step = 'find elements by partial text:' + str(param.get('partial_text',None));\n partial_text = str(param.get('partial_text',None));\n try:\n elements = self.driver.find_elements_by_partial_text(partial_text);\n message = self.feedback.feedback_action_ok(step);\n message['elements'] = elements;\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle)\n finally:\n return message;", "def doFindAllMatching(self, str):\n matches = []\n for value in self.doId2do.values():\n if re.search(str,repr(value)):\n matches.append(value)\n return matches", "def getSiblings():", "def find_all( source, substring, start=None, end=None, overlap=False ):\n return [x for x in find_all_iter( source, substring, start, end, overlap )]", "def subtree_matching(self, subtree):\n\t\t#TODO implement this in a faster way\n\t\ttext = self.preorder_traverse_to_list()\n\t\tpattern = subtree.preorder_traverse_to_list()\n\n\t\tprint text\n\t\tprint pattern\n\n\t\tmatches = []\n\t\tfor i in range(len(text)):\n\t\t\tif text[i:i+len(pattern)] == pattern:\n\t\t\t\tmatches.append(i)\n\t\treturn matches", "def get_child_indices(idx: int):\n return 2 * idx + 1, 2 * idx + 2", "def parents( self, selector = None ): \n tmpList = []\n for node in self.nodeList:\n if not node.ancestorList:\n node.generateAncestorList()\n tmpList += self.getUniqueNodes( tmpList, node.ancestorList )\n if selector:\n return HtmlNodeList( tmpList, self.htmlDom, self.nodeList, self ).filter( selector )\n else:\n tmpList = sorted( tmpList, key = lambda x: x.pos ) \n return HtmlNodeList( tmpList, self.htmlDom, self.nodeList, self)", "def search_id(root, pid):\n for page in root.iter('page'):\n if pid == int(page.find('id').text):\n return page.find('revision').find('text').text", "def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]", "def getIndexes(string1,string2):\n ret = []\n ind = string1.find(string2)\n \n while (ind > -1 and ind < len(string1)):\n ret.append(ind)\n ind = string1.find(string2,ind + 1)\n \n return ret", "def findInstance(self, text, term):\n\t\tindexList = set()\n\t\tindex = 0\n\t\ttext = text.upper()\n\t\tterm = \" {0} \".format(term.upper())\n\n\t\t# CALL THESE JUST ONCE BEFORE LOOP(S)\n\t\tadd = indexList.add\n\t\tfind = text.find\t \n\t\t# - - - - - - - - - - - - - - - - - -\n\n\t\twhile True:\n\t\t\tindex = find(term, index)\n\t\t\tif index == -1: \n\t\t\t\treturn sorted(indexList)\n\t\t\telse:\n\t\t\t\tadd(index + len(term[1:-1]) + 1)\n\t\t\t\tadd(index + 1)\n\t\t\t\tindex += len(term)" ]
[ "0.55548036", "0.5479126", "0.54318756", "0.54250884", "0.53954", "0.5378344", "0.5358235", "0.532862", "0.53090286", "0.52930194", "0.5247154", "0.5223819", "0.5181133", "0.51650614", "0.5156875", "0.51566947", "0.512827", "0.5109152", "0.5081774", "0.5059653", "0.5021192", "0.5018383", "0.49994254", "0.49627846", "0.49329385", "0.49291104", "0.49055174", "0.48959976", "0.48937145", "0.48680916", "0.48606366", "0.48477486", "0.48307118", "0.48272884", "0.4819024", "0.48097506", "0.4803559", "0.47849387", "0.4773739", "0.4770679", "0.4759471", "0.47543085", "0.47460446", "0.4744075", "0.47312298", "0.47254068", "0.4703863", "0.46998802", "0.46952975", "0.46943995", "0.4664719", "0.46624112", "0.46577168", "0.46492475", "0.46402675", "0.4635736", "0.46207464", "0.46125162", "0.4612055", "0.46108004", "0.46103936", "0.46087077", "0.4606447", "0.4606436", "0.46055916", "0.46036246", "0.45992762", "0.45916694", "0.4588196", "0.45869404", "0.45831412", "0.45823866", "0.4575483", "0.45727614", "0.45690903", "0.4565167", "0.45602903", "0.45587212", "0.45563397", "0.4555641", "0.45471898", "0.45455977", "0.45391563", "0.45365098", "0.45244417", "0.4522172", "0.45189717", "0.45116797", "0.45094287", "0.45069525", "0.4504568", "0.44896686", "0.4486609", "0.44787568", "0.44622216", "0.44571167", "0.4456627", "0.4456235", "0.44476113", "0.4439035" ]
0.877173
0
Wait for scripts to finish and then return the contents of the ``output`` div on the page.
def output(self): return super(RequireJSPage, self).output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait(self):\n\t\tself.wait_window(self)\n\t\treturn self.result", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def wait_for_ajax_complete():\r\n javascript = \"\"\"\r\n var callback = arguments[arguments.length - 1];\r\n if(!window.jQuery) {callback(false);}\r\n var intervalID = setInterval(function() {\r\n if(jQuery.active == 0) {\r\n clearInterval(intervalID);\r\n callback(true);\r\n }\r\n }, 100);\r\n \"\"\"\r\n # Sometimes the ajax when it returns will make the browser reload\r\n # the DOM, and throw a WebDriverException with the message:\r\n # 'javascript error: document unloaded while waiting for result'\r\n for _ in range(5): # 5 attempts max\r\n try:\r\n result = world.browser.driver.execute_async_script(dedent(javascript))\r\n except WebDriverException as wde:\r\n if \"document unloaded while waiting for result\" in wde.msg:\r\n # Wait a bit, and try again, when the browser has reloaded the page.\r\n world.wait(1)\r\n continue\r\n else:\r\n raise\r\n return result", "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def waitOutput( self, verbose=False ):\n log = info if verbose else debug\n output = ''\n while self.waiting:\n data = self.monitor()\n output += data\n log( data )\n return output", "def output(self):\n\t\tif (self.isLoaded()):\n\t\t\treturn self.loader.output()", "def wait_ajax(self, lib='JQUERY', timeout=30):\n page_logger.debug('Waiting for AJAX using %s' % lib)\n js = self.wait_ajax_script.get(lib, 'return true;')\n WebDriverWait(self.driver, timeout).until(\n lambda driver: driver.execute_script(js))", "def run_javascript_with_result(self, code_str):\n assert self._status is self.WindowStatus.READY, 'This method can only be called after the window is ready'\n\n _uniq_key = hex(hash(f'{time.time()}-{random()}'))\n self.call_engine_function('executeThenPoll', _uniq_key, code_str)\n return RuntimeManager.get_instance().JavascriptReturned.wait_for_value(_uniq_key)", "def finished(self):\n qbytearray = self.process.readAllStandardOutput()\n locale_codec = QTextCodec.codecForLocale()\n output = to_text_string(locale_codec.toUnicode(qbytearray.data()))\n testresults = self.load_data()\n self.sig_finished.emit(testresults, output)", "def submit_scripts(self, out):\n program_folder = os.path.join(out, self.out)\n for config in self.configurations:\n config.submit_script(program_folder)\n return None", "def wait_for_page_load(self):\n pass", "def await_simulation(self):\n\n # Test if overall statistics can be requested\n while True:\n d = self.BIVAS_API.get_output_overallstatistics(self.scenarioID)\n\n if (d is not None) and (d.status_code == 200):\n break\n\n logger.info('Waiting for BIVAS to finish...')\n time.sleep(60)\n\n logger.info(d.text)\n\n logger.info('Finished!')\n\n # Close BIVAS\n logger.info('Closing BIVAS')\n os.system('taskkill /f /im Bivas.exe')\n time.sleep(5)", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = None\n return self.outputBuffer.getvalue()", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = 
None\n return self.outputBuffer.getvalue()", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = None\n return self.outputBuffer.getvalue()", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = None\n return self.outputBuffer.getvalue()", "def execute_script(self):\n\n # render script variables\n script = self.replable.render_script_from_flo(self.flo, **self.template_engine_kwargs)\n\n # run over script lines\n for cmd in script.split(\"\\n\"):\n\n # no empty lines\n if cmd:\n\n self.brief_logger.info(cmd)\n if self.verbose_logger and self.log_file_echo_command:\n self.verbose_logger.info(\"$> '%s'\", cmd)\n\n # execute command\n cmd = cmd + \"\\n\"\n self.sock.send(cmd.encode())\n\n res = self.wait_for_command_execution(timeout=self.timeout)\n # read all data which is not covered by the regex used for stream searching\n # TODO: use loop here?!\n res += read_remaining_data(self.sock, SOCKET_READ_BUF_SIZE)\n\n # apply the custom check function\n if self.return_value_checker is not None:\n try:\n self.return_value_checker(cmd, res)\n except Exception as e:\n raise REPLUnexpectedResult(\n \"The following output is unexpected to the method `return_value_checker`:\\n%s\" % res,\n caused_by=e)\n\n yield res", "def execute():\n # print('Wow')\n result = gui.controller.main('execute')\n print(result)\n\n return render_template('results.html', data=json.dumps(result))", "def execute_script(self, script, asynchronous=False):\n pass", "def wait(self):\n self.mainloop().wait()", "def execute_javascript(self, code):\n return self.loop.run_until_complete(self.async_func.execute_javascript_async(code))", "def evaluate_in_page(self, js_string: str) -> Awaitable[Any]:", "def eighth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.ninth_page.wait_for_page()", "def script_content(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"script_content\")", "def end_rendering(self, output):\n if self.wrapper_to_generate:\n output = self.div(output, id=self.id, class_='nagare-generated nagare-async-view')\n\n return output", "def wait_for_requirejs(dependencies=None):\r\n if not dependencies:\r\n dependencies = [\"jquery\"]\r\n # stick jquery at the front\r\n if dependencies[0] != \"jquery\":\r\n dependencies.insert(0, \"jquery\")\r\n\r\n javascript = \"\"\"\r\n var callback = arguments[arguments.length - 1];\r\n if(window.require) {{\r\n requirejs.onError = callback;\r\n var unloadHandler = function() {{\r\n callback(\"unload\");\r\n }}\r\n addEventListener(\"beforeunload\", unloadHandler);\r\n addEventListener(\"unload\", unloadHandler);\r\n require({deps}, function($) {{\r\n setTimeout(function() {{\r\n removeEventListener(\"beforeunload\", unloadHandler);\r\n removeEventListener(\"unload\", unloadHandler);\r\n callback(true);\r\n }}, 50);\r\n }});\r\n }} else {{\r\n callback(false);\r\n }}\r\n \"\"\".format(deps=json.dumps(dependencies))\r\n for _ in range(5): # 5 attempts max\r\n try:\r\n result = world.browser.driver.execute_async_script(dedent(javascript))\r\n except WebDriverException as wde:\r\n if \"document unloaded while waiting for result\" in wde.msg:\r\n result = \"unload\"\r\n else:\r\n raise\r\n if result == \"unload\":\r\n # we ran this on the wrong page. 
Wait a bit, and try again, when the\r\n # browser has loaded the next page.\r\n world.wait(1)\r\n continue\r\n elif result not in (None, True, False):\r\n # We got a require.js error\r\n # Sometimes requireJS will throw an error with requireType=require\r\n # This doesn't seem to cause problems on the page, so we ignore it\r\n if result['requireType'] == 'require':\r\n world.wait(1)\r\n continue\r\n\r\n # Otherwise, fail and report the error\r\n else:\r\n msg = \"Error loading dependencies: type={0} modules={1}\".format(\r\n result['requireType'], result['requireModules'])\r\n err = RequireJSError(msg)\r\n err.error = result\r\n raise err\r\n else:\r\n return result", "def wait_for_load(driver):\n html = driver.page_source\n time.sleep(0.5)\n while html != driver.page_source:\n html = driver.page_source\n time.sleep(0.5)", "def waitUntilSuccess():", "def output(self):\n text_list = self.q(css='#output').text\n\n if len(text_list) < 1:\n return None\n return text_list[0]", "def getscript(self, name):\n code, data, content = self.__send_command(\n \"GETSCRIPT\", [name.encode(\"utf-8\")], withcontent=True)\n if code == \"OK\":\n lines = content.splitlines()\n if self.__size_expr.match(lines[0]) is not None:\n lines = lines[1:]\n return u\"\\n\".join([line.decode(\"utf-8\") for line in lines])\n return None", "async def wait_until_done(self) -> None:\n ...", "def get_content(self):\n url = self.build_url()\n try:\n self.content_page = requests.get(url)\n if not(self.content_page.status_code == requests.codes.ok):\n self.content_page.raise_for_status()\n except requests.exceptions.RequestException as ex:\n logging.info('A requests exception has ocurred: ' + str(ex))\n logging.error(traceback.format_exc())\n sys.exit(0)", "def get_results_from_script(self, script):\n raise NotImplementedError()", "def wait(self, timeout: float = None) -> CompletedProcess: # type: ignore\n if self.stdout is None:\n return CompletedProcess(self.args, returncode=super().wait(timeout=timeout), stdout=None)\n else:\n stdout = []\n while self.poll() is None:\n stdout.append(line := self.stdout.readline())\n\n if self.verbose:\n print(line, end=\"\")\n\n return CompletedProcess(self.args, returncode=self.poll(), stdout=\"\".join(stdout))", "def wait_until_finished(self):\n for processor in self._processors.values():\n while not processor.done:\n time.sleep(0.1)", "def wait():\n pass", "def run_async(self) -> StoryHolderDict:\n self.add_futures(self.j_dict)\n loop = asyncio.get_event_loop()\n get_url_futures = asyncio.gather(\n *[f for f in self.responses.values()])\n find_text_futures = asyncio.gather(\n *[f for f in self.find_futures_list])\n\n final_future = asyncio.gather(get_url_futures, find_text_futures)\n\n if not run_from_ipython:\n loop.run_until_complete(final_future)\n else:\n asyncio.ensure_future(final_future)\n return NewsDump.story_dump", "def wait_until_loading_is_complete(self, locator=None):\n locator = lex_locators[\"body\"] if locator is None else locator\n try:\n self.selenium.wait_until_page_contains_element(locator)\n self.wait_for_aura()\n # this knowledge article recommends waiting a second. I don't\n # like it, but it seems to help. 
We should do a wait instead,\n # but I can't figure out what to wait on.\n # https://help.salesforce.com/articleView?id=000352057&language=en_US&mode=1&type=1\n time.sleep(1)\n\n except Exception:\n try:\n self.selenium.capture_page_screenshot()\n except Exception as e:\n self.builtin.warn(\"unable to capture screenshot: {}\".format(str(e)))\n raise", "def submit_scripts(self, out):\n program_folder = os.path.join(out, self.out)\n for config in self.configurations:\n config.submit_CaVEMan_scripts(\n program_folder, self.path2exe, self.ref_fai, self.file1, self.file2,\n self.config_file, self.qsub_dir, self.mstep_script, self.merge_script, self.estep_script\n )\n return None", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # ESRCH means the process finished/died between last check and now\n if e.errno != errno.ESRCH:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"TaskWarrior stopped responding and couldn't be killed\")", "def _exec_and_wait(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc.wait()\n return proc.stdout.read()", "def get_test_results(self):\n element = self.find_element_by_id(self.results_id, wait=True)\n\n if element:\n return element.text\n else:\n return False", "def wait() -> None:\n\n process_input(input())", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. 
If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def second_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.third_page.wait_for_page()", "def third_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.fourth_page.wait_for_page()", "def _wait_for_output(self):\n # Here we should get an empty list or list with a tuple [(fd, event)]\n # When we get list with a tuple we can use readline method on\n # the file descriptor.\n poll_result = self.poll_obj.poll(0)\n\n if poll_result:\n line = self.output().readline()\n if self._banner.match(line):\n return True\n\n return False", "def runScript(self, commands):\n sem = defer.DeferredSemaphore(1)\n dl = [sem.run(self.runCommand, command) for command in commands]\n return defer.gatherResults(dl)", "def reload_and_trigger_output(self):\n self.browser.refresh()\n self.wait_for_js() # pylint: disable=no-member\n self.q(css='div#fixture button').first.click()", "def wait_for_version_reply(self):\n frontends = self.get_frontends()\n for frontend in frontends:\n # we abuse this function:\n while frontend.get_afo_state() != AfoServerState.LEADER:\n progress(\".\")\n time.sleep(0.1)", "def make_all_html_results(cmd, folder_names = [], jobs=[]):\n run = cmd.results.current['run_name']\n if not os.path.exists(pjoin(cmd.me_dir, 'HTML', run)):\n os.mkdir(pjoin(cmd.me_dir, 'HTML', run))\n \n unit = cmd.results.unit\n P_text = \"\" \n Presults = collect_result(cmd, folder_names=folder_names, jobs=jobs)\n \n for P_comb in Presults:\n P_text += P_comb.get_html(run, unit, cmd.me_dir) \n P_comb.compute_values()\n if cmd.proc_characteristics['ninitial'] == 1:\n P_comb.write_results_dat(pjoin(cmd.me_dir, 'SubProcesses', P_comb.name,\n '%s_results.dat' % run))\n \n Presults.write_results_dat(pjoin(cmd.me_dir,'SubProcesses', 'results.dat')) \n \n fsock = open(pjoin(cmd.me_dir, 'HTML', run, 'results.html'),'w')\n fsock.write(results_header)\n fsock.write('%s <dl>' % 
Presults.get_html(run, unit, cmd.me_dir))\n fsock.write('%s </dl></body>' % P_text)\n\n return Presults.xsec, Presults.xerru", "def wait():\n time.sleep(1)", "def run(self):\n\n try:\n # Get the content from this page\n if self.verbose:\n print \"Getting page content for '%s'\" % self.url.strip()\n \n content = getPageContent(self.url)\n\n # Verify that this is not binary data\n if content is not None and isHTML(content):\n\n\n # Extract basic data about this result\n content = content.lower()\n title, keywords, description = parseMetaDataFromContent(content)\n headers = parseHeaderInformationFromContent(content)\n\n # Add this result data\n self.resultDictionary['title'] = title\n self.resultDictionary['keywords'] = keywords\n self.resultDictionary['description'] = description\n self.resultDictionary['content'] = content\n self.resultDictionary['headers'] = headers\n\n # Run the extensions\n for extension in self.extensions:\n extension.run(self.resultDictionary)\n\n\n except URLError:\n\n # Skip this URL, and register it as an error on the cache\n if self.verbose:\n print(\"Error accessing '%s', %s\" % (self.url.strip(), str(sys.exc_info()[1]).strip()))", "def get_page_content(self, url, delay):\r\n\r\n # if browser cannot connect to the server, repeat it infinitely.\r\n while True:\r\n try:\r\n # load the page\r\n self.sel_driver.get(url)\r\n\r\n # if the page is loaded, wait for delay seconds until loading would finish.\r\n # this delay is also to avoid being blocked by upwork due to so frequent access\r\n time.sleep(delay)\r\n\r\n # read and parse the page contents\r\n soup = BeautifulSoup(self.sel_driver.page_source, 'html.parser')\r\n\r\n # page loading succeeded. escape from the endless iteration\r\n break\r\n except (WebDriverException, TimeoutException):\r\n # error occurred, do it again\r\n print(\"(ERROR) Driver could't be load: \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n self.relaunch(60)\r\n\r\n # check if the page is ACCESS DENIED\r\n # get the title of the page\r\n elements = soup.find_all(\"title\")\r\n if len(elements) == 0:\r\n return soup # if it has no title, it's may be a normal page\r\n\r\n # if the title is UPWORK ACCESS DENIED, I deal with it\r\n title = elements[0].text\r\n if 'access denied' in title.lower():\r\n print(\"(ERROR) UPWORK DENIED at \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n\r\n self.relaunch(200) # relaunch after about 3 minutes\r\n\r\n return self.get_page_content(url, delay)\r\n\r\n # if the title is Upwork - Maintenance, let it wait\r\n if title == 'Upwork - Maintenance':\r\n print(\"(ERROR) UPWORK is under the Maintenance - \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n time.sleep(random.randint(200, 400)) # We don't need relaunch browser.\r\n return self.get_page_content(url, delay)\r\n\r\n return soup", "def run(self, script, **kwargs):\r\n # don't return a value from a script\r\n kwargs['nout'] = 0\r\n return self.call(script, **kwargs)", "def wait(self):\n self.__prcs.wait()\n return self.poll()", "def get(self, url):\n\t\ttry:\n\t\t\tassert(type(url)) == str\n\t\t\tself.driver.get(url)\n\t\t\t# sleep(1) # Even tho driver.get is blocking, it returns as soon as DOM loads, without waiting for JS to run and update the DOM with the new elements\n\t\t\t# wait(self.driver, 10).until( EC.visibility_of_element_located() ) # Not sure how to wait here efficiently\n\t\t\tsleep(5) # A little long, but without a conditional variable to tell us when the page is ready us when to go our only choice 
is to nap\n\t\t\tself.bsource = bs( self.viewSource(), \"lxml\" ) # Update internal BeautifulSoup source with new javascript-encriched code (\"lxml\" is faster that \"html.parser\")\n\t\texcept Exception as e:\n\t\t\tprint(\"[*] Unable to GET page {}\\n{}\".format(url, e))\n\t\t\treturn -1", "def wait(t,p):\n\toutput_list = []\n\tc = ''\n\td = ''\n\twhile p not in d:\n\t\tc = t.read_very_eager()\n\t\tif len(c) > 0:\n\t\t\td += c\n\t\t\tprint c\n\t\t\toutput_list.append(c)\n\t\tif \"Press any key to continue\" in c or \"--More--\" in c:\n\t\t\tt.write(\" \")\n\toutput_list = ((''.join(output_list)).replace('\\r\\n','\\n')).split('\\n')\n\treturn output_list", "def run(self):\n yield from self._dts.ready.wait()", "def fourth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.fifth_page.wait_for_page()", "def wait_for_page_load(self, timeout=30):\n old_page = self.driver.find_element_by_tag_name('html')\n yield\n WebDriverWait(self.driver, timeout).until(\n staleness_of(old_page)\n )", "def wait_page_loaded(self, timeout=10):\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support import expected_conditions as ec\n\n old_page = self.selenium.find_element(By.TAG_NAME, \"html\")\n yield\n # Wait for the next page to be loaded\n self.wait_until(ec.staleness_of(old_page), timeout=timeout)\n self.wait_page_ready(timeout=timeout)", "def wait_till_read_out():\n\n\trespond = send_command('waitreadout')", "def get_stdout(self):\n _ = self.get() # force finished wait\n if self._stdout is not None:\n if wait_until_exists(self._stdout):\n with open(self._stdout) as f:\n self._out = f.read()\n return self._out", "def split_graph_output(output):\n html, js = output.split(\"<script\")\n js = \"<script\" + js\n return html, js", "def process(self):\n while not self.halted:\n self.step()\n return self.outputs", "def wait_page_ready(self, timeout=10):\n self.wait_until(\n lambda driver: driver.execute_script(\"return document.readyState;\")\n == \"complete\",\n timeout,\n )", "def execute_javascript(self, code):\n return self.loop.run_until_complete(self.get_async_keyword_group().execute_javascript(code))", "def run_module(self):\n info(\"Searching for cross site scripting (reflected)...\")\n\n # load in a list of lfi attach strings\n #self.attack_strings = self.main.db.get_wordlist(\n # self.info['wordlist_name'])\n\n self.attack_strings = ['<script>alert(1)</script>',\n '<img srx=\"x\" onerror=\"alert(1)>\"']\n\n # the search strings will be the attack strings themselves\n # because python will not interpret any javascript\n self.re_search_strings = self.attack_strings\n\n injectable_params = self._get_previous_results('HTMLParser')\n\n with concurrent.futures.ProcessPoolExecutor() as executor:\n results = executor.map(self._run_thread, injectable_params)\n\n final = []\n for r in results:\n final.extend(r)\n\n # save the results\n self._save_scan_results(final)", "def wait(self):\n self.Popen.wait()", "def wonder():\n copy()\n get_soup()\n get_text()\n change_write_text()\n Check_status_time_stamp()", "def download_scripts(parsed_args, scripts, client):\n print(\"INFO: Fetching available scanning scripts...\")\n for script_object in scripts:\n script = client.fetch_airgapped_script(str(script_object.id))\n if script is None:\n continue\n file_name = script.type.split('::')\n if \"Linux\" in file_name:\n file_name[-1] += '.sh'\n elif \"Windows\" in file_name:\n file_name[-1] += '.ps1'\n path = os.path.join(os.path.dirname(__file__), \"/\".join(file_name))\n 
with open(path, 'w') as filehandle:\n filehandle.write(script.contents)\n if script.attachment and parsed_args.no_attachment:\n download_attachment(file_name, script.attachment)\n print(\"INFO: Script saved in {}\".format(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'Scripts')))", "def run_results(self):\n calculation_band = self.ctx.workchain_bands.get_outputs(link_type=LinkType.CALL)[0]\n\n self.report('workchain succesfully completed'.format())\n self.out('band_parameters', calculation_band.out.output_parameters)\n self.out('bandstructure', calculation_band.out.output_band)", "def getResults(self, cleanup=True):\n self.wait_on_job()\n stdout_str = self.ofile_string()\n stderr_str = self.efile_string()\n if cleanup:\n self.erase_files()\n return (stdout_str, stderr_str)", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # 3 means the process finished/died between last check and now\n if e.errno != 3:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"Program stopped responding and couldn't be killed\")", "def run_with_output(self, cmd, end_strs, timeout=310):\n self.write(cmd)\n out = self.gather_output(cmd, end_strs, timeout)\n return out", "def run_script_block(json_response):\r\n scripts = request.values['scripts']\r\n exec_id = request.values['exec_id']\r\n ds_name = request.values['datasource']\r\n user_id = current_user.login_name\r\n result = executor.run_script_block(exec_id, scripts, ds_name, user_id)\r\n return json_response(result=result)", "def print_contents(self):\n try:\n # We only wait for 0.001 seconds.\n self.print_all_contents(indef_wait=False)\n except NotYourTurnError:\n # It's not our turn, so try again the next time this function is called.\n pass", "def handleContentComplete():", "def wait_for_response(self, request_id):\n url = \"{}/{}/{}\".format(self.url, self.url_dir, request_id)\n while True:\n response = requests.get(url)\n if response.text == \"done\\n\":\n return", "def execute(self):\n\n while True:\n\n neighbours, script, location = self.queue.get()\n\n if neighbours is None and script is None:\n self.queue.task_done()\n return\n\n self.run_script(neighbours, script, location)\n self.queue.task_done()", "def wait(self):\n pass", "def wait(self):\n pass", "def wait_complete(self):\n self.join()", "def 
test_scripts_inside_content_block(self):\n c = Client()\n resp = c.get('/books/')\n self.assertNotIn(b'<div id=\"recent_reviews\"></div>', resp.content)\n self.assertNotIn(b'<script crossorigin src=\"https://unpkg.com/react@16/umd/react.development.js\"></script>',\n resp.content)\n self.assertNotIn(\n b'<script crossorigin src=\"https://unpkg.com/react-dom@16/umd/react-dom.development.js\"></script>',\n resp.content)\n self.assertNotIn(b'<script src=\"https://unpkg.com/babel-standalone@6/babel.min.js\"></script>', resp.content)\n self.assertNotIn(b'<script src=\"/static/recent-reviews.js\" type=\"text/babel\"></script>', resp.content)\n self.assertNotIn(b'ReactDOM.render(<RecentReviews url=\"/api/reviews/?limit=6\" />,', resp.content)\n self.assertNotIn(b'document.getElementById(\\'recent_reviews\\')', resp.content)", "def run(url, output, loglevel, logfile):\n # Logging setup\n loader.logging.setup(level=loglevel, logfile=logfile)\n\n # Download page and get DOM\n dom = BeautifulSoup(loader.network.download(url), DEFAULT_PARSER)\n\n # Split URL to fragments\n scheme, net_loc, *_ = list(urlparse(url))\n\n # Get resource objects from DOM\n resources = loader.handler.get_resources(dom)\n\n if resources:\n # Build resource dirname\n local_dirname = loader.path.for_resource_dir(url)\n # Create dir for resource inside 'output'\n loader.storage.mkdir(os.path.join(output, local_dirname))\n\n web_resource_paths = []\n for resource in resources:\n # Get resource path from resource object\n web_resource_path = loader.handler.get_path(resource)\n # Build resource local path\n local_resource_path = os.path.join(\n local_dirname,\n loader.path.for_resource(web_resource_path),\n )\n # Set local path in resource object\n loader.handler.update_resource(\n resource=resource,\n new_link=local_resource_path,\n )\n web_resource_paths.append(web_resource_path)\n # Save modified DOM\n loader.storage.save(\n f_content=dom.encode(),\n output=output,\n filename=loader.path.for_page(url),\n )\n # Download resources\n for resource_path in tqdm(web_resource_paths, desc=BAR_DESC):\n resource_url = urlunsplit(\n [scheme, net_loc, resource_path, None, None],\n )\n try:\n loader.storage.save(\n f_content=loader.network.download(resource_url),\n output=os.path.join(output, local_dirname),\n filename=loader.path.for_resource(resource_path),\n )\n except loader.network.NetworkError as error:\n logging.debug(error, exc_info=sys.exc_info())", "def wait_for_ajax(self):\n return self.driver.execute_script(\n \"return typeof(jQuery)!='undefined' && jQuery.active==0\")", "def finish(self):\r\n self.start_finish()\r\n self.wait_finish()", "def get(self, url=None, script=None, key=None):\n self.base_url = self.base_url or url # set base URL if not set\n html = self.cache.get(key)\n if html:\n if self.debug: print 'load cache', key \n self.setHtml(html, QUrl(self.base_url))\n elif url:\n self.load(QUrl(url))\n elif script:\n self.js(script)\n\n loop = QEventLoop()\n timer = QTimer()\n timer.setSingleShot(True)\n timer.timeout.connect(loop.quit)\n self.loadFinished.connect(loop.quit)\n timer.start(self.timeout * 1000)\n loop.exec_() # delay here until download finished or timeout\n \n if timer.isActive():\n # downloaded successfully\n timer.stop()\n html = self.current_html()\n if key:\n self.cache[key] = html\n self.inject_jquery()\n else:\n # didn't download in time\n print 'Download timeout'\n html = ''\n return html", "def process_results(self):\n return self._do_action_under_lock(self._process_results)", "def get(self):\n if 
not self.finished():\n self.wait()\n return self._result", "def get_results_from_script(self, script):\n result = script.scaler.work_free_stats\n return result", "def handle_execution_response(self, data, *, wait):\n ...", "def collect_output(self):\n pass", "def collect_output(self):\n pass", "def include_content_html():\n\n# <div id=\"content\"> \n root_div = etree.Element(\"div\", id=\"content\")\n \n for initial_condition in initial_conditions:\n for flux in fluxes:\n # content_id identifies the results of a particular computation in the HTML document \n content_id = initial_condition + \"_\" + flux\n # <div id=\"content_id\">\n div = etree.SubElement(root_div, \"div\", id=content_id)\n # JQuery function to include content dynamically\n # <script> = include_content(content_id)</script>\n etree.SubElement(div, \"script\").text = \"include_content(\\\"\" + content_id + \"\\\")\"\n #</div> \n# </div>\n\n# Write the generated HTML document to a file\n output_html_file = open(html_path + \"html/computations/include_content.html\", \"w\") \n output_html_file.write(etree.tostring(root_div, pretty_print=True).decode(\"utf-8\"))", "def run_next_action():\n os.environ[\"BROWSER\"] = 'echo %s'\n result = subprocess.run(context.arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding=\"utf-8\")\n return result.stdout + result.stderr" ]
[ "0.58988714", "0.5896934", "0.5896934", "0.5896934", "0.5896934", "0.5818085", "0.5690976", "0.56339425", "0.5543673", "0.5390721", "0.5287882", "0.52217585", "0.5208909", "0.51837784", "0.5157287", "0.5146636", "0.51380193", "0.51380193", "0.51380193", "0.51380193", "0.51246214", "0.50950605", "0.50584346", "0.50428325", "0.501126", "0.50033057", "0.50016296", "0.5001629", "0.500133", "0.5001176", "0.49920604", "0.49898067", "0.4980966", "0.4938505", "0.49365634", "0.49348938", "0.49305403", "0.4911703", "0.48998383", "0.48989302", "0.48979533", "0.48949262", "0.4889959", "0.48642418", "0.48610294", "0.48433548", "0.48398426", "0.48384643", "0.48214427", "0.4816019", "0.48154774", "0.48017773", "0.47952098", "0.4784768", "0.47800678", "0.47712675", "0.47692594", "0.47393233", "0.4730804", "0.4722589", "0.4697666", "0.46956542", "0.46955723", "0.46955082", "0.469297", "0.46922916", "0.46913755", "0.4690477", "0.46857592", "0.46854463", "0.46773377", "0.46747455", "0.46707627", "0.46591407", "0.4653245", "0.46522498", "0.46521986", "0.46521592", "0.46499422", "0.46452317", "0.4641753", "0.463313", "0.46298158", "0.46253136", "0.46206844", "0.46203747", "0.46203747", "0.4617286", "0.46102887", "0.45996612", "0.4598194", "0.45967522", "0.45952183", "0.45903826", "0.45903563", "0.4589297", "0.4588863", "0.457745", "0.457745", "0.45772776", "0.4577204" ]
0.0
-1
x Position x on the map y Position y on the map theta Direction on the map
def scan(self, x, y, theta): # create ray list max_theta = theta + self.fov/2.0 min_theta = theta - self.fov/2.0 thetas = np.arange(min_theta, max_theta, self.theta_inc, dtype=np.float32) self.input_vector[:, 0] = x self.input_vector[:, 1] = y self.input_vector[:, 2] = thetas # run ray marching self.scan_method.calc_range_many(self.input_vector, self.output_vector) return self.output_vector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def theta(self):\n return atan2(self.y, self.x)", "def theta(self):\n return float(np.arctan2(self.y, self.x))", "def xy(self,theta,phi):\n dist=great_circle_distance(self.theta0,theta,self.phi0,phi)\n [yt,xt]=np.unravel_index(np.argmin(dist),dist.shape)\n return xt,yt", "def _rotate_coords(self, x, y, theta, ox, oy):\n s, c = self._pkgs['numpy'].sin(theta), self._pkgs['numpy'].cos(theta)\n x, y = self._pkgs['numpy'].asarray(x) - ox, self._pkgs['numpy'].asarray(y) - oy\n return x * c - y * s + ox, x * s + y * c + oy", "def wind_xy_to_theta(x, y):\n return np.angle( x + 1j*y, deg=False )/np.pi # note div by np.pi!", "def __call__( self , theta ):\r\n offset = np.dot( z_rot( theta ) , [ self.radius , 0 , 0 ] )\r\n # print \"Offset:\" , offset\r\n return np.add( self.center , offset )", "def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)", "def map(self, lat, long):\r\n rxy = self._r*lat/(np.pi/2)\r\n x = rxy*np.cos(long)\r\n y = rxy*np.sin(long)\r\n return (x, y)", "def polar_to_xy(r, theta):\r\n x = r*np.cos(theta)\r\n y = r*np.sin(theta)\r\n return x, y", "def latToTheta(lat):\n return (90.0 - lat) * (np.pi/180.0)", "def convert_pose_to_xy_and_theta(self, passed_stamped_pose):\n # Convert to map coordinate frame from odom\n pose = self.transform(passed_stamped_pose).pose # Apply current transform to given pose\n\n orientation_tuple = (pose.orientation.x,\n pose.orientation.y,\n pose.orientation.z,\n pose.orientation.w)\n angles = t.euler_from_quaternion(orientation_tuple)\n\n return (pose.position.x, pose.position.y, angles[2])", "def sat_2d_pos(theta):\n r_sat = a * (1 - e**2) / (1 + e * np.cos(theta))\n return r_sat, theta", "def ang2xy(self, theta, phi=None, lonlat=False, direct=False):\n pass", "def tanp_to_world(self, x, y):\n ra, dec = x, y\n return ra, dec", "def coordinates(self):", "def map(self, lat, long):\r\n rxy = self._r*np.sqrt(1-np.cos(lat))\r\n x = rxy*np.cos(long)\r\n y = rxy*np.sin(long)\r\n return (x, y)", "def get_theta(self):\n return self.theta", "def xyz(phi, theta):\n x = cos(theta) * cos(phi)\n y = cos(theta) * sin(phi)\n z = sin(theta)\n loc = asarray([x,y,z])\n return(loc)", "def position(t):\n return c + tangent_vec * 7 * t ** 2", "def theta():\n pass", "def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)", "def coords(self, p=None, rotationDegreesCCW=0.0):\n Th, R = self.ThRcoords()\n\n x = R * math.cos(Th)\n y = R * math.sin(Th)\n if rotationDegreesCCW:\n x, y = utils.rotateXY(x, y, rotationDegreesCCW)\n return x, y", "def undo_mercator_project(x,y):\n lon = y*np.pi\n ex = np.exp(4*np.pi*x)\n lat = np.arcsin((ex - 1)/(ex +1 ))\n lon = lon*360/2/np.pi\n lat = lat*360 /2/np.pi\n return lon, lat", "def coords(self, p=None, rotationDegreesCCW=0.0):\n if p is None:\n p = 1.0 # useful for doing relative distance comparisons.\n\n i, j = self.indices()\n x = p * (COS30 * i)\n y = p * (SIN30 * i + j)\n\n if rotationDegreesCCW:\n x, y = utils.rotateXY(x, y, rotationDegreesCCW)\n\n return x, y", "def update(self, x, y, theta):\n self.x = x\n self.y = y\n self.theta = theta\n self.theta = wrap_angles(self.theta)", "def coordX(r, theta, useradians = True):\r\n if not useradians :\r\n #convert theta to radians\r\n theta = theta / 180 * math.pi\r\n x = r * math.cos(theta)\r\n return x", "def initialCoordinates():\r\n return (-250,-250)", "def _get_rotated_coords(x, y, PA):\n x_rot = y * 
np.cos(np.radians(PA)) + x * np.sin(np.radians(PA))\n y_rot = x * np.cos(np.radians(PA)) - y * np.sin(np.radians(PA))\n return x_rot, y_rot", "def _to_world_coord(self, x, y):\n maze = self._get_maze()\n y = maze.shape[1] - y - 1\n return (float(x) + .5) * _MAZE_CELL_SIZE, (float(y) + .5) * _MAZE_CELL_SIZE", "def set(self, x: float, y: float, theta: float):\n self.x = float(x)\n self.y = float(y)\n self.theta = float(theta)", "def forward(self, x):\r\n self.x = (self.x+(x*(math.cos(self.dir))))\r\n self.y = (self.y+(x*(math.sin(self.dir))))\r\n return (self.x, self.y)", "def get_location(self):\r\n return self.__x, self.__y", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)", "def _get_cart_sky_coords(self, x0, y0):\n return self.x_sky - x0, self.y_sky - y0", "def theta(self):\n return self._theta", "def theta(self):\n return self._theta", "def _get_polar_sky_coords(self, x0, y0):\n x_sky, y_sky = self._get_cart_sky_coords(x0, y0)\n return np.hypot(y_sky, x_sky), np.arctan2(x_sky, y_sky)", "def __init__(self):\n self.x = 0.0\n self.y = 0.0\n self.theta = 0.0\n self.total_distance_covered = 0.0", "def pa(x):\t\t\n if(float(x)>=0 and float(x)<=180.0): \n pos_ang = float(x) - 90.0 #position angle\n if(float(x)<0 and float(x)>=-180.0):\n pos_ang = 90.0 - abs(float(x)) #position angle\n if(float(x)>180 and float(x)<=360.0):\n pos_ang = float(x) - 360.0 + 90.0 #position angle\n if(float(x)>=-360 and float(x)<-180.0):\n pos_ang = float(x) + 360.0 - 90.0 #position angle\t\n return pos_ang", "def theta_deg(self):\n return self.theta * 180 / np.pi", "def tanp_to_world(self, x, y):\n crpix1, crpix2 = self._wcs.wcs.crpix\n x = x + crpix1\n y = y + crpix2\n ra, dec = self._wcslin.all_pix2world(x, y, 1)\n return ra, dec", "def invmap(self, x, y):\r\n r = self._r\r\n rxy = np.sqrt(x*x + y*y)\r\n\r\n lat = (rxy/r)*(np.pi/2)\r\n long = np.arctan2(y, x)\r\n\r\n return (lat, long)", "def position(t, x, y):\n return x * exp(-t * y) * sin(2 * pi * t)", "def xy2ang(self, x, y=None, lonlat=False, direct=False):\n pass", "def attacker_position(inputs):\n rho, theta, _, _, _ = inputs\n x = rho * np.cos(theta)\n y = rho * np.sin(theta)\n return x, y", "def state_to_coords(self, state):\n x, _, theta, _ = state\n cart_coords = (x, self.y)\n pole_coords = ([x, x + 2*self.L*math.sin(theta)],\n [self.y, self.y + 2*self.L*math.cos(theta)])\n return cart_coords, pole_coords", "def theta(point_a, point_b):\r\n dx = point_b[0] - point_a[0]\r\n dy = point_b[1] - point_a[1]\r\n\r\n if abs(dx) < 1.e-6 and abs(dy) < 1.e-6:\r\n return 360\r\n else:\r\n t = dy/(abs(dx) + abs(dy))\r\n\r\n if dx < 0:\r\n t = 2 - t\r\n elif dy < 0:\r\n t += 4\r\n\r\n if t == 0:\r\n return 360\r\n\r\n return t*90", "def theta(self):\n return self.kernel.theta", "def update_position_direction(self, l):\n\n x = self.x + self.mu * l\n mu = self.mu\n\n return x, mu", "def new_robot_coordinates(old_x, old_y):\n d = random.normalvariate(5,1)\n theta = random.uniform(math.pi/5 - math.pi/36, math.pi/5 + math.pi/36)\n new_x = old_x + d * math.cos(theta)\n new_y = old_y + d * math.sin(theta)\n return new_x, new_y", "def Pixel2World(geoMatrix, x, y):\r\n ulX = geoMatrix[0]\r\n ulY = geoMatrix[3]\r\n xdist = geoMatrix[1]\r\n ydist = geoMatrix[5]\r\n coorX = (ulX + (x * xdist))\r\n coorY = (ulY + (y * ydist))\r\n return (coorX, coorY)", "def unproject(self, (x, y)):\n lng = x/EARTH_RADIUS * RAD_TO_DEG\n lat = 2 * 
math.atan(math.exp(y/EARTH_RADIUS)) - math.pi/2 * RAD_TO_DEG\n return (lng, lat)", "def log_robot_location(self):\n trans, rot = self.get_robot_location()\n if trans != None and rot != None:\n degrees = (rot.yaw * 180./math.pi)\n message = {\n 'x':'{0:.3f}'.format(trans.x),\n 'y':'{0:.3f}'.format(trans.y),\n 'rotation':'{0:.3f}'.format(degrees)}\n\n self.logger.log('ROS_XYR', 'ROS_XYR', message, 'state')", "def to_world(self, x, y, **kwargs):", "def convert_pose_to_xy_and_theta(pose):\n orientation_tuple = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)\n angles = euler_from_quaternion(orientation_tuple)\n return pose.position.x, pose.position.y, angles[2]", "def invmap(self, x, y):\r\n r = self._r\r\n rxy = np.sqrt(x*x + y*y)\r\n\r\n lat = np.arccos(1-(rxy/r)**2)\r\n long = np.arctan2(y, x)\r\n\r\n try:\r\n long[np.isnan(lat)] = np.nan\r\n except TypeError: # Thrown if long is scalar\r\n if np.isnan(lat): long = np.nan\r\n return (lat, long)", "def _rotate_coordinate(self, x, y, angle):\n\n sin = math.sin(angle)\n cos = math.cos(angle)\n\n x_ = x * cos - y * sin\n y_ = x * sin + y * cos\n\n return (x_, y_)", "def mapr(r):\n return np.rad2deg(np.arctan(r)*2)", "def rotate((x, y), theta):\n\n return math.cos(theta) * x + math.sin(theta) * y, -math.sin(theta) * x + math.cos(theta) * y", "def coordnav(self,dx,dy,dth): \n self.cg_ant = np.array(self.cg)\n self.coord[0] = self.coord[0] - self.cg[0]\n self.coord[1] = self.coord[1] - self.cg[1] \n self.Rz = rotation.matrix([0,0,1],dth)\n self.coord = np.dot(self.Rz,self.coord)\n \n self.coord[0] = self.coord[0] + self.cg[0] + dx\n self.coord[1] = self.coord[1] + self.cg[1] + dy \n \n self.px = self.coord[:,self.px_index]\n self.Bx = self.px-self.cg \n self.basex = self.Bx/math.sqrt(np.dot(self.Bx,self.Bx))", "def homog_ang_axs( theta , k , pos ):\r\n return np.vstack( ( np.hstack( ( rot_matx_ang_axs( theta , k ) , [ [ pos[0] ] , [ pos[1] ] , [ pos[2] ] ] ) ) ,\r\n np.hstack( ( [ 0 , 0 , 0 ] , [ 1 ] ) ) ) )", "def theta(lam, gam, p):\n #lam = lam - 1e-15\n return np.pi - np.arccos(np.divide(-1 + lam*np.cos(2*np.pi*p ), w(lam, gam, p) ) )", "def _rotate_points(x, y, ang):\n theta = np.radians(ang - 90.)\n xNew = x*np.cos(theta) - y*np.sin(theta)\n yNew = x*np.sin(theta) + y*np.cos(theta)\n return xNew, yNew", "def pixel2coord(tf, x, y):\n lat = tf[0] + x*tf[1] + y*tf[2]\n lon = tf[3] + x*tf[4] + y*tf[5]\n\n return lat, lon", "def rotate_points(x, y, ang):\n theta = np.radians(ang)\n xNew = x*np.cos(theta) - y*np.sin(theta)\n yNew = x*np.sin(theta) + y*np.cos(theta)\n\n return xNew, yNew", "def rect(r, theta):\n x = r * math.cos(theta)\n y = r * math.sin(theta)\n return x,y", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)", "def points(self, ntheta, nlayer):\n exec self.x\n exec self.y\n return [x, y]", "def spherical(self, x, y):\n\t\twhile x >= self.planet.width or x < 0 or y >= self.planet.height or y < 0:\n\t\t\t#change x if x is out of boundary\n\t\t\tif x >= self.planet.width:\n\t\t\t\tx -= (self.planet.width)\n\t\t\telif x < 0:\n\t\t\t\tx += (self.planet.width)\n\t\t\t#change y if y is out of boundary\n\t\t\tif y >= self.planet.height:\n\t\t\t\ty -= (self.planet.height)\n\t\t\telif y < 0:\n\t\t\t\ty += (self.planet.height)\n\t\treturn x, y", "def 
GPSlatlon2XY_time(lat_u, lon_u, theta):\n\n\trho_u = np.sqrt(np.power(lon_u, 2) + np.power(lat_u, 2))\n\ttheta_new_u = np.arctan2(lat_u, lon_u) - theta\n\n\tUx, Uy = rho_u * np.cos(theta_new_u), rho_u * np.sin(theta_new_u)\n\n\treturn Ux, Uy", "def predict_coords(self):\r\n\r\n if self.direction == 1:\r\n return [self.coords[0] + 1, self.coords[1]]\r\n if self.direction == 2:\r\n return [self.coords[0] - 1, self.coords[1]]\r\n if self.direction == 3:\r\n return [self.coords[0], self.coords[1] + 1]\r\n if self.direction == 4:\r\n return [self.coords[0], self.coords[1] - 1]", "def getA(self):\n return self.theta", "def point_angle(cx, cy, px, py):\n return atan2(py - cy, px - cx)", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def rotate_translate(pixel_pos_x, pixel_pos_y, x_trans, y_trans, phi):\n\n pixel_pos_trans_x = (pixel_pos_x - x_trans) * \\\n np.cos(phi) - (pixel_pos_y - y_trans) * np.sin(phi)\n pixel_pos_trans_y = (pixel_pos_x - x_trans) * \\\n np.sin(phi) + (pixel_pos_y - y_trans) * np.cos(phi)\n return pixel_pos_trans_x, pixel_pos_trans_y", "def GetParametricCoords(self):\n ...", "def coord_polar(mat):\n x = mat[:, 0].copy()\n y = mat[:, 1].copy()\n\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y, x)\n\n return r, theta", "def northing(self):\r\n x, y = self.lonlat2xy(self.longitude, self.latitude)\r\n return y", "def lat_lons(self):", "def world_to_tanp(self, ra, dec):\n x, y = ra, dec\n return x, y", "def xy(self):\n return self.coords.xy", "def coord(self, x, y):\n origin_x = self._raster_meta['transform'][3]\n origin_y = self._raster_meta['transform'][0]\n pixel_x = self._raster_meta['transform'][5]\n pixel_y = self._raster_meta['transform'][1]\n\n x = int((x - origin_x) / pixel_x)\n y = int((y - origin_y) / pixel_y)\n return self[x, y]", "def teleport(self, x, y, reset_rotation=False):\n self.center[0] = x\n self.center[1] = y\n self.rect.center = tuple(self.center) # update pygame sprite placement\n if reset_rotation:\n self.rotate(-self.rotation)", "def theta_phi(Collimator_square, sample_point):\n p1,p2,p3,p4=Collimator_square\n\n points = np.array([sample_point-p1, sample_point-p2, sample_point-p3, sample_point-p4])\n points=points.transpose(1,0,2) #shape: (pointsNum,4,3)\n\n theta = np.arctan2(points[:, :, 0],points[:, :, 1] )\n\n norm_x_y=np.sqrt(points[:, :, 0]**2+points[:, :, 1]**2)\n phi = np.arctan2(norm_x_y, points[:, :, 2])\n\n return theta, phi", "def GPSlatlon2XY(data_sheet, origin, theta):\n\n\tlon = np.array([[data_sheet.cell(row = i, column = 1).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat = np.array([[data_sheet.cell(row = i, column = 2).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_u = np.array([[data_sheet.cell(row = i, column = 5).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat_u = np.array([[data_sheet.cell(row = i, column = 6).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tUz = np.array([[data_sheet.cell(row = i, column = 4).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_in_km = (lon - origin[0])*111*np.cos(lat*np.pi/180)\n\tlat_in_km = (lat - origin[1])*111\n\t\n\trho_u = np.sqrt(np.power(lon_u,2) + np.power(lat_u,2))\n\ttheta_new_u = np.arctan2(lat_u,lon_u) - theta\n\n\trho = np.sqrt(np.power(lon_in_km,2) + np.power(lat_in_km,2))\n\ttheta_new = 
np.arctan2(lat_in_km,lon_in_km) - theta\n\n\tX, Y = rho*np.cos(theta_new), rho*np.sin(theta_new)\n\tUx, Uy = rho_u*np.cos(theta_new_u), rho_u*np.sin(theta_new_u)\n\n\treturn 1e3*X, 1e3*Y, 1e-3*Ux, 1e-3*Uy, 1e-3*Uz", "def compute_theta_phi(self, x, y, phi_0, theta_0=np.pi/2):\n angles=np.zeros(shape=[x.shape[0], y.shape[0],2])\n xx, yy=np.meshgrid(x,y)\n rho=np.sqrt(xx**2+yy**2)\n c=2.0*np.arctan(rho/2.0)\n theta=theta_0-np.arcsin(np.cos(c)*np.cos(theta_0)+yy*np.sin(c)*np.sin(theta_0)/rho)\n phi=phi_0+np.arctan(xx*np.sin(c)/(rho*np.sin(theta_0)*np.cos(c)-yy*np.cos(theta_0)*np.sin(c)))\n angles[:,:,0]=theta\n angles[:,:,1]=phi\n return angles", "def get_coord(self):\n return self.coord", "def getXYpos(relativeNullPoint, p):\n deltaLatitude = p.latitude - relativeNullPoint.latitude\n deltaLongitude = p.longitude - relativeNullPoint.longitude\n latitudeCircumference = 40075160 * math.cos(asRadians(relativeNullPoint.latitude))\n resultX = deltaLongitude * latitudeCircumference / 360\n resultY = deltaLatitude * 40008000 / 360\n return resultX, resultY", "def translateTrame(self,inTrame):\n rawConvertedY=int((inTrame.data1+inTrame.data0),16)\n rawConvertedX=int((inTrame.data3+inTrame.data2),16)\n absX=int(round(rawConvertedX/(16**4-1.0)*self.maxX))\n absY=int(round(rawConvertedY/(16**4-1.0)*self.maxY))\n LOGGER.info(\"Position sensor {} with new coordonate {} -- {}\".format(self.physic_id,absX,absY))\n return {\"coordX\":absX,\"coordY\":absY}", "def rotxaxis(ya, za, angle):\n\n y = ya * math.cos(angle) - za * math.sin(angle) \n z = ya * math.sin(angle) + za * math.cos(angle)\n \n return y, z", "def tick_odom(old_xR,old_yR,old_thetaR,dx,dy,dTheta):\n xR = old_xR+dx\n yR = old_yR+dy\n thetaR = old_thetaR+dTheta\n print (\"Nouvelle coordonnee dans le monde: x = %.3f, y = %.3f, theta = %.3f\" %(xR, yR, thetaR))\n return [xR, yR, thetaR]", "def coords(self) -> Tuple[float, float]:\n return self.lat, self.lng", "def _maping(x,y,l,a):\n newx = (x**2 *(l* ((x**2 + y**2)**(a/2) - 1) + 2) - l * y**2 *((x**2 + y**2)**(a/2) - 1))/(x**2 + y**2) \n newy = (2 * x* y *(l* ((x**2 + y**2)**(a/2) - 1) + 1))/(x**2 + y**2)\n return newx, newy", "def origin(self):\r\n\r\n return self.ox, self.oy, self.oz", "def direction(self):\r\n return 180 - atan2(self.x, self.y)*180/pi", "def position(self):\n return self.x, self.y", "def coordsxy(self, convert_to=False):\n if convert_to == 'rad':\n return (self.x*3.14159/180., self.y*3.14159/180.)\n elif convert_to == 'deg':\n return (self.x/3.14159*180., self.y/3.14159*180.)\n else:\n return (self.x, self.y)", "def phi(cylindrical_x: sc.Variable, cylindrical_y: sc.Variable) -> sc.Variable:\n return sc.atan2(y=cylindrical_y, x=cylindrical_x)", "def cartesian_To_Center(self, x, y, z):\n\n if x > 0.0 and -self.L_cap <= y <= 0.0:\n s = self.L_cap + y\n xc = x - self.rb\n yc = z\n else:\n theta = full_arctan2(y, x)\n if theta <= self.ang:\n s = theta * self.rb + self.L_cap\n xc = np.sqrt(x ** 2 + y ** 2) - self.rb\n yc = z\n elif self.ang < theta <= 2 * np.pi: # i'm being lazy here and not limiting the real end\n x0, y0 = np.cos(self.ang) * self.rb, np.sin(self.ang) * self.rb\n thetaEndPerp = np.pi - np.arctan(-1 / np.tan(self.ang))\n x, y = x - x0, y - y0\n deltaS, xc = np.cos(thetaEndPerp) * x + np.sin(-thetaEndPerp) * y, np.sin(thetaEndPerp) * x + np.cos(\n thetaEndPerp) * y\n yc = z\n xc = -xc\n s = (self.ang * self.rb + self.L_cap) + deltaS\n else:\n raise ValueError\n return s, xc, yc", "def transform_scan(self, particle, distance, theta):\n return (particle.x + distance * 
math.cos(math.radians(particle.theta + theta)),\n particle.y + distance * math.sin(math.radians(particle.theta + theta)))" ]
[ "0.691251", "0.683143", "0.6619195", "0.6595212", "0.63898385", "0.628581", "0.6238489", "0.61594254", "0.61381114", "0.61379653", "0.61337817", "0.6127042", "0.6115421", "0.6079736", "0.60695887", "0.6060793", "0.6034483", "0.60037607", "0.60031086", "0.59396416", "0.59340966", "0.59020454", "0.5887503", "0.58826196", "0.58797944", "0.5873579", "0.58651507", "0.58620924", "0.5856651", "0.5853568", "0.5849215", "0.5830519", "0.5816828", "0.5792183", "0.57867724", "0.57867724", "0.5761576", "0.57567585", "0.57360375", "0.5727307", "0.5708575", "0.5705571", "0.56994927", "0.56962115", "0.56868905", "0.5686017", "0.5683486", "0.56777525", "0.56714684", "0.5669854", "0.5669079", "0.5668052", "0.5658303", "0.56525147", "0.5648867", "0.56476957", "0.5644555", "0.5643301", "0.56432855", "0.5626319", "0.561571", "0.5586837", "0.5575657", "0.5575096", "0.55582416", "0.5555281", "0.55547166", "0.55497324", "0.5549298", "0.5545948", "0.5545661", "0.5537768", "0.55332243", "0.55227536", "0.5522358", "0.55182105", "0.5516888", "0.55126685", "0.5512271", "0.5510842", "0.5509168", "0.5507259", "0.55053765", "0.55031776", "0.54990715", "0.5498769", "0.5496335", "0.54856163", "0.5484048", "0.5481562", "0.5478411", "0.54768336", "0.5471334", "0.5469245", "0.5464347", "0.5463896", "0.5463705", "0.54628325", "0.54623467", "0.54591584", "0.545635" ]
0.0
-1
Saves ciphers or keys or any text to the given file path; more efficient than manual saving.
def save(string, file): save_file = open(file, 'w') save_file.write(string) save_file.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_file(path, text):\n with path.open(mode='w') as f_stream:\n f_stream.write(text)", "def store_file(text: str, file_path: str) -> None:\n with open(file=file_path, mode='w', encoding='utf8') as f:\n f.write(text)", "def save_text_file(text, path):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, \"w\") as f:\n f.write(text)", "def save(self, path):\n f = open(path, 'w')\n f.write(self.content().encode('utf-8'))\n f.close()", "def save(self, file_path):\n with open(file_path, 'w') as file:\n file.write(self.text)\n file.close()", "def save_file(self, file_name, text):\n\n with open(file_name, 'w') as content_file:\n content = content_file.write(text)", "def encrypt_and_store_file(path_to_original_file):\n\t\toriginal_file_name, _ = os.path.splitext(path_to_original_file)\n\t\toutput_string = EncryptDecrypt.ascii_string_to_hex(EncryptDecrypt.file_to_string(path_to_original_file))\n\t\twith open(original_file_name+\".enc\", \"w+\") as save_file:\n\t\t\tsave_file.write(output_string)\n\t\tos.remove(path_to_original_file)", "def save(self, path: str):\n pass", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def save(self, path: str):\n\n\t\tinfo_dict = {\n\t\t\t\"n_gram_size\": self.n_gram_size,\n\t\t\t\"caseless\": self.caseless,\n\t\t\t\"ignore_punctuation\": self.ignore_punctuation,\n\t\t\t\"add_pos_tags\": self.add_pos_tags,\n\t\t\t\"uses_lemma\": self.uses_lemma,\n\t\t\t\"uses_sentences\": self.uses_sentences\n\t\t}\n\n\t\twith open(path, \"wt\", encoding=\"utf8\") as f:\n\t\t\tjson.dump(info_dict, f)", "def save(self, path):\n # Force an update of the canvas\n self._canvas.Update()\n\n # Save to file\n self._canvas.SaveAs(path)", "def save_path(path_to_account):\r\n with open(\"config.txt\", 'w+') as write_in_file:\r\n write_in_file.write(path_to_account)", "def save_file(path, file_data):\n file_data.save(path)", "def write(path):\n return mac_slideshow.preferences.write(KEY, path)", "def writefile(path: Union[str, Path], txt: str) -> None:\n with open(path, 'w') as outfile:\n outfile.write(txt)", "def save(self, path):\n pass", "def save(self, path):\n pass", "def save(self, path):\n pass", "def save(self, path):\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n\n if not name:\n raise ValueError, \"name is required\"\n\n path = os.path.join(folder, name + self.extension)\n f = open(path, \"wb\")\n f.write(self.contents)\n f.close()\n\n return path", "def savefile(text):\n file = tkinter.filedialog.asksaveasfile(mode='w', defaultextension='.txt')\n if not file:\n return\n file.write(text)\n file.close()", "def _save_file(self, file_path, data):\n self._ensure_directory(os.path.dirname(file_path))\n with open(file_path, \"wb\") as f:\n f.write(data)", "def to_disk(self, path: Union[str, Path], **kwargs: Any) -> None:\n path = ensure_path(path)\n cfg = {\n \"spaczz_overwrite\": self.overwrite,\n \"spaczz_defaults\": self.defaults,\n \"spaczz_ent_id_sep\": self.ent_id_sep,\n }\n serializers = {\n \"spaczz_patterns\": lambda p: srsly.write_jsonl(\n p.with_suffix(\".jsonl\"), self.patterns\n ),\n \"cfg\": lambda p: srsly.write_json(p, cfg),\n }\n if path.suffix == \".jsonl\": # user wants to save only JSONL\n srsly.write_jsonl(path, self.patterns)\n else:\n write_to_disk(path, serializers, {})", "def save(self, path):\n with open(path, 'wb') as f:\n pkl.dump(self, f)", "def save(btctxstore, path, cfg, password=None):\n # always validate before saving\n validate(btctxstore, cfg)\n\n 
# Create root path if it doesn't already exist.\n root_path = os.path.dirname(path)\n if not os.path.exists(root_path):\n os.makedirs(root_path)\n\n # WRite config to file.\n if password is None: # unencrypted\n with open(path, 'w') as config_file:\n config_file.write(json.dumps(cfg))\n return cfg\n else:\n raise NotImplementedError(\"encryption not implemented\")", "def SaveToFile(self):\n\n if len(self.paris) == 0:\n with open(self.fileLoc, \"w\") as file:\n file.write(\" \")\n return\n\n data = \"\"\n for x in self.paris.iterkeys():\n data += \"%\" + x + \":\" + self.paris[x] + \"%\"\n \n data = self.Encrypt(data)\n\n with open(self.fileLoc, \"w\") as file:\n file.write(data)", "def saveText(file, path):\n files = os.listdir(path)\n for fil in files:\n if filetype.guess(os.path.join(path,fil)) is None:\n os.remove(os.path.join(path,fil)) \n tx = open(os.path.join(path, str(file)), 'wb')\n file.open()\n tx.write(file.read())\n file.close()\n tx.close()", "def encrypt(path, key, default, output):\n if not key:\n key = getpass('Encryption key: ')\n\n path, file_type, file_mtime = get_file_type_and_mtime(path)\n config = get_config(path, file_type, default)\n data = encrypt_credentials(config, key)\n\n if output:\n if output[0] == '.':\n output = output[1:]\n file_type = '.' + output.lower()\n\n with open(path + file_type, 'w') as save_file:\n if file_type == '.json':\n json.dump(data, save_file, indent=2)\n\n elif file_type in {'.ini', '.conf'}:\n if default:\n default_section = 'DEFAULT'\n else:\n default_section = 'DEFAULT' + os.urandom(16).hex()\n\n for heading in data:\n save_file.write(\"[{}]\\n\".format(heading))\n for item in data[heading]:\n save_file.write(\"{} = {}\\n\".format(item, data[heading][item]))\n save_file.write(\"\\n\")\n\n else:\n write_yaml(save_file, data)", "def write_cipher_text(self, data: bytes, out_file: BinaryIO, filename: str):\n\n out_file.write(data)", "def save_file(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'wb') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def save_file(content: Any, filename: str, path: str):\n\n logging.info('Saving file: %s ' % filename)\n path_to_file = join(path, filename)\n if isfile(path_to_file):\n ctrl = input('%s exists already in\\n %s.\\n'\n ' Are you sure you want to overwrite it [y/N]: '\n % (filename, path))\n if ctrl.lower() == 'y' or ctrl.lower() == 'yes':\n with open(path_to_file, \"wb\") as f:\n pickle.dump(content, f)\n else:\n logging.warning(\"%s NOT saved..\" % filename)\n return\n else:\n with open(path_to_file, \"wb\") as f:\n pickle.dump(content, f)\n\n logging.info(\"File '%s' saved.\" % filename)", "def save_to_file(self, file_path: str):\n with open(file_path, 'w') as engine_settings_file:\n json.dump(self._encode_json(), engine_settings_file, indent=4)", "def save_file_(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'w') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def save_txt(filename, data, encoding):\n with open(filename, \"w\") as f:\n f.write(dump(data, encoding))", "def save(self, path):\n save(self.actor_net.state_dict(), path + '_actor.pkl')\n save(self.critic_net.state_dict(), path + '_critic.pkl')", "def save_pickle(file, path):\n with open(path, 'wb') as f:\n pickle.dump(file, f)\n file_name = re.findall(r\"/?[^/]+\", path)[-1].strip(\"/\")\n print(f\"Stored {file_name}.\")", "def save(self, path):\n print(\"Warning: Default save used\")\n with open(path, 'wb') as f:\n pickle.dump(self, f)", 
"def save(self, file):\n self._save(file.encode())", "def saveToFile(self, filePath):\n d = self.save()\n with open(filePath, 'wb') as f:\n f.write(d)", "def write_file(content, file_path, mode='w', encoding='utf-8'):\n with codecs.open(file_path, mode, encoding=encoding) as fid:\n fid.write(content)", "def write(s, path, encoding=\"utf-8\"):\n with open(path, \"wb\") as f:\n f.write(s.encode(encoding))", "def save(self, filepath):\n save_ckpt = {\n 'ae': self.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n try:\n torch.save(save_ckpt, os.path.join(filepath, 'ckpt_ae.pth'))\n except:\n print('Cannot save autoencoder.')", "def save(self, path: str):\n with open(path, 'w', encoding='utf-8') as f:\n f.write(self.to_json())", "def save():\n\n subject = subject_var.get()\n category = cat_var.get()\n private = private_var.get()\n message = message_inp.get('1.0', tk.END)\n datestamp_type = datestamp_var.get()\n\n extension = 'txt' if not private else 'secret'\n filename = f'{category} - {subject}.{extension}'\n\n # Apply optional datestamp in message\n if datestamp_type == 'Date':\n datestamp = datetime.today().strftime('%Y-%m-%d')\n elif datestamp_type == 'Date+Time':\n datestamp = datetime.today().strftime('%Y-%m-%d_%H-%M-%S')\n else:\n datestamp = ''\n if datestamp:\n message = f'{message}\\n\\n{datestamp}'\n\n if private:\n password = tksd.askstring(\n 'Enter password',\n 'Enter a password to encrypt the message.'\n )\n message = weaksauce_encrypt(message, password)\n\n with open(filename, 'w') as fh:\n fh.write(message)\n\n status_var.set(f'Message was saved to {filename}')\n tkmb.showinfo('Saved', f'Message was saved to {filename}')", "def _write_cache(self, path, text):\n if self._cache:\n cache_path = os.path.join(self._cache, path)\n\n folder = os.path.split(cache_path)[0]\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n with io.open(cache_path, 'w', encoding='utf-8') as f:\n f.write(text)", "def save(self, path, project_name=\"project\"):\n save_path = os.path.join(path, self.save_path)\n save_path = re.sub(r\"/^{}/\".format(self.template.name), project_name, save_path)\n try:\n os.makedirs(os.path.dirname(save_path))\n except FileExistsError:\n pass\n file = open(save_path, \"w\")\n file.write(self.text)\n file.close()\n print(\"save file: \", save_path)", "def save(self, path):\n pickle.dump(self, open(path, 'wb'))", "def write_scram_toolfile(self, contents, filename):\n with open(self.spec.prefix.etc + '/scram.d/' + filename, 'w') as f:\n f.write(contents)\n f.close()", "def save(self, path):\n self._scala.save(self._tc._scala_sc, path)", "def write_to_path(self, path):\n assert not path.exists()\n fout = path.open(\"wb\")\n fout.write(self.to_string())\n assert not fout.close()\n path.setdata()", "def save(self):\n if PYTHON3:\n fileobj = open(self.filename, 'w', encoding=self.ENCODING, errors=\"replace\")\n else:\n fileobj = open(self.filename, 'w')\n self.save_to_fileobj(fileobj)\n fileobj.close()", "def write(cls, path, text):\n with cls.open(path, 'wt') as fd:\n return fd.write(text)", "def save(self, export_path: str):", "def save(self, path=None):\n if path is None:\n path = self.path\n try:\n with open(path, 'w') as fd:\n for entry in self:\n fd.write('{}\\n'.format(entry))\n except Exception as e:\n raise SSHKeyError('Error writing {}: {}'.format(path, e))", "def save(file_path, nodes=[]):\n\n t = time.time()\n data = get_data(nodes)\n\n if not data:\n mc.warning('Nothing to save for selected nodes!')\n return\n\n if not 
file_path.endswith(file_extention):\n file_path = os.path.splitext(file_path)[0]+file_extention\n\n utils.write_json(file_path, data)\n return file_path\n print time.time() - t", "def Save(file=CONFIG):\n\tif file in files:\n\t\tfiles[file].SaveFile()", "def save_file():\n filepath = asksaveasfilename(\n defaultextension=\"txt\",\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")],\n )\n if not filepath:\n return\n with open(filepath, \"w\") as output_file:\n text = txt_edit.get(\"1.0\", tk.END)\n output_file.write(text)\n window.title(f\"Switch port Consolidation - {filepath}\")", "def savekey(comp):\n with open('clave.key', mode='w') as key:\n key.write(str(comp[0])+'\\n'+str(comp[2]))\n\n with open('clave.cr', mode='w') as pub:\n pub.write(str(comp[1])+'\\n'+str(comp[2]))", "def write_text_file(path: Path, data: str) -> None:\n path.write_text(data, encoding='utf-8')", "def save(self):\n os.rename(self.scores_filename, '%s-%s' % (self.scores_filename, str(time.time())))\n scores_file = codecs.open(self.scores_filename, 'w', encoding='utf-8')\n for each_chan in self.scores_dict:\n for each_nick in self.scores_dict[each_chan]:\n line = '{0},{1},{2},{3}\\n'.format(each_chan, each_nick, self.scores_dict[each_chan][each_nick][0], self.scores_dict[each_chan][each_nick][1])\n scores_file.write(uc.decode(line))\n scores_file.close()", "def write_cipher_text(self, data: bytes, out_file: BinaryIO, filename: str):\n\n cipher_text, tag, nonce, session_key = self.encrypt(data)\n session_key_file = f'{self.file_folder}/{self.user_id}_{filename}.bin'\n\n if not os.path.exists(session_key_file):\n with open(session_key_file, 'wb') as f:\n f.write(session_key)\n\n out_file.write(nonce)\n out_file.write(tag)\n out_file.write(cipher_text)", "def write_cipher_text(self, data: bytes, out_file: BinaryIO, filename: str):\n\n cipher_text, tag, nonce, session_key = self.encrypt(data)\n session_key_file = f'{self.file_folder}/{self.user_id}_{filename}.bin'\n\n if not os.path.exists(session_key_file):\n with open(session_key_file, 'wb') as f:\n f.write(session_key)\n\n out_file.write(nonce)\n out_file.write(tag)\n out_file.write(cipher_text)", "def write(self, path, content):\n this_file = open(path, 'w')\n this_file.write(content)\n this_file.close()", "def save(file, corpus):\n with open(file, 'w') as f_out:\n f_out.write(corpus)", "def save(self, filename, format = \"text\"):\n #\n for time in self.mdvtc.keys():\n if format == \"csv\":\n save_filename = filename + str(int(time)) + \".csv\"\n elif format == \"text\":\n save_filename = filename + str(int(time)) + \".txt\"\n else:\n save_filename = filename + str(int(time)) + \".txt\"\n self.mdvtc[time].save(save_filename, format)", "def save(self, path):\n if path.endswith(\".gz\"):\n file = gzip.open(path, \"w\", 9)\n else:\n file = open(path, \"wb\")\n\n # update the settings in the data to the latest value\n data = json.loads(self.value)\n data[\"settings\"] = self.settings\n\n file.write(json.dumps(data).encode(\"utf8\"))\n file.close()", "def write_to_file(file_name, content):\n with open(file_name, \"w\") as text_file:\n text_file.write(str(content))", "def saveText(texto, fileName, nameLib): \r\n arq = open(fileName + \"-\" + nameLib + \".txt\", \"w\")\r\n arq.write(texto) \r\n arq.close()", "def encrypt_file(path, key):\n # if file ends in encrypted file extension, skip\n if os.path.splitext(path)[1] == settings.ENCRYPTED_FILE_EXTENSION:\n return\n f = Fernet(key)\n # keep reading, encrypting and writting to file separate\n # incase 
encyrpting fail file doesn't get truncated\n # read\n try:\n with open(path, \"rb\") as file:\n file_content = file.read()\n # encrypt\n cypher = f.encrypt(file_content)\n # write to file\n with open(path, \"wb\") as file:\n file.write(cypher)\n except PermissionError:\n # not enough permission, skip\n return\n except FileNotFoundError:\n # file is an alias, skip\n return\n # rename the file with encrypted file extension\n os.rename(path, path + settings.ENCRYPTED_FILE_EXTENSION)", "def write(self,path,content):\n file_path = os.path.join( self.directory, path)\n with open(file_path, \"w\") as file:\n file.write( content )", "def writeFile(fileName, text):\n with open(fileName, 'w', encoding='utf-8') as f:\n f.write(text)", "def save_to_file(path, question_dic, type, set_type, doc_size=None):\n print(\"def save_to_file(path, question_dic, type, set_type, doc_size=None) ...\")\n\n # Check whether question dic contains values\n assert len(question_dic)>0, \"question dic is empty\"\n\n # Create filename\n if type == \"quasar\":\n filename = \"_\".join([type, set_type, doc_size]) + \".pkl\"\n else:\n filename = \"_\".join([type, set_type]) + \".pkl\"\n full_path_to_file = Path(\"/\".join([str(path), str(filename)]))\n\n # Create the output directory if doesn't exist\n if not os.path.exists(path):\n os.makedirs(path)\n\n # Write to the file\n with open(full_path_to_file, \"wb\") as of:\n pickle.dump(question_dic, of)\n print(\"pickled file {} and saved it to {}\".format(filename, full_path_to_file))", "def save(self, filename):\n pass", "def save_to_file(self):\n # Create a new file name based off date and time\n file_name = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S_RTI_CFG.txt\")\n file_path = os.path.expanduser(\"~\\\\Desktop\\\\\"+file_name)\n\n file = open(file_path, 'w')\n file.write(self.commandFileTextBrowser.toPlainText())\n file.close()\n\n self.parent.statusBar().showMessage('File saved to ' + file_path)", "def save_text_file(i):\n\n fn = i['text_file']\n\n s = i['string']\n\n try:\n s = s.replace('\\r', '')\n except Exception as e:\n pass\n\n try:\n s = s.replace(b'\\r', b'')\n except Exception as e:\n pass\n\n m = 'w'\n if i.get('append', '') == 'yes':\n m = 'a'\n\n try:\n s = s.encode('utf8')\n except Exception as e:\n pass\n\n try:\n # if sys.version_info[0]>2:\n # f=open(fn, m+'b')\n # f.write(s)\n # else:\n f = open(fn, m+'b')\n f.write(s)\n except Exception as e:\n return {'return': 1, 'error': 'problem writing text file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n return {'return': 0}", "def savefile(filename, data):\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n output = dumps(data, ensure_ascii=False, sort_keys=True, indent=2)\n file.write(output)", "def fwrite(filename, text):\n basedir = os.path.dirname(filename)\n if not os.path.isdir(basedir):\n os.makedirs(basedir)\n\n with open(filename, 'w') as f:\n f.write(text)", "def save_content(content, dir_path, file_name):\r\n if not os.path.exists(dir_path):\r\n os.mkdir(dir_path)\r\n with open(f'{dir_path}\\\\{file_name}', 'w') as output_file:\r\n output_file.write(content)", "def save(self, characters, filepath):\n\n\t\twith open(filepath, 'w') as out:\n\t\t\tjson.dump(characters, out, sort_keys=True, indent=4)", "def save(self):\n path = self.user.get_session_path()\n with open(path, 'a', encoding='utf8') as file:\n self.write(file=file)", "def save_txt_file():\n global output_on_display\n if data_base == '':\n mistake_load_table()\n else:\n save_name = asksaveasfilename(title=\"Select file\", 
filetypes=((\"TXT\", \"*.txt\"), (\"all files\", \"*.*\")),\n defaultextension='.txt')\n if Path(save_name).suffix == '.txt':\n data_txt = output_on_display.get('1.0', 'end')\n f = open(save_name, 'w')\n f.write(data_txt)\n f.close()", "def txt_file_writer(path):\n return open(path, 'w', encoding=cfg.ENCODING)", "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def spew(path, data):\n with open(path, 'w+') as f:\n f.write(data)", "def save(file_path, nodes):\n\n t = time.time()\n data = get_data(nodes)\n\n if not data:\n mc.warning('Nothing to save for selected nodes!')\n return\n\n if not file_path.endswith(file_extention):\n file_path = os.path.splitext(file_path)[0]+file_extention\n\n utils.write_json(file_path, data)\n print time.time() - t", "def save(self, cert_path: Union[Path, str], key_path: Union[Path, str]):\n cert_path, key_path = Path(cert_path), Path(key_path)\n\n cert_path.parent.mkdir(parents=True, exist_ok=True)\n with cert_path.open(\"wb\") as file:\n file.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, self.cert))\n\n key_path.parent.mkdir(parents=True, exist_ok=True)\n with key_path.open(\"wb\") as file:\n file.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, self.key))", "def save_file(file_name, suffix, content):\n full_path = os.path.abspath(file_name)\n filename, file_extension = os.path.splitext(full_path)\n save_path = '_'.join([filename, suffix]) + file_extension\n with open(save_path, 'w') as f:\n f.write(content)\n return save_path", "def save_cookies(self):\n\n with open(self.location_of_cookies, 'wb') as f:\n pickle.dump(self.get_cookies(), f)\n f.close()", "def writefile(filename, content):\n with open(Path(os.path.expanduser(filename)), 'w') as outfile:\n outfile.write(content)", "def save(self, fname):\n pass", "def write(file, text):\n with open(file, 'w') as f:\n f.write(text)", "def write_file(path, contents, mode=\"w\"):\n with open(path, mode) as f:\n f.write(contents)", "def save(self,cookie_jar):\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n with open(self.file_path, \"wb\") as cookie_file:\n cookie_file.write(bytearray(pickle.dumps(cookie_jar)))", "def to_disk(\n self, path: Union[Path, str], exclude: Iterable[str] = SimpleFrozenList()\n ) -> None:\n serialize = {\n \"vocab\": lambda p: self.vocab.to_disk(p, exclude=exclude),\n \"patterns\": lambda p: srsly.write_msgpack(p, self.patterns),\n }\n util.to_disk(path, serialize, exclude)", "def write_keys(path, keys):\n p_keys = pickle.dumps(keys)\n b_keys = base64.b64encode(p_keys)\n with open(path, \"wb+\") as walletfile:\n walletfile.write(b_keys)", "def _save_keys(self) -> None:\n algorithm = self.algorithm_combobox.currentText()\n filename = AesKeyGenerator(algorithm).save_session_key()\n msg_success(f\"Created keys as {filename}\")", "def store_to_disk(text_corpus, path_preprocessed_files, append_mode=True):\n\n if append_mode:\n text_corpus.to_csv(path_preprocessed_files, sep='|',\n index=False, mode='a', header=False)\n else:\n text_corpus.to_csv(path_preprocessed_files, sep='|',\n index=False, header=True)", "def write_cert(path, filename, data, mode=0o600):\n with os.fdopen(os.open(os.path.join(path, filename),\n os.O_WRONLY | os.O_CREAT, mode), 'wb') as f:\n f.write(data)", "def write_cert(path, filename, data, mode=0o600):\n with os.fdopen(os.open(os.path.join(path, filename),\n os.O_WRONLY | os.O_CREAT, mode), 'wb') as f:\n f.write(data)", "def save_pkl_data(path, data):\n with open(path, 'wb') as 
fo:\n pickle.dump(data, fo)", "def save(self, path=\"\"):\n path = path + \"model_\" + str(self.name) + \".txt\"\n if os.path.isfile(path):\n os.remove(path)\n f = open(path, \"w+\")\n for ident in self.networks:\n f.write(ident + \"_\" + self.networks[ident].descriptor.codify_components() + \"_\" + str(self.networks[ident].taking.size) + \",\" + self.networks[ident].taking.type + \"_\" + str(self.networks[ident].producing.size) + \",\" + self.networks[ident].producing.type + \"_\" +\n str(self.networks[ident].depth) + \"_\" + \",\".join(self.reachable[ident]) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.inputs:\n f.write(ident + \"_\" + str(self.inputs[ident].producing.size) + \"_\" + self.inputs[ident].producing.type + \"_\" + str(self.inputs[ident].depth) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.outputs:\n f.write(ident + \"_\" + str(self.outputs[ident].taking.size) + \"_\" + self.outputs[ident].taking.type + \"_\" + str(self.outputs[ident].depth) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for con in self.connections:\n f.write(self.connections[con].codify() + \"\\n\")\n #f.write(\"\\n\")\n\n f.close()\n\n return path" ]
[ "0.68838364", "0.6661782", "0.65330315", "0.6515433", "0.6431692", "0.64074653", "0.6158728", "0.61471856", "0.6121656", "0.6058847", "0.6049562", "0.60482824", "0.60414743", "0.6010596", "0.598768", "0.59727114", "0.59727114", "0.59727114", "0.59499484", "0.5907642", "0.5892403", "0.5888667", "0.58779985", "0.58776546", "0.5875111", "0.5869437", "0.5813983", "0.5813705", "0.5747593", "0.5747412", "0.5741654", "0.5734627", "0.5712189", "0.5712101", "0.5704289", "0.5689042", "0.56865686", "0.56801635", "0.5678045", "0.5676494", "0.56638503", "0.5661987", "0.5655915", "0.5644095", "0.5642544", "0.5638281", "0.5636288", "0.5636094", "0.56334066", "0.5627573", "0.56239915", "0.56120414", "0.5609023", "0.55848217", "0.558206", "0.55802166", "0.55770934", "0.55633867", "0.5560519", "0.5554076", "0.5554076", "0.5550386", "0.5548146", "0.55454224", "0.5539631", "0.5530926", "0.5525628", "0.5518514", "0.5514363", "0.55137354", "0.5511237", "0.55077237", "0.5502988", "0.5497649", "0.5488436", "0.54836357", "0.5480459", "0.54801303", "0.54572123", "0.5455227", "0.54543704", "0.54464406", "0.54413354", "0.54412425", "0.54330003", "0.5428404", "0.54281056", "0.542715", "0.54253167", "0.5414329", "0.5411709", "0.54108226", "0.54082614", "0.54042107", "0.54031456", "0.5388833", "0.53847533", "0.53847533", "0.5383665", "0.5381527" ]
0.55175346
68
Prompts for confirmation (yes/no question). If the user aborts the input by sending an interrupt signal, this
def confirm( text: str, default: bool = False, abort: bool = False, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, ): prompt = _build_prompt(text, prompt_suffix, show_default, "Y/n" if default else "y/N") while True: try: value = _prompt(prompt, err=err, hide_input=False).lower().strip() except (KeyboardInterrupt, EOFError): raise click.Abort() if value in ('y', "yes"): rv = True elif value in ('n', "no"): rv = False elif value == '': rv = default else: click.echo("Error: invalid input", err=err) continue break if abort and not rv: raise click.Abort() return rv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confirmation(self, question, answer):\n confirm_flag = False\n while confirm_flag not in ['y', 'n']:\n confirm_flag = raw_input(question + ' [y/n]: ')\n if confirm_flag == 'y':\n print answer\n elif confirm_flag == 'n':\n print 'The user cancel the operation'\n exit()\n else:\n print 'The entry is not valid, please enter y or n.'\n return True", "def _confirm(message):\n result = ''\n while result not in ('y', 'n'):\n try:\n result = raw_input('%s Continue (y/n)? ' % message)\n except EOFError:\n result = 'n'\n return result == 'y'", "def confirm(message: str, answer: str | None = None) -> bool:\n given_answer = answer.lower() if answer is not None else \"\"\n while given_answer not in [\"y\", \"n\", \"q\", \"yes\", \"no\", \"quit\"]:\n console.print(f\"[yellow]{message}[y/n/q]?[/] \", end=\"\")\n try:\n given_answer = input(\"\").lower()\n except KeyboardInterrupt:\n given_answer = \"q\"\n if given_answer.lower() in [\"q\", \"quit\"]:\n # Returns 65 in case user decided to quit\n sys.exit(65)\n return given_answer in [\"y\", \"yes\"]", "def confirm(message):\n if not sys.stdout.isatty():\n return False\n reply = BaseCommand.input(\"\\n{message} [Y/N]:\".format(message=message))\n return reply and reply[0].lower() == 'y'", "def get_confirmation():\n inp = PInput(\"#> \")\n\n inp.add_keyword(\"yes\")\n inp.add_keyword(\"no\")\n\n inp.ask()\n ans = inp.get_input()\n\n if ans == \"yes\":\n return True\n else:\n return False", "def confirm(msg=\"\"):\n answer = \"\"\n if not msg: msg = \"OK to continue\"\n while answer not in [\"y\", \"n\"]:\n answer = input(msg+\" [Y/N]? \").lower()\n return answer == \"y\"", "def confirm_yes():\r\n confirm = raw_input(\"Enter 'yes' to confirm: \")\r\n if confirm == 'yes':\r\n return True\r\n return False", "def confirm_prompt(prompt):\n while True:\n print(prompt, end=' [Y/n]: ')\n\n if not os.isatty(sys.stdout.fileno()):\n print(\"Not running interactively. Assuming 'N'.\")\n return False\n pass\n\n r = input().strip().lower()\n if r in ['y', 'yes', '']:\n return True\n elif r in ['n', 'no']:\n return False", "def confirm():\n answer = \"\"\n while answer not in [\"y\", \"n\"]:\n answer = input(\"OK to push to continue [Y/N]? \").lower()\n return answer == \"y\"", "def confirm(force):\n if not force:\n ans = input(que(bold(\"Are you sure? [y/N]: \")))\n else:\n ans = 'y'\n\n return ans.lower()", "def ask_for_confirmation(prompt=\"Are you sure? \", default=True):\n yes, no = (\"Y\", \"n\") if default else (\"y\", \"N\")\n prompt += f\"[{yes}/{no}] \"\n\n while True:\n ans = input(prompt).lower().strip()\n if not ans:\n return default\n elif not (\"yes\".startswith(ans) or \"no\".startswith(ans)):\n print(\"Please enter yes or no.\")\n continue\n else:\n return \"yes\".startswith(ans)", "def confirm(msg: str) -> bool:\n res = input(msg + \" (Y/n) > \")\n if res == 'Y' or res == 'y' or res == 'yes' or res == 'Yes' or res == \"\":\n return True\n return False", "def confirm():\n answer = \"\"\n while answer not in [\"y\", \"n\"]:\n answer = input(\"OK with that [Y/N]? 
\").lower()\n return answer == \"y\"", "def confirm(msg: str = \"Do you want it:\", default: bool = True) -> bool:\n\n question = [\n {\n 'type': 'confirm',\n 'name': 'confirm',\n 'message': msg,\n 'default': default\n }\n ]\n try:\n answer = prompt(question)\n return answer['confirm']\n except KeyError:\n exit = confirm(msg=\"Do you want cancel script\")\n if exit:\n raise SystemExit\n else:\n return confirm(msg, default)", "def Confirm(self):\n self.PrintMetadata()\n answer = input(\"Continue [Y/n]? \").lower()\n return not answer.startswith(\"n\")", "def proceed():\n c_print(\"********** PROCEED? **********\")\n # capture user input\n confirm = input(\" \" * 36 + \"(y/n) \")\n # quit script if not confirmed\n if confirm.lower() != \"y\":\n c_print(\"******* EXITING SCRIPT *******\")\n print(\"~\" * 80)\n exit()\n else:\n c_print(\"********* PROCEEDING *********\")", "def confirm(self, prompt=None, resp=False):\n\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print 'please enter y or n.'\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False", "def get_confirm(self):\n self.warning('Would you like to execute[y/N]: ')\n _choice = input()\n choice = _choice.lower() if _choice else 'n'\n err_msg = \"must input yes(y)/no(n), not \" + _choice\n if not choice.startswith(('y', 'n')):\n self.error(err_msg)\n return\n if choice == 'y' or choice == 'yes':\n confirm = True\n elif choice == 'n' or choice == 'no':\n self.info(\"Nothing to do.\")\n confirm = False\n else:\n self.error(err_msg)\n confirm = None\n\n return confirm", "def confirm(prompt=None, resp=False):\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print 'please enter y or n.'\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False", "def confirm(prompt=None, resp=False):\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print('please enter y or n.')\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False", "def pause(question='PRESS ENTER TO CONTINUE ...'):\n try: input(question)\n except KeyboardInterrupt:\n global shutDown\n shutDown = True\n except: pass", "def confirm(prompt=None, resp=False):\n\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print 'please enter y or n.'\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False", "def exitConfirm():\n\n confirm = showDialogBox('Exit the game now?', 'question', 'yesno', 'no')\n if confirm == 'yes':\n raise SystemExit", "def askforinput(msg='Do you want to proceed?', tab='', newline='\\n'):\n while True:\n inp = 
input(ColorText(f\"{newline}{tab}INPUT NEEDED: {msg} \\n{tab}(yes | no): \").warn().__str__()).lower()\n if inp in ['yes', 'no']:\n if inp == 'no' and msg=='Do you want to proceed?':\n print(ColorText('exiting %s' % sys.argv[0]).fail())\n exit()\n break\n else:\n print(ColorText(\"Please respond with 'yes' or 'no'\").fail())\n return inp", "def keep_going(text=\"Do you wish to continue? Answer Y or N.\"):\n answer = input(text)\n\n if answer == 'Y':\n print(\"The script is now running....\")\n else:\n print(\"You have chosen to quit this program\")\n raise SystemExit", "def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")", "def get_confirmation(self, message):\n\t\tassert isinstance(message, str)\n\n\t\tif not message:\n\t\t\traise AssertionError(\"asd\")\n\t\tuser_input = raw_input(\"{}\\n>\".format(message)).lower()\n\t\twhile True:\n\t\t\tif self.is_boolean_state(user_input):\n\t\t\t\treturn self.get_boolean_state(user_input)\n\t\t\tuser_input = raw_input(\"Please type 'n' for no, or 'y' for yes:\\n>\").lower()", "def question(self, message=\"Do you wish to proceed?\", title=\"Question\", cancel=False):\n if cancel:\n instructions = \"y = yes, n = no, c = cancel\"\n options = ['y', 'n', 'c']\n else:\n instructions = \"y = yes, n = no, c = cancel\"\n options = ['y', 'n']\n print(title)\n print(message)\n print(instructions)\n answer = input()\n while answer not in options:\n print(\"Sorry, I can't interpret that answer\")\n print(message)\n print(instructions)\n answer = input()\n if answer == 'y': return \"Yes\"\n if answer == 'n': return \"No\"\n if answer == 'c': return \"Cancel\"", "def decision(question):\n return click.confirm(question, show_default=True)", "def cancel():\n global confirmation, output1, place_for_enter\n output1.delete(1.0, END)\n confirmation.after(1, confirmation.destroy)\n place_for_enter.delete(0, END)", "def _confirm_prompt(message, prompt=\"\\nAre you sure? [y/yes (default: no)]: \",\n affirmations=(\"Y\", \"Yes\", \"yes\", \"y\")):\n answer = input(message + prompt)\n return answer in affirmations", "def confirm(question, default_choice='yes'):\n valid = {'yes':True, 'y':True, 'ye':True, 'no':False, 'n':False}\n default_choice = str(default_choice).lower()\n if default_choice == 'none':\n prompt = ' [y/n] '\n elif default_choice == 'yes':\n prompt = ' [Y/n] '\n elif default_choice == 'no':\n prompt = ' [y/N] '\n else:\n raise ValueError('invalid default answer: \"%s\"' % default_choice)\n\n while True:\n print(str(question) + prompt)\n choice = input().lower()\n if default_choice != 'none' and choice == '':\n return valid[default_choice]\n elif choice in valid:\n return valid[choice]\n else:\n print(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def confirm(s=None, default=False):\n\n if s:\n s = '{} (y/n): '.format(s)\n else:\n s = 'Continue? (y/n): '\n answer = input(s).strip().lower()\n return answer.startswith('y') if answer else default", "def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. 
Exiting...\")\n exit(0)\n else: return", "def confirm_choice(\n choice: Text, default: Optional[bool] = True, abort: Optional[bool] = True\n) -> bool:\n if default:\n hint = \"Y/n\"\n else:\n hint = \"y/N\"\n answer = input(f\"{choice} [{hint}]: \")\n\n value = None\n if answer.lower() in [\"y\", \"yes\"]:\n value = True\n\n if answer.lower() in [\"n\", \"no\"]:\n value = False\n\n if not answer:\n value = default\n\n if value is None:\n print(\"Invalid answer\")\n return confirm_choice(choice=choice, default=default, abort=abort)\n\n if abort and not value:\n raise RuntimeError(\"Aborting\")\n\n return value", "def test_prompt_msg_confirm_blank_default_yes(self):\n with mock.patch('__builtin__.raw_input', return_value=\"\") as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"Continue\",\n \"ask\": \"result\",\n \"confirm\": True\n })\n\n args, kwargs = mockinput.call_args\n\n self.assertEquals(\"Continue [Yn]? \", args[0])\n self.assertEquals(result['ansible_facts']['result'], True)", "def ask_yes_no(question):\n answer = None\n while answer not in (\"y\",\"n\"):\n answer = input(question).lower()\n return answer", "def ask_yes_no(question):\r\n\tresponse = None\r\n\twhile response not in (\"y\", \"n\"):\r\n\t\tresponse = input(question).lower()\r\n\treturn response", "def confirm_action(message: str) -> bool:\n return input(message)[0:1] in \"Yy\"", "def confirm(self, prompt, default):\n raise NotImplementedError(NotImplementedMessage)", "def confirm(text, window=None):\n return message(text, u'Confirma', M_QUESTION, B_YES_NO, window) == R_YES", "def ask_continue():\n i = input(\"Please ensure your System Dependencies are met. Continue? [y/N] \")\n if i in (\"\", \"N\", \"n\"):\n out_error(\"Please install system dependencies to continue\")\n exit(1)", "def prompt_for_exit():\n g.message = c.r + \"Press ctrl-c again to exit\" + c.w\n g.content = generate_songlist_display()\n screen_update()\n\n try:\n userinput = input(c.r + \" > \" + c.w)\n\n except (KeyboardInterrupt, EOFError):\n quits(showlogo=False)\n\n return userinput", "def _yes_no_select(question):\n while True:\n response = input(question + \" [y/n] \")\n if response in [\"y\", \"yes\"]:\n return True\n elif response in [\"n\", \"no\"]:\n return False\n else:\n print(\"\\nPlease select y or n\\n\")", "def confirm(message: str = \"Confirm?\", suffix: str = \" (y/n) \") -> bool:\n session = create_confirm_session(message, suffix)\n return session.prompt()", "def ask_user( prompt ):\n answer = raw_input( prompt )\n if answer.lower() in [\"y\",\"yes\"]:\n return True\n else:\n return False", "def ask_ok_cancel(message=\"\", title=None):\n return dialog(\"ask_ok_cancel\", message=message, title=title)", "def confirm():\n\t\traise NotImplementedError", "def ask(prompt):\n\n return renpy.exports.invoke_in_new_context(renpy.store.layout.yesno_prompt, None, prompt)", "def prompt_user(prompt: str) -> bool:\n positive_response = {'yes', 'y', 'ye', '', 'ok'}\n negative_response = {'no', 'n'}\n\n while True:\n answer = input(prompt).lower()\n if answer in positive_response:\n return True\n elif answer in negative_response:\n return False\n else:\n print(\"Please respond with 'yes' or 'no'\\n\", file=sys.stderr)", "def ask_yes_no(message=\"\", title=None):\n return dialog(\"ask_yes_no\", message=message, title=title)", "def test_prompt_msg_confirm_blank_default_no(self):\n with mock.patch('__builtin__.raw_input', return_value=\"\") as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"Continue\",\n 
\"ask\": \"result\",\n \"confirm\": False\n })\n\n args, kwargs = mockinput.call_args\n\n self.assertEquals(\"Continue [yN]? \", args[0])\n self.assertEquals(result['ansible_facts']['result'], False)", "def prompt_user(prompt):\r\n # raw_input returns the empty string for \"enter\"\r\n yes = set(['yes', 'y'])\r\n no = set(['no','n'])\r\n\r\n try:\r\n print(prompt)\r\n choice = raw_input().lower()\r\n # would like to test for exception here, but not sure if we can do that without importing IPython\r\n except:\r\n print('Stdin is not implemented.')\r\n print('You need to set')\r\n print('overide_manual_authorize=True')\r\n print('to proceed with the download. Please set that variable and continue.')\r\n raise\r\n\r\n\r\n if choice in yes:\r\n return True\r\n elif choice in no:\r\n return False\r\n else:\r\n print(\"Your response was a \" + choice)\r\n print(\"Please respond with 'yes', 'y' or 'no', 'n'\")\r\n #return prompt_user()\r", "def no_going_back(confirmation):\r\n if not confirmation:\r\n confirmation = 'yes'\r\n\r\n return valid_response(\r\n 'This action cannot be undone! '\r\n 'Type \"%s\" or press Enter to abort: ' % confirmation,\r\n str(confirmation))", "def _prompt(prompt):\n return raw_input(\"%s [yes or no]: \" % prompt) == \"yes\"", "def confirm_prompt(prompt, stream=sys.stdout):\n return ask_prompt(prompt, options=[\"y\", \"n\"], stream=stream) == \"y\"", "def confirm(prompt_str, default=False):\r\n if default:\r\n prompt = '%s [Y/n]: ' % prompt_str\r\n else:\r\n prompt = '%s [y/N]: ' % prompt_str\r\n\r\n response = valid_response(prompt, 'y', 'yes', 'yeah', 'yup', 'yolo')\r\n\r\n if response is None:\r\n return default\r\n\r\n return response", "def ask_yesno(prompt):\n more = input(prompt)\n while more not in [\"y\", \"n\"]:\n print(\"I beg your pardon!\")\n more = input(prompt)\n return more == 'y'", "def AskYesNoCancel(question, default = 0, yes=None, no=None, cancel=None, id=262):\n\n raise NotImplementedError(\"AskYesNoCancel\")", "def handle_yes_no_input(prompt):\n user_input = input(prompt).upper()\n\n # Handling bad input\n while user_input not in [\"Y\", \"N\"]:\n user_input = input(f\"\\\"{user_input}\\\" is not a valid input. Please enter \\\"Y\\\" or \\\"N\\\": \")\n\n return user_input == \"Y\"", "def prompt_user(videos, confirmation=False):\r\n if not confirmation:\r\n print(\"Found videos:\")\r\n print(\"\\n\".join(videos))\r\n question = \"Are you sure you would like to upload these videos? [Y/n]\"\r\n confirmation = raw_input(question).lower() in ('', 'y', 'yes')\r\n return confirmation", "def prompt_yes_no(message, color):\n\tquestions = [inquirer.List('choice',\n\t message=color + Style.BRIGHT + message + Fore.BLUE,\n\t choices=[' Yes', ' No'],\n\t ),\n\t ]\n\n\tanswers = inquirer.prompt(questions)\n\treturn answers.get('choice').strip().lower() == 'yes'", "def takeInAndConfirmUserInput():\n validInput = False\n userInput = \"\"\n while validInput != True:\n userInput = input(\"~ \")\n\n print(f\"you have written {userInput}, is this correct? y/[n]\")\n\n confirmation = input(\"~ \")\n\n if confirmation.lower() == \"y\":\n validInput = True\n\n return userInput", "def confirm(text, app, version, modules=None, default_yes=False):\n print(text)\n print(' Directory: %s' % os.path.basename(app.app_dir))\n print(' App ID: %s' % app.app_id)\n print(' Version: %s' % version)\n print(' Modules: %s' % ', '.join(modules or app.modules))\n if default_yes:\n return raw_input('Continue? [Y/n] ') not in ('n', 'N')\n else:\n return raw_input('Continue? 
[y/N] ') in ('y', 'Y')", "def bool_prompt(text):\n switcher = {\n \"y\": True,\n \"yes\": True\n }\n user_input = input(text).lower()\n return switcher.get(user_input, False)", "def ask(self, question):\n if self.options.yes:\n return True\n\n result = False\n while True:\n print(question + ' [y/n] ')\n response = sys.stdin.readline()\n if response:\n if response[0].lower() == 'y':\n result = True\n break\n elif response[0].lower() == 'n':\n break\n print('Please type \"y\" for yes or \"n\" for no')\n return result", "def check_or_exit(msg):\n while True:\n user_input = raw_input(\"%s (y/n): \" % msg).lower()\n if user_input in ['y', 'yes']:\n print\n return\n if user_input in ['n', 'no']:\n print\n print_warning(\"Please complete the required steps and then \"\n \"re-run the script.\")\n sys.exit(1)", "def yes_or_no(prompt):\n response = input(prompt)\n while response not in ['y', 'n']:\n print('Invalid input')\n response = input(prompt)\n\n return response", "def user_input():\n ans = input('Continue? : y/n ')\n if ans == 'n':\n return False\n else:\n return True", "def test_prompt_msg_confirm_capital_valid(self):\n with mock.patch('__builtin__.raw_input', return_value=\"Y\") as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"Continue\",\n \"ask\": \"result\",\n \"confirm\": False\n })\n\n args, kwargs = mockinput.call_args\n\n self.assertEquals(\"Continue [yN]? \", args[0])\n self.assertEquals(result['ansible_facts']['result'], True)", "def prompt(question):\n print('\\n')\n while True:\n reply = str(input(question+' (y/n): ')).lower().strip()\n if reply[:1] == 'y':\n return True\n if reply[:1] == 'n':\n return False", "def confirm_exit(self):\n return True", "def runAskYesNoCancelDialog(\n self,\n c: Cmdr,\n title: str,\n message: str=None,\n yesMessage: str=\"&Yes\",\n noMessage: str=\"&No\",\n yesToAllMessage: str=None,\n defaultButton: str=\"Yes\",\n cancelMessage: str=None,\n ) -> str:\n if g.unitTesting:\n return None\n dialog = QtWidgets.QMessageBox(c and c.frame.top)\n if message:\n dialog.setText(message)\n dialog.setIcon(Information.Warning)\n dialog.setWindowTitle(title)\n # Creation order determines returned value.\n yes = dialog.addButton(yesMessage, ButtonRole.YesRole)\n no = dialog.addButton(noMessage, ButtonRole.NoRole)\n cancel = dialog.addButton(cancelMessage or 'Cancel', ButtonRole.RejectRole)\n if yesToAllMessage:\n dialog.addButton(yesToAllMessage, ButtonRole.YesRole)\n if defaultButton == \"Yes\":\n dialog.setDefaultButton(yes)\n elif defaultButton == \"No\":\n dialog.setDefaultButton(no)\n else:\n dialog.setDefaultButton(cancel)\n try:\n c.in_qt_dialog = True\n dialog.raise_() # #2246.\n val = dialog.exec() if isQt6 else dialog.exec_()\n finally:\n c.in_qt_dialog = False\n # val is the same as the creation order.\n # Tested with both Qt6 and Qt5.\n return {\n 0: 'yes', 1: 'no', 2: 'cancel', 3: 'yes-to-all',\n }.get(val, 'cancel')", "def sigint_handler(*args):\n # if QMessageBox.question(None, '', \"Are you sure you want to quit?\",\n # QMessageBox.Yes | QMessageBox.No,\n # QMessageBox.No) == QMessageBox.Yes:\n QApplication.quit()", "def yesButton(self):\n \n self.answer=self.yesMessage.lower()\n self.top.destroy()", "def test_ask_yes_no_1(self, input_mock):\n response = basic.ask_yes_no()\n self.assertTrue(response)", "def query_yes_no_quit(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\", \"no\":\"no\", \"n\":\"no\", \"quit\":\"quit\", \"qui\":\"quit\", \"qu\":\"quit\", \"q\":\"quit\"}\n if default == 
None:\n\tprompt = \" [y/n/q] \"\n elif default == \"yes\":\n\tprompt = \" [Y/n/q] \"\n elif default == \"no\":\n\tprompt = \" [y/N/q] \"\n elif default == \"quit\":\n\tprompt = \" [y/n/Q] \"\n else:\n\traise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n\tsys.stdout.write(question + prompt)\n\tchoice = raw_input().lower()\n\tif default is not None and choice == '':\n\t return default\n\telif choice in valid.keys():\n\t return valid[choice]\n\telse:\n\t sys.stdout.write(\"Please respond with 'yes', 'no' or 'quit'.\\n\")", "def confirm():\n end_loop = False\n while not end_loop:\n confirmation = input(\"\"\"Would you like to continue with your choice?\n[1] No [2] Yes\nEnter a number please: \"\"\")\n if not confirmation or confirmation.isspace():\n print(\"You have not entered anything!\")\n try_again()\n elif confirmation.isnumeric() == True:\n if 0 < int(confirmation) < 3:\n if int(confirmation) == 1:\n confirmation = False\n return confirmation\n else:\n confirmation = True\n return confirmation\n end_loop = True\n else:\n print(\"You have not entered a valid number. Please enter a number between 1 and 2.\")\n else:\n print(\"Please enter a number only.\")\n try_again()", "def ask_ok(title='Confirm', message=''):\n if not isinstance(title, string_types):\n raise TypeError('ask_ok() title must be a string.')\n if not isinstance(message, string_types):\n raise TypeError('ask_ok() message must be a string.')\n return _get_app().ask_ok(title, message)", "def query_yes_no_quit(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\",\n \"quit\":\"quit\", \"qui\":\"quit\", \"qu\":\"quit\", \"q\":\"quit\"}\n if default == None:\n prompt = \" [y/n/q] \"\n elif default == \"yes\":\n prompt = \" [Y/n/q] \"\n elif default == \"no\":\n prompt = \" [y/N/q] \"\n elif default == \"quit\":\n prompt = \" [y/n/Q] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes', 'no' or 'quit'.\\n\")", "def prompt_yes_no(question, default):\n again = 'Unknown response.'\n if default.lower() in ('y', 'yes'):\n options = '(Y/n): '\n elif default.lower() in ('n', 'no'):\n options = '(y/N): '\n else:\n raise ValueError('default must be \"y\", \"yes\", \"n\", or \"no\"')\n\n response = input(' '.join((question, options))).lower()\n while response not in ('y', 'yes', 'n', 'no', ''):\n response = input(' '.join((again, question, options))).lower()\n if response == '':\n return default\n return response", "def sigint_handler(*args):\n sys.stderr.write('\\r')\n if QMessageBox.question(None, '', \"Are you sure you want to quit?\",\n QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No) == QMessageBox.Yes:\n QApplication.quit()\n # qApp.quit()", "def messageConfirm(self,message):\n answer=self.message(message,style=wx.YES_NO|wx.ICON_QUESTION)\n return self.messageIsOk(answer)", "def test_ask_yes_no_2(self, input_mock):\n response = basic.ask_yes_no()\n self.assertFalse(response)", "def prompt_yes_no(message, color, invert=False):\n questions = [\n inquirer.List(\n \"choice\",\n message=color + Style.BRIGHT + message + Fore.BLUE,\n choices=(\" No\", \" Yes\") if invert else (\" Yes\", \" No\"),\n )\n ]\n\n answers = inquirer.prompt(questions)\n if answers:\n return 
answers.get(\"choice\").strip().lower() == \"yes\"\n else:\n sys.exit(1)", "def confirm(action, default=None, skip=False):\n MAX_ITERATIONS = 3\n if skip:\n return default\n else:\n defaults = {\n None: ('y','n'),\n True: ('Y','n'),\n False: ('y','N'),\n }\n y, n = defaults[default]\n prompt = u('{action}? ({y}/{n})').format(**locals()).encode('utf-8')\n choice = None\n try:\n if default is None:\n cnt = 1\n while not choice and cnt < MAX_ITERATIONS:\n choice = raw_input(prompt)\n cnt += 1\n else:\n choice = raw_input(prompt)\n except KeyboardInterrupt:\n return None\n if choice in ('yes', 'y', 'Y'):\n return True\n if choice in ('no', 'n', 'N'):\n return False\n if default is not None:\n return default\n return None", "def get_prompt_yes_or_no(msg_input):\n\n msg_output = \"\"\n msg_code = 2\n yes = set(['yes', 'y', 'ye', ''])\n no = set(['no', 'n'])\n\n msg_answer = raw_input(msg_input).lower()\n if msg_answer in yes:\n msg_code = 0\n elif msg_answer in no:\n msg_code = 1\n msg_output = \"Ok, aborting...\"\n else:\n msg_code = 2\n msg_output = \"Please respond with 'yes' or 'no'.\"\n\n return msg_code, msg_output", "def ask(question=WARNING_DIFF):\n\t\t\tfd = sys.stdin.fileno()\n\n\t\t\toldterm = termios.tcgetattr(fd)\n\t\t\tnewattr = termios.tcgetattr(fd)\n\t\t\tnewattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n\t\t\ttermios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n\t\t\toldflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)\n\n\t\t\tself.stdout.write(question)\n\n\t\t\ttry:\n\t\t\t\twhile True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tfirstCharacter = sys.stdin.read(1)\n\t\t\t\t\t\treturn forceUnicode(firstCharacter) in (u\"y\", u\"Y\")\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tpass\n\t\t\tfinally:\n\t\t\t\ttermios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n\t\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags)", "def validate_action(self, message=\"This action may delete data from the database. This action cannot be undone.\\nDo you wish to continue? (Y/N): \"):\n \n while True:\n print('\\n\\n')\n inp = input(message)\n \n if (inp.upper() == 'Y'):\n return True\n elif (inp.upper() == 'N'):\n return False\n \n print(\"Invalid input. 
Try again\")", "def runAskYesNoDialog(self,\n c: Cmdr, title: str, message: str=None, yes_all: bool=False, no_all: bool=False,\n ) -> str:\n if g.unitTesting:\n return None\n dialog = QtWidgets.QMessageBox(c and c.frame.top)\n # Creation order determines returned value.\n yes = dialog.addButton('Yes', ButtonRole.YesRole)\n dialog.addButton('No', ButtonRole.NoRole)\n # dialog.addButton('Cancel', ButtonRole.RejectRole)\n if yes_all:\n dialog.addButton('Yes To All', ButtonRole.YesRole)\n if no_all:\n dialog.addButton('No To All', ButtonRole.NoRole)\n dialog.setWindowTitle(title)\n if message:\n dialog.setText(message)\n dialog.setIcon(Information.Warning)\n dialog.setDefaultButton(yes)\n if c:\n try:\n c.in_qt_dialog = True\n dialog.raise_()\n val = dialog.exec() if isQt6 else dialog.exec_()\n finally:\n c.in_qt_dialog = False\n else:\n dialog.raise_()\n val = dialog.exec() if isQt6 else dialog.exec_()\n # val is the same as the creation order.\n # Tested with both Qt6 and Qt5.\n return_d = {0: 'yes', 1: 'no'}\n if yes_all and no_all:\n return_d [2] = 'yes-all'\n return_d [3] = 'no-all'\n elif yes_all:\n return_d [2] = 'yes-all'\n elif no_all:\n return_d [2] = 'no-all'\n return return_d.get(val, 'cancel')", "def ask_to_continue():\n\n bored = raw_input(\"Do you want another suggestion?(yes/no) \").lower()\n\n if bored == 'no':\n print\n print \"Great have fun!\"\n return False\n\n return True", "def yes_no_dialog(self, message):\n reply = QMessageBox.question(self, \"Are you sure?\",\n message, QMessageBox.Yes, QMessageBox.Cancel)\n\n if reply == QMessageBox.Yes:\n return True\n else:\n return False", "def promptyn(msg: str, default: Optional[bool] = None) -> bool:\n\n while True:\n yes = \"Y\" if default else \"y\"\n if default or default is None:\n no = \"n\"\n else:\n no = \"N\"\n confirm = prompt(\"%s [%s/%s]\" % (msg, yes, no), \"\").lower()\n if confirm in (\"y\", \"yes\"):\n return True\n elif confirm in (\"n\", \"no\"):\n return False\n elif not confirm and default is not None:\n return default", "def yesno_cancel(\n question, title=None, bitmap=None, yes=None, no=None, cancel=None, checkbox=None, checked=None\n):\n\n if title is None:\n title = _('Yes or no?')\n if yes is None:\n yes = _(\"Yes\")\n if no is None:\n no = _(\"No\")\n if cancel is None:\n cancel = _(\"Cancel\")\n if checkbox is None:\n checkbox = _(\"Apply to all\")\n\n return msg_dialogs.prompt3msg(question, title, bitmap, yes, no, cancel, checkbox, checked)", "def read_yes_no(prompt):\n ans = input(str(prompt) + ' [Y/n] ').lower()\n if ans in ['', 'y', 'yes']:\n return True\n else:\n return False", "def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True", "def ask_for_confirmation(df: pd.DataFrame) -> bool:\n print(df.to_string())\n answer = input(r'Start the scan? y/[n]: ')\n return True if answer == 'y' else False", "def wait_for_user_input():\n\n input(\"Pulse ENTER para continuar...\")", "def ask_dialog(self, title=\"\", vars=[], help=\"\"):\n\t\tpass", "def test_prompt_msg_noask_confirm_fails(self):\n self.expected['failed'] = True\n self.expected['msg'] = \"Unexpected 'confirm' in non-question prompt.\"\n\n self.assertEquals(\n self.prompt._prompt(self.response, {\n \"say\": \"Hello World\",\n \"confirm\": True\n }),\n self.expected\n )" ]
[ "0.7569344", "0.7401378", "0.72859263", "0.7283516", "0.7266474", "0.719268", "0.71785426", "0.7086956", "0.70464075", "0.7021681", "0.6976385", "0.69672275", "0.69638824", "0.6946273", "0.6888155", "0.6809539", "0.6780994", "0.67785114", "0.66649485", "0.6664532", "0.6661782", "0.66520536", "0.6636097", "0.6519618", "0.65183264", "0.6500076", "0.6486283", "0.6461148", "0.6452029", "0.64101785", "0.64016825", "0.6399888", "0.63829315", "0.6380877", "0.6380849", "0.63682413", "0.63681436", "0.63663596", "0.6321782", "0.63100606", "0.63066137", "0.6258747", "0.62529725", "0.62440544", "0.6230077", "0.6222206", "0.6209998", "0.6203887", "0.6192816", "0.61876935", "0.6186979", "0.6180998", "0.61717516", "0.6167907", "0.61481506", "0.61295927", "0.6126983", "0.6123972", "0.6118938", "0.61127865", "0.6080667", "0.60631657", "0.6059547", "0.6052171", "0.6050829", "0.6043869", "0.6030351", "0.60180247", "0.60165346", "0.59993976", "0.5992133", "0.5961614", "0.59435505", "0.59435266", "0.59369326", "0.59274447", "0.5917486", "0.59095037", "0.5904631", "0.58935803", "0.5885693", "0.5885668", "0.5875868", "0.58746785", "0.58737296", "0.5869515", "0.58633995", "0.58570737", "0.58470845", "0.58390445", "0.5837847", "0.58255285", "0.58116394", "0.58098626", "0.5805425", "0.5796985", "0.579058", "0.5779257", "0.5773276", "0.5766068" ]
0.62040895
47
Prompts a user for input. If the user aborts the input by sending an interrupt signal, this
def choice( options: Union[List[str], Mapping[str, str]], text: str = '', default: Optional[str] = None, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, start_index: int = 0 ) -> Union[str, int]: # TODO: completer for numbers? type_: click.ParamType if isinstance(options, Mapping): # (Y/I/N/O/D/Z) [default=N] text = f"{text} ({'/'.join(options.keys())})" type_ = click.STRING for choice, descripton in options.items(): click.echo(f" {choice} : {descripton}") else: type_ = click.IntRange(start_index, len(options) + 1 - start_index) for idx, descripton in enumerate(options): idx += start_index click.echo(f" [{idx}] {descripton}") if default is not None and show_default: text += f" [default={default}]" while True: selection = prompt( text=text, default=default, type=type_, prompt_suffix=prompt_suffix, show_default=False, err=err, ) if isinstance(options, Mapping): selection = selection.strip().upper() if selection not in options: click.echo(f"Please enter a valid option.") else: return selection else: return selection - start_index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pause(question='PRESS ENTER TO CONTINUE ...'):\n try: input(question)\n except KeyboardInterrupt:\n global shutDown\n shutDown = True\n except: pass", "def wait_for_user_input():\n\n input(\"Pulse ENTER para continuar...\")", "def cont():\n\n try:\n input = raw_input()\n except Exception:\n pass", "def clean_input(prompt):\n try:\n return input(prompt)\n # There is a general handling of KeyboardInterrupt in main() but\n # here it leads to a cleaner exit as the option to quit is returned.\n except KeyboardInterrupt:\n return 'Quit'", "def safe_input():\n try:\n input(\"Please enter something: \")\n except EOFError:\n return None\n except KeyboardInterrupt:\n return None", "def _WaitForAnyUserInput() -> None:\n _get_input('Press any key to continue')", "def prompt_for_exit():\n g.message = c.r + \"Press ctrl-c again to exit\" + c.w\n g.content = generate_songlist_display()\n screen_update()\n\n try:\n userinput = input(c.r + \" > \" + c.w)\n\n except (KeyboardInterrupt, EOFError):\n quits(showlogo=False)\n\n return userinput", "def handleKeyboardInterupt():\n System.stopExecution(TERMINATED_BY_USER)", "def ask_input(self, prompt):\n self._vim.command('call inputsave()')\n self._vim.command('let user_input = input(\"{} \")'.format(prompt))\n self._vim.command('call inputrestore()')\n response = self._vim.eval('user_input')\n self._vim.command('unlet user_input')\n return response", "def askforinput(msg='Do you want to proceed?', tab='', newline='\\n'):\n while True:\n inp = input(ColorText(f\"{newline}{tab}INPUT NEEDED: {msg} \\n{tab}(yes | no): \").warn().__str__()).lower()\n if inp in ['yes', 'no']:\n if inp == 'no' and msg=='Do you want to proceed?':\n print(ColorText('exiting %s' % sys.argv[0]).fail())\n exit()\n break\n else:\n print(ColorText(\"Please respond with 'yes' or 'no'\").fail())\n return inp", "def input_for_user_selection(self):\n user_input = \"\"\n while user_input not in range(0, len(self.users)):\n print(\"Pick user from above, or type 'cancel'\")\n user_input = input()\n if user_input == \"cancel\":\n raise ValueError\n user_input = int(user_input)\n return user_input", "def checkInterrupt():\n if wasInterrupted():\n raise KeyboardInterrupt()", "def wait_for_input(self):\n if self._dont_enter_interactive_mode:\n return\n stop = False\n while True:\n print(\">>> \", end='')\n try:\n command_str = input()\n except EOFError:\n print(\"Exiting interactive mode\")\n break\n stop = self.interpret_command(command_str)\n if stop:\n print(\"Exiting interactive mode\")\n break", "def input_wrapper(msg):\n userinput = input(msg)\n if userinput != 'q':\n return userinput\n else:\n sys.exit()", "def handle_inputs(self):\n user_input = \"\"\n while user_input != \"exit\":\n self.print_divider()\n user_input = input()\n self.do_action_for_input(user_input)", "def wait() -> None:\n\n process_input(input())", "def prompt_user(prompt):\r\n # raw_input returns the empty string for \"enter\"\r\n yes = set(['yes', 'y'])\r\n no = set(['no','n'])\r\n\r\n try:\r\n print(prompt)\r\n choice = raw_input().lower()\r\n # would like to test for exception here, but not sure if we can do that without importing IPython\r\n except:\r\n print('Stdin is not implemented.')\r\n print('You need to set')\r\n print('overide_manual_authorize=True')\r\n print('to proceed with the download. 
Please set that variable and continue.')\r\n raise\r\n\r\n\r\n if choice in yes:\r\n return True\r\n elif choice in no:\r\n return False\r\n else:\r\n print(\"Your response was a \" + choice)\r\n print(\"Please respond with 'yes', 'y' or 'no', 'n'\")\r\n #return prompt_user()\r", "def wait_for_input(self):\n pass", "def interactive_run(self):\r\n while True:\r\n try:\r\n #self.display_mang.print_instructions()\r\n input_string = input()\r\n if input_string == \"exit\":\r\n break\r\n self.input_mang.process_input(input_string)\r\n except Exception as e:\r\n print(e)", "def prompt(msg):\n # remove non-blocking mode\n fd = sys.stdin.fileno()\n flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)\n return raw_input(msg)", "def noinput():\n env.prompt = False", "def handle_input():\n\n # wait for user input and get timeout or character to process\n char = read_input()\n\n # handle user input\n if not is_input_valid(char):\n # No valid input, keep waiting for input\n return True\n\n # if terminal size is not valid, stop here\n if not nuqql.config.WinConfig.is_terminal_valid():\n show_terminal_warning()\n return True\n\n # if terminal resized, resize and redraw active windows\n if char == curses.KEY_RESIZE:\n nuqql.conversation.resize_main_window()\n return True\n\n # pass user input to active conversation\n for conv in nuqql.conversation.CONVERSATIONS:\n if conv.is_active():\n conv.process_input(char)\n return True\n\n # if no conversation is active pass input to active list window\n if nuqql.win.MAIN_WINS[\"list\"].state.active:\n # list window navigation\n nuqql.win.MAIN_WINS[\"input\"].redraw()\n nuqql.win.MAIN_WINS[\"log\"].redraw()\n nuqql.win.MAIN_WINS[\"list\"].process_input(char)\n return True\n\n # list window is also inactive -> user quit\n return False", "def handle_input(sock):\n\tprint(\"Type message, enter to send. 
'q' to quit\")\n\twhile True:\n\t\tmsg = input() #Blocks\n\t\tif msg == 'q':\n\t\t\tprint('Shut Down Client')\n\t\t\tsock.shutdown(socket.SHUT_RDWR)\n\t\t\tsock.close()\n\t\t\tbreak\n\t\ttry:\n\t\t\ttincanchat.send_msg(sock,msg) #Blocks until sent\n\t\texcept(BrokenPipeError,ConnectionError):\n\t\t\tbreak", "def input_(text=''):\n while True:\n try:\n thing = input(text)\n if thing == '':\n raise ValueError\n else:\n return thing\n except (EOFError, KeyboardInterrupt, ValueError):\n print()", "def wait_for_enter(field_data=\"\"):\n try:\n input(f\"{field_data}\\n\" f\"Press the 'ENTER' key to continue\")\n except KeyboardInterrupt:\n pass", "def safe_input(prompt=\"\"):\n\n\ttry:\n\t\tresult = input(prompt)\n\t\treturn result\n\texcept KeyboardInterrupt:\n\t\tsys.exit()\n\texcept:\n\t\treturn \"\"", "def waitenterpressed(message = \"Press ENTER to continue...\"):\n\ttry:\n\t\tinput = raw_input\n\texcept: \n\t\tpass\n\traw_input(message)\n\treturn 0", "def s_input(prompt : str = \">\", accepted_inputs : list = [\"break\"], case_sensitive : bool = False, fail_message : str = \"\") -> str:\r\n\r\n user_input = \"\"\r\n first = True #For checking if the fail message should print or not\r\n while user_input not in accepted_inputs:\r\n if fail_message != \"\" and not first:\r\n print(fail_message) #Prints the assigned fail message if it isn't the first iteration\r\n user_input = input(prompt) #Gets user input\r\n if not case_sensitive:\r\n user_input = user_input.lower() #Sets the input to lower if needed\r\n first = False #Ensures that it is not the first iteration anymore\r\n return user_input", "def keep_going(text=\"Do you wish to continue? Answer Y or N.\"):\n answer = input(text)\n\n if answer == 'Y':\n print(\"The script is now running....\")\n else:\n print(\"You have chosen to quit this program\")\n raise SystemExit", "def prompt_int(prompt):\n while True:\n try:\n return int(input(prompt))\n except ValueError as e:\n print('Provide an integer')", "def handle_input(self, key):\n if key == 'Q' or key == 'q':\n if(self.proc is not None):\n self.proc.send_signal(signal.SIGINT)\n\n raise urwid.ExitMainLoop()\n if key == 'R' or key == 'r':\n self.model.running = True\n self.run()\n if key == 'P' or key == 'p':\n self.togglePause()", "def Wait(p_question: str):\n input(p_question)\n return", "def ask_for_query():\n print('Enter query, empty to quit:')\n try:\n query = input('? 
')\n except EOFError:\n # User has cancelled\n return False\n\n return query", "def __exit_condition(data_logger):\n try:\n while True:\n raw_input(\"\") # FIXME: is raw_input the right approach\n if CLOSE:\n raise KeyboardInterrupt()\n\n except (KeyboardInterrupt, EOFError):\n sys.stdin.close()\n data_logger.stop()", "def request_input(self, possibles=[]):\n answer = self.console.input('Type your request here:')\n if len(possibles) > 0 and self.numeric:\n invalid = True\n while invalid:\n try:\n answer = int(answer)\n invalid = False\n break\n except:\n answer = self.console.input('Type your request here (numbers only):')\n\n answer = possibles[answer - 1]\n else:\n if answer.find('quit') != -1:\n self.running = False\n else:\n if answer.find('quit') != -1:\n self.running = False\n return answer", "def stop(self):\n command = input(\"Enter anything to finish (or 'exit' to cancel)>>>\")\n return command != 'exit'", "def input(prompt=\"Input\"):\n \n __PyInputHelper__.userInput = None\n \n __PyInputHelper__.showAlertWithPrompt(prompt)\n \n while (__PyInputHelper__.userInput == None):\n \n if (threading.currentThread() in ignoredThreads):\n return \"\"\n \n continue\n \n userInput = __PyInputHelper__.userInput\n __PyInputHelper__.userInput = None\n return str(userInput)", "def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())", "def haltExec(self, input_pin=0):\n if not 0 <= input_pin < 2:\n raise(ValueError('`input_pin` [{0}] must be between 0 and 2'\n ''.format(input_sig)))\n cmd_string = 'H{0}'.format(input_sig)\n return self.sendRcv(cmd_string)", "def stdin_thread(self):\n while True:\n if not self.is_running():\n time.sleep(0.1)\n continue\n msg = self._stdin_queue.get()\n if msg is None:\n break # Ask to stop\n self._say(msg)", "def quit():\n while True:\n try:\n choice = input('press q to quit \\n r to restart')\n choice = choice.lower() # sanitize inputs before comparision\n\n except TypeError:\n print('Please enter q to quit or r to restart')\n if choice not in ('q', 'r'):\n continue\n else:\n break\n if choice == 'q':\n return True\n elif choice == 'r':\n return False", "def input(self):\n try:\n temp = ord(raw_input())\n self.tape.replace(temp)\n except:\n print \"Error -002\"\n raise", "def get_input():\n return getch()", "def receive_interrupt_request(self, _: EmptyMsg):\n self.renderer.interrupt()", "def signal_handler(signal, frame): \n import signal\n import sys\n from time import localtime, strftime\n time = strftime(\"%H:%M:%S\", localtime())\n sel = raw_input('\\n\\n%s: Paused. 
Press return to resume, or type exit to quit: \\n' % time)\n if sel.startswith('e') or sel.startswith('E'):\n sys.exit(0)\n else:\n time = strftime(\"%H:%M:%S\", localtime())\n print '%s: Interrogation resumed.\\n' % time", "def inask(question: str) -> str:\n answer = input(question)\n return answer", "def input(self, prompt):\r\n return console_input(prompt)", "def on_KeyboardInterrupt(player):\n print(\"paused by KeyboardInterrupt\")\n player.edit()", "def looping(self):\n\n pretty_print(\"To Exit enter: 101\", \":\")\n pretty_print(\"To continue press any number key:\", \":\")\n decision = get_int_input()\n\n if decision == 101:\n self.again = False", "def ask_user_input(prompt: str) -> str:\n return input(prompt)", "def switch_input(cls):\n try:\n assert globals()[\"input\"]\n cls.restore_input()\n except KeyError:\n cls.override_input()", "def getInput():\t\n\tglobal active_\n\n\t#to disable the service \n\tactive_ = False \n\t\n\t# reading the previous input\n\tprev_input_ = rospy.get_param('/input')\n\tinput_ = prev_input_\n\t\n\t#in order to make the user to choose one of the 5 possible inputs\n\twhile (prev_input_ == input_) or (input_ > 5 or input_ < 1):\n\t\tif input_ > 5 or input_ < 1: \n\t\t\t#in the case in which the user make another selection\n\t\t\tprint \"Unknown input, please try again\" \n\t\t\n\t\t#propose to the user which are the real possibilities\n\t\tprint(\"Please select one of the following senteces\\n\")\n\t\tprint(\"1 - Move the robot randomly in the environment, by choosing one of six possible target positions\\n\")\n\t\tprint(\"2 - The user can chose the next target position\\n\")\n\t\tprint(\"3 - Start following the external walls\\n\")\n\t\tprint(\"4 - Stop the robot in the last position\\n\")\n\t\tprint(\"5 - Change the planning algorithm from move_base to bug0 and vice versa\\n\")\n\n\t\t#read the input typed by the user\t\n\t\tinput_ = (int(raw_input(\"Please select a number between 1 and 5: \")))\n\n\t#set the choice made by the user\n\tif input_ >= 1 and input_ <= 5:\n\t\trospy.set_param('/input', input_)", "def HandleInput(self, input):\r\n if input.IsKeyDown(K_UP, once=True):\r\n self.selected_option -= 1\r\n if self.selected_option < 0:\r\n self.selected_option = len(self.options) - 1\r\n if input.IsKeyDown(K_DOWN, once=True):\r\n self.selected_option += 1\r\n if self.selected_option >= len(self.options):\r\n self.selected_option = 0\r\n \r\n # Selection\r\n if input.IsKeyDown(K_RETURN, once=True):\r\n self.SelectOption()\r\n \r\n # If they hit ESC, leave bartering\r\n if input.IsKeyDown(K_ESCAPE, once=True):\r\n Log('Quick removed barter from dialogue.')\r\n self.dialogue.SelectLastConversation()", "def input_int(question):\n while True:\n try:\n value = int(input(question))\n except (SyntaxError, NameError) as exception:\n print(\"Invalid entry. Try again.\")\n continue\n\n if value <= 0:\n print(\"Invalid entry. 
Try again.\")\n continue\n else:\n break\n\n return value", "def get_input(prompt):\n return input(prompt)", "def get_input(prompt):\n return input(prompt)", "def prompt_user():\n print()\n while True:\n print('Please choose one of the following options:')\n print(\"1: Send a Thank You\")\n print(\"2: Create a report\")\n print(\"3: Send letters to everyone\")\n print(\"4: Match donations\")\n print(\"5: Quit\")\n try:\n return int(input(\"Option: \"))\n except ValueError as e:\n print(\"***INVALID Option Selected***\")", "def getInput(self):\n\t\tkeyPressed = self.screen.getch()\n\t\tif keyPressed == 113:\t\t# <escape>\n\t\t\tself.terminate()\n\t\t\tself.running = False\n\t\treturn keyPressed \t\t# return key for (possible) further action in calling program", "def _prompt(letters='yn', default=None):\n while True:\n try:\n input_text = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if input_text and input_text in letters:\n return input_text\n if default is not None and input_text == '':\n return default\n print('Come again?')", "def signal_handler(signal, _): \n import signal\n import sys\n from time import localtime, strftime\n signal.signal(signal.SIGINT, original_sigint)\n thetime = strftime(\"%H:%M:%S\", localtime())\n INPUTFUNC('\\n\\n%s: Paused. Press any key to resume, or ctrl+c to quit.\\n' % thetime)\n time = strftime(\"%H:%M:%S\", localtime())\n print('%s: Interrogation resumed.\\n' % time)\n signal.signal(signal.SIGINT, signal_handler)", "def instruction_in(self, register):\n if len(self.input_buffer) == 0:\n user_input = raw_input() + '\\n'\n self.input_buffer = deque(user_input)\n\n char = self.input_buffer.popleft()\n value = ord(char)\n\n self.set_register(register, value)", "def interact(self):\n print('Ready to interact on socket connected with {}.'.format(self.remote_addr))\n try:\n # get initial input from user\n print('Enter input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n while True:\n if data.startswith('exit'):\n print('[*] Closing remote shell.')\n self.close()\n break\n # wait for response from target host\n recv_len = 1\n response = ''\n while recv_len:\n data = self.remote_socket.recv(4096)\n recv_len = len(data)\n response += data.decode()\n if recv_len < 4096:\n break\n print(response)\n # get further input from user\n print('Enter further input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n except Exception as e:\n print(e)\n print('[*] Closing remote shell.')\n self.close()", "def qpressed(): #QUITTNG FUNCTION\n #print(\"q pressed\")\n sys.exit()", "def interrupt(self):\r\n self.interrupting = True", "def input(self, message=''):\r\n from javax.swing import JOptionPane\r\n return JOptionPane.showInputDialog(frame, message)", "def ask(question=WARNING_DIFF):\n\t\t\tfd = sys.stdin.fileno()\n\n\t\t\toldterm = termios.tcgetattr(fd)\n\t\t\tnewattr = termios.tcgetattr(fd)\n\t\t\tnewattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n\t\t\ttermios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n\t\t\toldflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)\n\n\t\t\tself.stdout.write(question)\n\n\t\t\ttry:\n\t\t\t\twhile True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tfirstCharacter = sys.stdin.read(1)\n\t\t\t\t\t\treturn forceUnicode(firstCharacter) in (u\"y\", u\"Y\")\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tpass\n\t\t\tfinally:\n\t\t\t\ttermios.tcsetattr(fd, termios.TCSAFLUSH, 
oldterm)\n\t\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags)", "def loop(self):\n\t\twhile (self.quit == 0):\n\t\t\ttry:\n\t\t\t\tuserInput = raw_input(\"> \")\n\t\t\t\tself.processCommand(userInput)\n\t\t\texcept EOFError:\n\t\t\t\tsys.exit()\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tsys.exit()", "def ask_for_interface():\n return input(\"Interface name : \")", "def _interrupt(self, signum: int, frame: Optional[Any]) -> None:\n if self._in_task(frame):\n raise KeyboardInterrupt\n else:\n self._interrupted = True\n self._ready_tasks.interrupt()", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(1) # no match", "def enter():\n input(\"\\nClick Enter to continue \")", "def confirm_prompt(prompt):\n while True:\n print(prompt, end=' [Y/n]: ')\n\n if not os.isatty(sys.stdout.fileno()):\n print(\"Not running interactively. Assuming 'N'.\")\n return False\n pass\n\n r = input().strip().lower()\n if r in ['y', 'yes', '']:\n return True\n elif r in ['n', 'no']:\n return False", "def prompt(self):\n # TODO: fix null input\n print('Enter user input: ')\n userinput = input()\n print(f'User chose: {userinput}')\n return userinput", "def check_or_exit(msg):\n while True:\n user_input = raw_input(\"%s (y/n): \" % msg).lower()\n if user_input in ['y', 'yes']:\n print\n return\n if user_input in ['n', 'no']:\n print\n print_warning(\"Please complete the required steps and then \"\n \"re-run the script.\")\n sys.exit(1)", "def safe_input(response):\n try:\n return input(response)\n except EOFError:\n return None\n except KeyboardInterrupt:\n return None", "def pause():\n global pause_continue\n if pause_continue:\n return\n line = input(\n \"Paused. 'q' to quit, 'c' to continue without pausing, anything else to step.\"\n )\n if line:\n if line[0] == \"q\":\n exit(0)\n if line[0] == \"c\":\n pause_continue = True", "def end_input(self):\n inp = input()\n if inp.upper() == \"Q\":\n return False\n if inp == \"\" \\\n \"\":\n return True\n return self.end_input", "def prompt_user(prompt: str) -> bool:\n positive_response = {'yes', 'y', 'ye', '', 'ok'}\n negative_response = {'no', 'n'}\n\n while True:\n answer = input(prompt).lower()\n if answer in positive_response:\n return True\n elif answer in negative_response:\n return False\n else:\n print(\"Please respond with 'yes' or 'no'\\n\", file=sys.stderr)", "def control(message, the_function):\n while True:\n user_choice = input(message)\n if user_choice in ('Y', 'y'):\n the_function()\n elif user_choice in ('N', 'n'):\n print(\"exiting program.....\")\n print(\"Have a nice day!\")\n break\n else:\n print(\"Not a valid option, try again\")", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(0)", "def alert_on_error(error_msg: str, prompt_user: bool):\n print(error_msg)\n g.alerts.send(error_msg)\n if prompt_user:\n while True:\n user_response = input(\"Continue execution? 
[Y/N]\\n\").upper()\n if user_response == \"Y\":\n break\n elif user_response == \"N\":\n raise KeyboardInterrupt\n else:\n print(\"Please type in 'Y' or 'N' as a response.\")", "def get_user_input(self):\n while not self.suspended:\n input = raw_input()\n input = input.split('|')\n if input[0] in ['exit', 'quit', 'kill']:\n self.broadcast('kill')\n self.suspended = True\n for client in self.clients.values():\n client.socket.close()\n self.s.close() # Have to connect to socket to exit server.\n sock = socket(AF_INET, SOCK_STREAM)\n port = bind_to_random(sock)\n sock.connect((str(self.ip), self.port))\n elif len(input) > 1:\n msg = '|'.join(['#server']+input[1:])\n if input[0][:1] == '@':\n destination = input[0][1:].lower()\n if destination == 'server':\n print msg\n elif destination == 'all':\n self.broadcast(msg)\n else:\n client = self.clients.get(destination, None)\n if client:\n client_send(client.socket, msg)\n else:\n print 'Destination not active'\n else:\n print msg", "def waitenter(times=1):\n\n # For each time\n for _ in range(times):\n # Ask for user input\n input(\"\")", "def _confirm(message):\n result = ''\n while result not in ('y', 'n'):\n try:\n result = raw_input('%s Continue (y/n)? ' % message)\n except EOFError:\n result = 'n'\n return result == 'y'", "def deny():\n raise InterruptEvent", "def inp(text):\r\n input(text)", "def exit_prompt(message=''):\r\n if message != '': print(str(message))\r\n input('\\nPress [Enter] to exit...')\r\n sys.exit()", "def main():\n dt = DropToken()\n play = True\n while play:\n try:\n line = sys.stdin.readline()\n except KeyboardInterrupt:\n break\n if not line:\n break\n play = dt.inputProcess(line)", "def takeInAndConfirmUserInput():\n validInput = False\n userInput = \"\"\n while validInput != True:\n userInput = input(\"~ \")\n\n print(f\"you have written {userInput}, is this correct? y/[n]\")\n\n confirmation = input(\"~ \")\n\n if confirmation.lower() == \"y\":\n validInput = True\n\n return userInput", "def textinput(self, title, prompt):\n return simpledialog.askstring(title, prompt)", "def prompt(prompt, validator=(lambda x: True), hint=None):\n user_input = input(prompt)\n while not validator(user_input):\n user_input = input(prompt)\n return user_input", "def user_input():\n ans = input('Continue? 
: y/n ')\n if ans == 'n':\n return False\n else:\n return True", "def press_enter():\n raw_input(\"\\n\\nPress Enter\")", "def interrupt(self):\n raise NotImplementedError", "def get_input(label):\n result = input(label)\n sounds.play_input_beep()\n return result", "def raw_input(self, prompt=''):\r\n \r\n newQueue = Queue()\r\n \r\n self.alert.append(newQueue)\r\n \r\n def requestItem(prompt=''):\r\n out = newQueue.get()\r\n return out\r\n \r\n return requestItem", "def input_helper(prompt):\n if version_info[0] == 2:\n # python2 input is scary - we want raw_input\n return raw_input(prompt)\n else:\n return input(prompt)", "def rawInputWithCheck(prompt):\n proceed = False\n i = None\n while not(proceed):\n i = raw_input(prompt)\n print \"Is this correct?\"\n print ' '*3, repr(i)\n proceed = YNInput(' '*2)\n return i", "def _prompt(letters='yn', default=None):\n\n import sys\n while True:\n try:\n inputstr = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if inputstr and inputstr in letters:\n return inputstr\n if default is not None and inputstr == '':\n return default\n print 'Come again?'", "def get_input():\n letters = input('Enter letters, Enter to quit:\\n')\n return letters", "def keyboard_interrupt_handler(interrupt_signal, frame):\n print(\"Scanning finished\")\n print(\"KeyboardInterrupt ID: {} {} has been caught.\".format(interrupt_signal, frame))\n exit(1)" ]
[ "0.7103245", "0.6709058", "0.6595114", "0.6572123", "0.65524346", "0.6543129", "0.65140307", "0.6374105", "0.63740534", "0.631756", "0.6300449", "0.628559", "0.6279168", "0.6268421", "0.62003326", "0.61359113", "0.6018758", "0.6006392", "0.5985524", "0.5977991", "0.5903809", "0.5901102", "0.58932143", "0.58821493", "0.5881889", "0.58528554", "0.5851489", "0.5829887", "0.58218426", "0.580879", "0.5799902", "0.5769742", "0.57581633", "0.57563305", "0.5753157", "0.5742028", "0.5738671", "0.5709427", "0.5626534", "0.5598273", "0.55768156", "0.55691", "0.5550456", "0.5545746", "0.5534817", "0.55345047", "0.55338264", "0.5528914", "0.5516103", "0.55097866", "0.5506788", "0.5506088", "0.5502953", "0.54993415", "0.54712605", "0.54712605", "0.54697394", "0.5463613", "0.544483", "0.54409194", "0.5439392", "0.5437502", "0.5433869", "0.54304296", "0.54197", "0.5418252", "0.5416434", "0.541478", "0.5401092", "0.5387482", "0.53832775", "0.5381311", "0.5373347", "0.5348624", "0.53456473", "0.5344623", "0.53398114", "0.53249764", "0.53244877", "0.53225887", "0.5320025", "0.531944", "0.5319373", "0.5300253", "0.528626", "0.5285827", "0.528339", "0.52782756", "0.5273595", "0.52722824", "0.52722716", "0.52668864", "0.5265775", "0.5263341", "0.5262927", "0.52520883", "0.52468044", "0.5242886", "0.52378786", "0.5235992", "0.5229826" ]
0.0
-1
Calculates the next state of a given 'board' following the classic rules of Conway's Game Of Life
def original(arr):
    height = np.shape(arr)[0]
    width = np.shape(arr)[1]
    result = np.array(arr)
    for row in range(height):
        for col in range(width):
            neighbors = 0
            val = result[row][col]
            for i in range(-1, 2):
                for j in range(-1, 2):
                    if i == 0 and j == 0:
                        # The cell itself cannot be counted as a neighbor
                        continue
                    if row + i < 0 or col + j < 0 or row + i > height or col + j > width:
                        # Out of bounds
                        continue
                    with suppress(IndexError):
                        if arr[row + i][col + j] == 1:
                            neighbors += 1
            if neighbors == 3 and val == 0:
                # Cell becomes alive
                result[row][col] = 1
            elif neighbors > 3 and val == 1 or neighbors < 2 and val == 1:
                # Cell dies
                result[row][col] = 0
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_next_board_state(self):\n new_board_state = np.zeros_like(self.board_state)\n\n for x in range(self.board_size[0]):\n for y in range(self.board_size[0]):\n new_board_state[x][y] = self.next_state_of_cell(x,y)\n \n self.set_state(new_board_state)", "def gameOfLife(self, board: 'List[List[int]]') -> None:\n m, n = len(board), len(board[0])\n\n def calc(i, j):\n neighbors = [\n [i-1, j-1],[i-1,j],[i-1,j+1],\n [i, j-1],[i,j+1],\n [i+1, j-1],[i+1, j],[i+1,j+1]\n ]\n sum = 0\n for r,c in neighbors:\n if 0 <= r < m and 0 <= c < n:\n sum += (board[r][c] & 1)\n return sum\n\n for i in range(m):\n for j in range(n):\n status = calc(i, j)\n if board[i][j] == 1 and (status == 2 or status == 3):\n board[i][j] = 3\n else:\n if status == 3:\n board[i][j] = 2\n for i in range(m):\n for j in range(n):\n board[i][j] >>= 1", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of course, this probably won't be sufficient for extremely\n # large boards.\n self.num_steps += 1\n board = self.board\n cfilter = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.uint16)\n\n alive = board & CellTypes.alive > 0\n spawning = board & CellTypes.spawning > 0\n frozen = board & CellTypes.frozen > 0\n\n can_die = ~frozen & (\n convolve2d(board & CellTypes.preserving, cfilter) == 0)\n can_grow = ~frozen & (\n convolve2d(board & CellTypes.inhibiting, cfilter) == 0)\n\n num_neighbors = convolve2d(alive, cfilter)\n num_spawn = convolve2d(spawning, cfilter)\n spawn_prob = 1 - (1 - self.spawn_prob)**num_spawn\n has_spawned = coinflip(spawn_prob, board.shape)\n\n born_rule = np.zeros(9, dtype=bool)\n born_rule[list(self.born_rule)] = True\n dead_rule = np.ones(9, dtype=bool)\n dead_rule[list(self.survive_rule)] = False\n\n new_alive = (born_rule[num_neighbors] | has_spawned) & ~alive & can_grow\n new_dead = dead_rule[num_neighbors] & alive & can_die\n\n new_flags = np.zeros_like(board)\n color_weights = 1 * alive + 2 * spawning\n for color in CellTypes.colors:\n # For each of the colors, see if there are two or more neighbors\n # that have it. 
If so, any new cells (whether born or spawned)\n # will also get that color.\n has_color = board & color > 0\n new_color = convolve2d(has_color * color_weights, cfilter) >= 2\n new_flags += color * new_color\n indestructible = alive & (board & CellTypes.destructible == 0)\n new_flags += CellTypes.destructible * (convolve2d(indestructible, cfilter) < 2)\n\n board *= ~(new_alive | new_dead)\n board += new_alive * (CellTypes.alive + new_flags)", "def next_state_of_cell(self, x_cell, y_cell):\n neighbours = self.get_number_neighbours_of_cell(x_cell, y_cell)\n if(self.board_state[x_cell][y_cell] == 1):\n # Any live cell with more than three live neighbours dies, \n # as if by overpopulation.\n if(neighbours > 3):\n return 0\n # Any live cell with fewer than two live neighbours dies,\n # as if by underpopulation.\n elif(neighbours < 2):\n return 0\n # Any live cell with two or three live neighbours lives\n # on to the next generation.\n else:\n return 1\n if(self.board_state[x_cell][y_cell] == 0):\n # Any dead cell with exactly three live neighbours becomes a live cell, \n # as if by reproduction.\n if(neighbours == 3):\n return 1\n else:\n return 0", "def evaluateBoardState(self, board):\n\n \"\"\"\n These are the variables and functions for board objects which may be helpful when creating your Agent.\n Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.\n\n Board Variables:\n board.width \n board.height\n board.last_move\n board.num_to_connect\n board.winning_zones\n board.score_array \n board.current_player_score\n\n Board Functions:\n get_cell_value(row, col)\n try_move(col)\n valid_move(row, col)\n valid_moves()\n terminal(self)\n legal_moves()\n next_state(turn)\n winner()\n \"\"\"\n if self.id == 1:\n opponent_id = 2\n else:\n opponent_id = 1\n\n maxvalue = 100000\n minvalue = -maxvalue\n winner = board.winner()\n if winner == self.id:\n return maxvalue\n elif winner == opponent_id:\n return minvalue\n size_y = board.height\n size_x = board.width\n map_ = []\n num_to_connect = board.num_to_connect\n total_points = 0\n\n multiply_reachable = 1\n multiply_oddeven = 1\n # basically this function is calculating all the possible win positions\n # more pieces in a possible win position will be counted with more weights\n # a win position with X pieces in it will be counted as X^2 points\n # initialise the zones maps\n for i in range(size_y):\n map_.append([])\n for j in range(size_x):\n map_[i].append([])\n\n # Fill in the horizontal win positions\n for i in range(size_y):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i][j + k] == self.id:\n points += len(board.winning_zones[j+k][i])\n if (self.id == 1 and i % 2 == 1) or (self.id == 2 and i%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return maxvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n points *= multiply_reachable\n elif 
opponent_pieces_count == 3 and self_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return minvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the vertical win positions\n for i in range(size_x):\n for j in range(size_y - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[j + k][i] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[j + k][i] == self.id:\n points += len(board.winning_zones[i][j+k])\n if (self.id == 1 and (j+k) % 2 == 1) or (self.id == 2 and (j+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n points *= multiply_reachable\n # if opponent_pieces_count == 3 and self_pieces_count == 0:\n # points *= -1\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the forward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j + k] == self.id:\n points += len(board.winning_zones[j+k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the backward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - 1, num_to_connect - 1 - 1, -1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j - k] == 
self.id:\n points += len(board.winning_zones[j-k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n points *= multiply_reachable\n\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n return total_points", "def gameOfLife(self, board):\n \n # Neighbours array for 8 neighboring cells of a given cell\n neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]\n \n rows = len(board)\n cols = len(board[0])\n \n # Iterate through the board by each cell\n for row in range(rows):\n for col in range(cols):\n \n # For each cell counting number of live neighbors\n live_neighbors = 0\n for neighbor in neighbors:\n \n # row and column of neighboring cell\n r = (row + neighbor[0])\n c = (col + neighbor[1])\n \n # Checking validity of neighboring cell and if it was originally a live cell\n if(r < rows and r >= 0) and (c < cols and c >= 0) and abs(board[r][c]) == 1:\n \n live_neighbors += 1\n \n # Rule 1 or Rule 3\n if board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3):\n \n board[row][col] = -1 # -1 meaning cell is now dead but was originally live\n \n # Rule 4\n if board[row][col] == 0 and live_neighbors == 3:\n board[row][col] = 2 #2 meaning cell is now live but was originally dead\n # Get final representation for updated board \n for row in range(rows):\n for col in range(cols):\n \n if board[row][col] > 0:\n board[row][col] = 1\n \n else:\n board[row][col] = 0", "def gameOfLife(self, board) -> None:\n rows = len(board)\n cols = len(board[0])\n neighbours = [(-1, 1), (0, 1), (1, 1), (-1, 0), (1, 0), (-1, -1), (0, -1), (1, -1)]\n for row in range(rows):\n for col in range(cols):\n live_neighbour = 0\n for i, j in neighbours:\n new_row = row + i\n new_col = col + j\n if new_row >= 0 and new_row < rows and new_col >= 0 and new_col < cols and \\\n board[new_row][new_col] in [1, -1]:\n live_neighbour += 1\n if (live_neighbour < 2 or live_neighbour > 3) and board[row][col] == 1:\n board[row][col] = -1\n elif live_neighbour == 3 and board[row][col] == 0:\n board[row][col] = 2\n for row in range(rows):\n for col in range(cols):\n if board[row][col] == -1:\n board[row][col] = 0\n elif board[row][col] == 2:\n board[row][col] = 1", "def gameOfLife(self, board):\n n = len(board)\n m = len(board[0])\n DX = [0, 0, 1, -1, 1, 1, -1, -1]\n DY = [1, -1, 0, 0, 1, -1, 1, 
-1];\n for i in range(n):\n for j in range(m):\n cnt = 0\n for k in range(8):\n x = i + DX[k]\n y = j + DY[k]\n if x < 0 or x >= n or y < 0 or y >= m:\n continue\n cnt += board[x][y] & 1\n if (board[i][j] & 1) > 0:\n if cnt >= 2 and cnt <= 3:\n board[i][j] = 0b11\n elif cnt == 3:\n board[i][j] = 0b10\n for i in range(n):\n for j in range(m):\n board[i][j] >>= 1", "def get_next_state(self, state, x, y):\n my_board = state\n game_over = False\n if is_mine(self.board, x, y):\n my_board[x, y] = MINE\n game_over = True\n else:\n my_board[x, y] = self.count_neighbour_mines(x, y)\n if my_board[x, y] == 0:\n my_board = self.open_neighbour_cells(my_board, x, y)\n self.my_board = my_board\n return my_board, game_over", "def get_next_state(self, state, x, y):\n my_board = state\n game_over = False\n if is_mine(self.board, x, y):\n my_board[x, y] = MINE\n game_over = True\n else:\n my_board[x, y] = self.count_neighbour_mines(x, y)\n if my_board[x, y] == 0:\n my_board = self.open_neighbour_cells(my_board, x, y)\n self.my_board = my_board\n return my_board, game_over", "def gameOfLife(self, board: List[List[int]]) -> None:\n # copy matrix\n copy_matrix = [[board[row][col] for col in range(len(board[0]))] for row in range(len(board))]\n \n # 8 possible directions\n directions = [(0,1), (0, -1), (1,0), (-1,0), (-1,-1), (1,1), (1,-1), (-1,1)]\n num_rows = len(board)\n num_cols = len(board[0])\n \n # matrix traversal\n for i in range(0, num_rows):\n for j in range(0, num_cols):\n # for each cell, explore all of its neighboring cells\n num_live_cells = 0\n for direction in directions:\n r = i + direction[0]\n c = j + direction[1]\n # make sure if it is a live cell \n if (r < num_rows and r >=0) and (c < num_cols and c>=0) and (copy_matrix[r][c]==1):\n # if it is live cell, increment live_cell_count\n num_live_cells +=1\n # if here: We now have estimate of surrounding live cells\n # start applying rules \n # Rule-1: Any live cell with fewer than 2 live neighbors die\n # Rule-2: Any live cell with 2/3 live neighbors live up\n # Rule-3: Any Live cell with > 3 live neighbors die\n # Rule-4: Any dead cell with ==3 live neighbors becomes alive\n if copy_matrix[i][j] == 1 and (num_live_cells > 3 or num_live_cells < 2):\n # Rule-1 and Rule-3: So the current cell dies...\n board[i][j] = 0\n if copy_matrix[i][j] == 0 and num_live_cells == 3:\n # Rule-4: Dead becomes alive\n board[i][j] = 1\n # Rule-2 is taken care by default.", "def next(self):\n self.current_state = self.next_state\n self.next_state = self.clear_screen() # set values to 0\n for x in range(1, 101):\n for y in range(1, 101):\n # calculate the number of alive neighbours at given coordinates\n self.neighbours_alive = self.check_neighbours_alive(x, y)\n\n # assign the result value from rule sets\n self.next_state[x][y] = self.rule_sets[self.selected_rule][ # selected rule name\n str(self.current_state[x][y])][ # 0 or 1 (dead or alive)\n self.neighbours_alive] # number between 0 to 8\n return self.next_state", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n if m==0:\n return board\n n = len(board[0])\n if n==0:\n return board\n def valid(a,b):\n if 0<=a<m and 0<=b<n:\n return True\n mat = [row[:] for row in board] #original copy of the board\n directions = [(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]\n for i in range(m):\n for j in range(n):\n #count how many live=1 or dead=0 cells surrounding cell (i,j)\n cnt_live=0\n for direc in directions:\n if valid(i+direc[0],j+direc[1]):\n if mat[i+direc[0]][j+direc[1]]==1:\n 
cnt_live+=1\n if mat[i][j]==1 and cnt_live<2 or mat[i][j]==1 and cnt_live>3:\n board[i][j]=0\n elif mat[i][j]==1 and 2<=cnt_live<=3 or mat[i][j]==0 and cnt_live==3:\n board[i][j]=1", "def gameOfLife(self, board: List[List[int]]) -> None:\n ds = [(-1, -1), (0, -1), (-1, 0), (1, 0), (0, 1), (1, 1), (1, -1), (-1, 1)]\n for i in range(0, len(board)):\n for j in range(0, len(board[i])):\n lnum = 0\n for k in range(0, len(ds)):\n x, y = ds[k]\n if 0 <= i + x < len(board) and 0 <= j + y < len(board[i]):\n s = board[i + x][j + y] & 1\n if s == 1:\n lnum += 1\n if board[i][j] == 1:\n if lnum < 2:\n board[i][j] |= 2\n elif 2 <= lnum <= 3:\n pass\n else:\n board[i][j] |= 2\n else:\n if lnum == 3:\n board[i][j] |= 2\n for i in range(0, len(board)):\n for j in range(0, len(board[i])):\n if board[i][j] > 1:\n board[i][j] = ~(board[i][j] & 1) & 1", "def gameOfLife(self, board: List[List[int]]) -> None:\n def neighbour(i, j):\n total = 0\n for x,y in ((i+1, j), (i-1, j), (i, j+1), (i, j-1), (i-1, j-1), (i-1, j+1), (i+1, j+1), (i+1, j-1)):\n if x >= 0 and y >= 0 and x <= len(board) -1 and y <= len(board[0]) -1 and board[x][y] & 1:\n total += 1\n return total\n \n def rule(value,i, j):\n if value == 1:\n if neighbour(i, j) == 2 or neighbour(i, j) == 3:\n value |= 2\n elif value == 0:\n if neighbour(i, j) == 3:\n value |= 2\n return value\n \n if not len(board):\n return []\n m = len(board)\n n = len(board[0])\n \n for i in range(m):\n for j in range(n): \n board[i][j] = rule(board[i][j], i, j) \n \n for i in range(m):\n for j in range(n): \n board[i][j] = board[i][j] >> 1 \n \n \n \n return board", "def state_generator(self):\n\n kernel = np.array([\n [1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n iteration = 0\n\n while True: # (Game of Life does not end)\n # Run 2D convolution with the given kernel to find out how many neighbors each cell has.\n # Boundary option determines whether to run with hard boundaries on the game board or\n # using a toroid board which wraps circularly. These are the two strategies for handling\n # a finite game board. scipy.signal.convolve2d handles these two modes gracefully, which\n # is why it is used here. 
There is also a performance gain when using numpy/scipy matrix\n # operations as opposed to iterating element-wise over the whole matrix.\n # See https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.signal.convolve2d.html\n\n # There is a more sophisticated and efficient algorithm for determining next game state\n # (see http://dotat.at/prog/life/life.html) but for clarity and a lack of time, the standard\n # implementation was chosen.\n\n num_neighbors_board = convolve2d(self.board, kernel, mode='same', boundary=self.boundary.value)\n\n # Find empty cells that have three neighbors\n birth_coordinates = np.where(np.logical_and(self.board == 0, num_neighbors_board == 3))\n\n # Find live cells with too few or too many neighbors\n death_coordinates = np.where(\n np.logical_and(\n self.board == 1,\n np.logical_or(num_neighbors_board < 2, num_neighbors_board > 3)\n )\n )\n\n births = np.array(birth_coordinates).transpose().tolist()\n deaths = np.array(death_coordinates).transpose().tolist()\n self.board[birth_coordinates] = 1\n self.board[death_coordinates] = 0\n\n iteration += 1\n yield self.board, births, deaths, iteration", "def gameOfLife(self, board: List[List[int]]) -> None:\n neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]\n row = len(board)\n col = len(board[0])\n copyboard = copy.deepcopy(board)\n for i in range(row):\n for j in range(col):\n liven = 0\n for neighbor in neighbors:\n r = i + neighbor[0]\n c = j + neighbor[1]\n if (r>=0 and r<row) and (c>=0 and c<col) and (copyboard[r][c] == 1):\n liven += 1\n if copyboard[i][j]==1 and (liven<2 or liven>3):\n board[i][j] = 0\n if copyboard[i][j]==0 and liven == 3:\n board[i][j] =1", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n n = len(board[0])\n def count(x, y):\n top = y - 1\n down = y + 1\n left = x - 1\n right = x + 1\n if top < 0:\n top = 0\n if down >= m:\n down = m - 1\n if left < 0:\n left = 0\n if right >= n:\n right = n - 1\n _count = 0\n for i in range(top, down + 1):\n for j in range(left, right + 1):\n _count += board[i][j]\n _count -= board[y][x]\n return _count\n\n\n result = [[0 for _ in range(n)] for _ in range(m)]\n for i in range(m):\n for j in range(n):\n neighbours = count(j, i)\n if board[i][j] == 0 and neighbours == 3:\n result[i][j] = 1\n if board[i][j] == 1 and (neighbours == 2 or neighbours == 3):\n result[i][j] = 1\n for i in range(m):\n for j in range(n):\n board[i][j] = result[i][j]", "def getNextState(self, board, player, action):\n b = self._base_board.with_np_pieces(np_pieces=np.copy(board))\n b.add_stone(action, player)\n return b.np_pieces, -player", "def gameOfLife(self, board: List[List[int]]) -> None:\n rows = len(board)\n cols = len(board[0])\n if not rows or not cols:\n return board\n neighbors = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)]\n\n def no_of_live_neighbors(x, y):\n count = 0\n for dx, dy in neighbors:\n if 0 <= x + dx <= rows - 1 and 0 <= y + dy <= cols - 1:\n if abs(board[x + dx][y + dy]) == 1:\n count += 1\n return count\n\n for i in range(rows):\n for j in range(cols):\n live_neighbours = no_of_live_neighbors(i, j)\n if board[i][j] == 0 and live_neighbours == 3:\n board[i][j] = 2\n if board[i][j] == 1 and (live_neighbours < 2 or live_neighbours > 3):\n board[i][j] = -1\n for i in range(rows):\n for j in range(cols):\n if board[i][j] > 0:\n board[i][j] = 1\n else:\n board[i][j] = 0\n\n return board", "def next_step(self, state, x, y):\n my_board = state\n if not is_new_move(my_board, x, 
y):\n return my_board, -1, False, {}\n while True:\n state, game_over = self.get_next_state(my_board, x, y)\n if not game_over:\n if is_win(state):\n return state, 1000, True, {}\n else:\n return state, 5, False, {}\n else:\n return state, -100, True, {}", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n if m==0:\n return board\n n = len(board[0])\n if n==0:\n return board\n def valid(a,b):\n if 0<=a<m and 0<=b<n:\n return True\n directions = [(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]\n for i in range(m):\n for j in range(n):\n #count how many live=1 or dead=0 cells surrounding cell (i,j)\n cnt_live=0\n for direc in directions:\n if valid(i+direc[0],j+direc[1]):\n if board[i+direc[0]][j+direc[1]]==1 or board[i+direc[0]][j+direc[1]]==-1:\n cnt_live+=1\n if (board[i][j]==1 and cnt_live<2) or \\\n (board[i][j]==1 and cnt_live>3):\n board[i][j]=-1\n elif board[i][j]==0 and cnt_live==3:\n board[i][j]=2\n for i in range(m):\n for j in range(n):\n if board[i][j]==-1:\n board[i][j]=0\n elif board[i][j]==2:\n board[i][j]=1", "def evaluateBoardState(self, board):\n\n \"\"\"\n These are the variables and functions for board objects which may be helpful when creating your Agent.\n Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.\n\n Board Variables:\n board.width \n board.height\n board.last_move\n board.num_to_connect\n board.winning_zones\n board.score_array \n board.current_player_score\n\n Board Functions:\n get_cell_value(row, col)\n try_move(col)\n valid_move(row, col)\n valid_moves()\n terminal(self)\n legal_moves()\n next_state(turn)\n winner()\n \"\"\"\n\n # print the valid moves on board for current player\n move = board.last_move\n\n # enemy agent's id\n enemy = self.id % 2 + 1\n\n value = self.evaluateRows(board, enemy) + self.evaluateCols(board, enemy) + self.evaluateBackwardDiagonals(board, enemy) + self.evaluateForwardDiagonals(board, enemy)\n return value", "def hill_climbing(problem):\n\n current = State(problem.initial_state)\n print(current.get_value())\n while current.get_value() != 0:\n neighbour = current.generate_neighbour()\n print(neighbour.board)\n print(neighbour.get_value())\n if neighbour.get_value() >= current.get_value():\n return current.board\n current = neighbour", "def life_step(state):\n\t# For every cell each live cell in any of the 8 neighbouring cells contributes 1 to the sum\n\t# Rolling matricies is periodic so this implements periodic boundary conditions\n\tnumberOfNeigbours = sum(np.roll(np.roll(state, i, axis=0), j, axis=1)\n\t\t\t\t\t\t for i in (-1,0,1) for j in (-1,0,1) if (i != 0 or j != 0))\n\n\t# Any live cell with fewer than two live neighbours dies, as if caused by under-population\n\tstate = np.where(numberOfNeigbours < 2, 0, state)\n\t# Any live cell with more than three live neighbours dies, as if by over-population\n\tstate = np.where(numberOfNeigbours > 3, 0, state)\n\t# Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.\n\tstate = np.where(numberOfNeigbours == 3, 1, state)\n\n\treturn state", "def next_step(self, state, x, y):\n my_board = state\n if not is_new_move(my_board, x, y):\n return my_board, 0, False, {}\n while True:\n state, game_over = self.get_next_state(my_board, x, y)\n if not game_over:\n if is_win(state):\n return state, 1000, True, {}\n else:\n return state, 0, False, {}\n else:\n return state, -100, True, {}", "def gameOfLife(self, board: List[List[int]]) -> None:\n\n neighbors 
= [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]\n\n rows = len(board)\n cols = len(board[0])\n\n # 遍历面板每一个格子里的细胞\n for row in range(rows):\n for col in range(cols):\n # 对于每一个细胞统计其八个相邻位置里的活细胞数量\n live_neighbors = 0\n\n for neighbor in neighbors:\n # 相邻位置的坐标\n r = (row + neighbor[0])\n c = (col + neighbor[1])\n # 查看相邻的细胞是否是活细胞\n if (r < rows and r >= 0) and (c < cols and c >= 0) and abs(board[r][c]) == 1:\n live_neighbors += 1\n\n # 过去的活细胞,现在变为死细胞\n if board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3):\n # -1 代表这个细胞过去是活的现在死了\n board[row][col] = -1\n # 过去的死细胞,现在变为活细胞\n if board[row][col] == 0 and live_neighbors == 3:\n # 2 代表这个细胞过去是死的现在活了\n board[row][col] = 2\n\n # 遍历 board 刷新更新后的状态\n for row in range(rows):\n for col in range(cols):\n if board[row][col] > 0:\n board[row][col] = 1\n else:\n board[row][col] = 0", "def next_life_generation(board):\r\n\theight = len(board)\r\n\twidth = len(board[0])\r\n\tnextGen = copy(board)\r\n\tfor row in range(height):\r\n\t\tfor col in range(width):\r\n\t\t\tif row > 0 and row < height-1:\r\n\t\t\t\tif col > 0 and col < width-1:\r\n\t\t\t\t\tcellNeigbors = howManyNeigbors(board,row,col)\r\n\t\t\t\t\tif cellNeigbors < 2:\r\n\t\t\t\t\t\tnextGen[row][col] = 0\r\n\t\t\t\t\telif cellNeigbors > 3:\r\n\t\t\t\t\t\tnextGen[row][col] = 0\r\n\t\t\t\t\telif cellNeigbors == 3 and board[row][col] == 0:\r\n\t\t\t\t\t\tnextGen[row][col] = 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tnextGen[row][col] = board[row][col]\r\n\treturn nextGen", "def play_round_Conway_Cell(self):\n for x in self.board:\n for f in x:\n f.live_neighbors = 0\n\n for i in range(1, self.cols - 1):\n for j in range(1, self.rows - 1):\n status = self.board[i][j].status\n assert type(status)==int \n\n for m in range(i - 1, i + 2):\n for n in range(j - 1, j + 2):\n self.board[m][n].live_neighbors += status\n self.board[i][j].live_neighbors -= status", "def get_next_board(Board):\n paddedBoard = pad_board(Board)\n\n dims = paddedBoard.shape\n rows = dims[0] - 2\n cols = dims[1] - 2\n\n nextBoard = np.zeros((rows, cols), dtype=int)\n\n for r in range(1, dims[0] - 1):\n for c in range(1, dims[1] - 1):\n numNeighbours = check_neighbours(r, c, paddedBoard)\n # ... ... ... ... ...\n # ... Game Logic ...\n # ... ... ... ... 
...\n\n return nextBoard", "def game_value(self, state):\n # check horizontal wins\n for row in state:\n for i in range(2):\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\n return 1 if row[i]==self.my_piece else -1\n\n # check vertical wins\n for col in range(5):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n\n # check \\ diagonal wins\n for i in range(2):\n for j in range(2):\n if state[i][j]!= ' ' and state[i][j] == state[i+1][j+1] == state[i+2][j+2] == state[i+3][j+3]:\n return 1 if state[i][j]==self.my_piece else -1\n # check / diagonal wins\n for i in range(3,5):\n for j in range(2):\n if state[i][j]!= ' ' and state[i][j] == state[i-1][j+1] == state[i-2][j+2] == state[i-3][j+3]:\n return 1 if state[i][j]==self.my_piece else -1\n # check diamond wins\n for i in range(3):\n for j in range(1,4):\n if state[i+1][j] == ' ' and state[i][j]!= ' ' and state[i][j] == state[i+1][j-1] == state[i+1][j+1] == state[i+2][j]:\n return 1 if state[i][j]==self.my_piece else -1\n\n return 0 # no winner yet", "def next_state(s_curr, action, params):\n P_dist = params['P_dist']\n R = params['R']\n n_rows = params['n_rows']\n n_cols = params['n_cols']\n occ_grid = params['occ_grid']\n\n rnd = np.random.uniform()\n\n s_next = s_curr\n\n # Actions - ['left','right','up','down']\n\n if rnd <= P_dist:\n if action == 0:\n move = 2\n elif action == 1:\n move = 2\n elif action == 2:\n move = 1\n else:\n move = 0\n elif rnd < 2*P_dist:\n if action == 0:\n move = 3\n elif action == 1:\n move = 3\n elif action == 2:\n move = 1\n else:\n move = 1\n else:\n move = action\n\n # Move left\n if move == 0:\n row_next = s_curr[0]\n col_next = s_curr[1] - 1\n if col_next >= 0 and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move right\n if move == 1:\n row_next = s_curr[0]\n col_next = s_curr[1] + 1\n if col_next < n_cols and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move up\n if move == 2:\n row_next = s_curr[0] - 1\n col_next = s_curr[1]\n if row_next >= 0 and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move down\n if move == 3:\n row_next = s_curr[0] + 1\n col_next = s_curr[1]\n if row_next < n_rows and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n r = R[s_next[0], s_next[1]]\n return s_next, r", "def game_value(self, state):\r\n # check horizontal wins\r\n for row in state:\r\n for i in range(2):\r\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\r\n return 1 if row[i] == self.my_piece else -1\r\n # check col wins\r\n for col in range(5):\r\n for i in range(2):\r\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == state[i+3][col]:\r\n return 1 if state[i][col] == self.my_piece else -1\r\n #check diag up wins\r\n for x in range(2):\r\n for y in range(2):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y+1] == state[x+2][y+2] == state[x+3][y+3]:\r\n return 1 if state[x][y] == self.my_piece else -1\r\n #check diag down wins\r\n for x in range(2):\r\n for y in range(3, 5):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y-1] == state[x+2][y-2] == state[x+3][y-3]:\r\n return 1 if state[x][y] == self.my_piece else -1\r\n #check square box wins \r\n for x in range(4):\r\n for y in range(4):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y] == state[x][y+1] == state[x+1][y+1]:\r\n return 1 if 
state[x][y] == self.my_piece else -1\r\n\r\n return 0 # no winner yet\r", "def generate():\n global BOARD\n next = [[0] * ROWS for _ in range(COLS)]\n # Loop through every spot in our 2D array and check spots neighbors\n for x in range(COLS):\n for y in range(ROWS):\n # Add up all the states in a 3x3 surrounding grid\n neighbors = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n nx = (x + i + COLS) % COLS\n ny = (y + j + ROWS) % ROWS\n neighbors += BOARD[nx][ny]\n # A little trick to subtract the current cell's state since\n # we added it in the above loop\n neighbors -= BOARD[x][y]\n # Rules of Life\n if BOARD[x][y] == 1 and neighbors < 2 : next[x][y] = 0 # Loneliness\n elif BOARD[x][y] == 1 and neighbors > 3 : next[x][y] = 0 # Overpopulation\n elif BOARD[x][y] == 0 and neighbors == 3: next[x][y] = 1 # Reproduction\n else: next[x][y] = BOARD[x][y] # Stasis\n # Next is now our board\n BOARD = next", "def gameOfLife(self, board: List[List[int]]) -> None:\n r, c = len(board), len(board[0])\n # 下面两行做zero padding\n board_exp = np.array([[0 for _ in range(c + 2)] for _ in range(r + 2)])\n board_exp[1:1 + r, 1:1 + c] = np.array(board)\n print(board_exp)\n # 设置卷积核\n kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n # 开始卷积\n for i in range(1, r + 1):\n for j in range(1, c + 1):\n # 统计细胞周围8个位置的状态\n temp_sum = np.sum(kernel * board_exp[i - 1:i + 2, j - 1:j + 2])\n # 按照题目规则进行判断\n if board_exp[i, j] == 1:\n if temp_sum < 2 or temp_sum > 3:\n board[i - 1][j - 1] = 0\n else:\n if temp_sum == 3:\n board[i - 1][j - 1] = 1", "def gameOfLife(self, board: List[List[int]]) -> None:\n self.board = copy.deepcopy(board)\n self.rows = len(self.board)\n self.cols = len(self.board[0])\n for i in range(self.rows):\n for j in range(self.cols):\n neighbors = self.count_neighbors(i, j)\n if board[i][j] == 1:\n if neighbors < 2 or neighbors > 3:\n board[i][j] = 0\n else:\n if neighbors == 3:\n board[i][j] = 1", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n n = len(board[0])\n\n def affect(x, y):\n for i in [x-1, x, x+1]:\n for j in [y-1, y, y+1]:\n if (i == x and j == y) or i < 0 or i >= m or j < 0 or j >= n:\n continue\n board[i][j] += 10\n for i in range(m):\n for j in range(n):\n if board[i][j] % 10 == 1:\n affect(i, j)\n for i in range(m):\n for j in range(n):\n value = board[i][j]\n if value // 10 == 3:\n board[i][j] = 1\n elif value // 10 == 2 and value % 10 == 1:\n board[i][j] = 1\n else:\n board[i][j] = 0", "def game_state(matrix):\n\n \"\"\"\n # To set winning tile\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 2048:\n # return 'win'\n # return 'not over'\n \"\"\"\n for i in range(len(matrix)-1):\n # intentionally reduced to check the row on the right and below\n # more elegant to use exceptions but most likely this will be their solution\n for j in range(len(matrix[0])-1):\n if matrix[i][j] == matrix[i+1][j] or matrix[i][j+1] == matrix[i][j]:\n return 'not over'\n for i in range(len(matrix)): # check for any zero entries\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0:\n return 'not over'\n for k in range(len(matrix)-1): # to check the left/right entries on the last row\n if matrix[len(matrix)-1][k] == matrix[len(matrix)-1][k+1]:\n return 'not over'\n for j in range(len(matrix)-1): # check up/down entries on last column\n if matrix[j][len(matrix)-1] == matrix[j+1][len(matrix)-1]:\n return 'not over'\n return 'lose'", "def computeNextState(self):\n aliveNeighbors = self.numOfLiveNeighbors()\n if aliveNeighbors < 2 or 
aliveNeighbors > 3:\n self.setNextToDead()\n\n if not self.isAlive() and aliveNeighbors == 3:\n self.setNextToAlive()", "def gameOfLife(self, board: List[List[int]]) -> None:\n changes = list()\n for i in range(len(board)):\n for j in range(len(board[0])):\n neighbor_data = {\n 'live': 0,\n 'dead': 0\n }\n checks = {(0,1), (0,-1), (1, 0), (-1, 0), (1, 1), (1, -1), (-1, 1), (-1,-1)}\n if i == 0:\n checks.discard((-1, 0))\n checks.discard((-1, 1))\n checks.discard((-1, -1))\n if j == 0:\n checks.discard((0, -1))\n checks.discard((-1, -1))\n checks.discard((1, -1))\n if i == (len(board) - 1):\n checks.discard((1,0))\n checks.discard((1,-1))\n checks.discard((1, 1))\n if j == (len(board[0]) - 1):\n checks.discard((0, 1))\n checks.discard((-1, 1))\n checks.discard((1, 1))\n for check in checks:\n if board[i + check[0]][j + check[1]]:\n neighbor_data['live'] += 1\n else:\n neighbor_data['dead'] += 1\n if board[i][j]:\n # check live rules\n if neighbor_data['live'] < 2 or neighbor_data['live'] > 3:\n changes.append((i, j))\n else:\n # check dead rules\n if neighbor_data['live'] == 3:\n changes.append((i, j))\n for change in changes:\n board[change[0]][change[1]] = int (not board[change[0]][change[1]])\n \n print (board)", "def gameOfLife(self, board: List[List[int]]) -> None:\n neighbors = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1),\n (0, 1), (1, 1)]\n rows = len(board)\n cols = len(board[0])\n\n tmp_board = [[board[r][c] for c in range(cols)] for r in range(rows)]\n\n for row in range(rows):\n for col in range(cols):\n lives = 0\n for n in neighbors:\n r = row + n[0]\n c = col + n[1]\n\n if 0 <= r < rows and 0 <= c < cols and tmp_board[r][c] == 1:\n lives += 1\n if tmp_board[row][col] == 1 and (lives < 2 or lives > 3):\n board[row][col] = 0\n if tmp_board[row][col] == 0 and lives == 3:\n board[row][col] = 1", "def game_value(self, state):\n # check horizontal wins\n for row in state:\n for i in range(2):\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\n return 1 if row[i]==self.my_piece else -1\n\n # check vertical wins\n for col in range(5):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n\n # TODO: check \\ diagonal wins\n for col in range(2):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col+1] == state[i+2][col+2] == state[i+3][col+3]:\n return 1 if state[i][col]==self.my_piece else -1\n # TODO: check / diagonal wins\n for col in range(2):\n for i in range(2):\n if state[i][col+3] != ' ' and state[i][col+3] == state[i+1][col+2] == state[i+2][col+1] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n # TODO: check 2x2 box wins\n for col in range(4):\n for i in range(4):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i][col+1] == state[i+1][col+1]:\n return 1 if state[i][col]==self.my_piece else -1\n \n return 0 # no winner yet", "def evaluate(self, state):\n\t\ttranspose = state.board.transpose()\t\t# columns in state.board = rows in transpose\n\t\tcount = []\n\t\topponentcount = []\n\t\tfor row, column in zip(state.board, transpose):\n\t\t\trowcounter = collections.Counter(row)\n\t\t\tcolumncounter = collections.Counter(column)\n\t\t\tcount.append(rowcounter.get(state.current_player, 0))\n\t\t\tcount.append(columncounter.get(state.current_player, 0))\n\t\t\topponentcount.append(rowcounter.get(state.current_player * - 1, 
0))\n\t\t\topponentcount.append(columncounter.get(state.current_player * -1 , 0))\n\n\t\tY = state.board[:, ::-1]\n\t\tdiagonals = [np.diagonal(state.board), np.diagonal(Y)]\n\t\tmain_diagonal_count = collections.Counter(diagonals[0])\n\t\tsecond_diagonal_count = collections.Counter(diagonals[1])\n\t\tcount.append(main_diagonal_count.get(state.current_player, 0))\n\t\tcount.append(second_diagonal_count.get(state.current_player, 0))\n\t\topponentcount.append(main_diagonal_count.get(state.current_player * - 1, 0))\n\t\topponentcount.append(second_diagonal_count.get(state.current_player * -1, 0))\n\n\t\t# max(count): maximum number of player's tiles in a row, column, or a diagonal (the highest value is 5)\n\t\t# max(opponentcount): maximum number of opponent's tiles in a row, column, or a diagonal (the highest value is 5)\n\t\tscoremax = 5 ** max(count)\n\t\tscoremin = 5 ** max(opponentcount)\n\n\t\treturn scoremax - scoremin", "def gameOfLife(self, board: List[List[int]]) -> None:\n if not board or len(board)==0:\n return \n\n rows = len(board)\n cols = len(board[0])\n #lives = 0\n \n\n for i in range(rows):\n for j in range(cols):\n lives = self.n_neighbors(board,i,j)\n \n # Rule 1 and Rule 3\n if board[i][j]==1 and (lives <2 or lives >3):\n board[i][j]= 2 # -1 signifies the cell is now dead but originally was live.\n if board[i][j]== 0 and lives ==3:\n board[i][j]=3 # signifies the cell is now live but was originally dead.\n\n for i in range(rows):\n for j in range(cols):\n board[i][j] = board[i][j]%2\n return board", "def gameOfLife(self, board) :\n # mark live-->dead (-1)\n # mark live-->live (1)\n # mark dead-->live (2)\n # mark dead-->dead (0)\n\n h = len(board)\n w = len(board[0])\n\n def counter(i,j):\n c=0\n for m in range(-1,2):\n for n in range(-1,2):\n if i+m<0 or j+n <0 :\n continue\n if i+m>h-1 or j+n>w-1:\n continue\n else:\n if board[i+m][j+n]==1 or board[i+m][j+n]==-1:\n c+=1\n return c\n\n for i in range(h):\n for j in range(w):\n live=counter(i,j)\n if board[i][j] ==1:\n live=live-1\n if live<2 or live>3:\n board[i][j]=-1\n else:\n if live==3:\n board[i][j]=2\n for i in range(h):\n for j in range(w):\n if board[i][j]==2:\n board[i][j]=1\n if board[i][j]==-1:\n board[i][j]=0", "def gen_next_state(self, row, col, value):\n new_state = self.copy_state() # Create a copy of the current state (final and possible values)\n # Update the board configuration:\n new_state.final_values[row][col] = value\n new_state.possible_values[row][col] = [] # Position has been filled so it no longer has possible moves\n\n new_state.update_constraints(row, col, value) # Update affected possible values (apply constraints)\n\n singleton_list = new_state.get_singletons() # Find singletons for the new board configuration\n while singleton_list:\n row, col = singleton_list.pop() # Get singleton's position\n\n new_state.final_values[row][col] = new_state.possible_values[row][col][0] # Update final value\n new_state.possible_values[row][col] = [] # Position has been filled so it no longer has possible moves\n new_state.update_constraints(row, col, new_state.final_values[row][col]) # Propagate constraints\n\n singleton_list = new_state.get_singletons() # Get the remaining singletons\n\n return new_state # Return the resulting state", "def gameOfLife(self, board: List[List[int]]) -> None:\n res = {}\n for i in range(len(board)):\n for j in range(len(board[0])):\n count = 0\n # top line\n if i-1 >= 0:\n # left-top\n if j-1>=0:\n if board[i-1][j-1]==1:\n count += 1\n # top\n if board[i-1][j]==1:\n count += 
1\n # right-top\n if j+1<len(board[0]):\n if board[i-1][j+1]==1:\n count += 1\n # same line\n # left\n if j-1>=0:\n if board[i][j-1]==1:\n count += 1\n # right\n if j+1 < len(board[0]):\n if board[i][j+1]==1:\n count += 1\n # bottom line\n if i+1 < len(board):\n # left-bottom\n if j-1 >= 0:\n if board[i+1][j-1]==1:\n count += 1\n # bottom\n if board[i+1][j]==1:\n count += 1\n # right-bottom\n if j+1 < len(board[0]):\n if board[i+1][j+1]==1:\n count += 1\n # check\n if board[i][j]==0 and count==3:\n res[(i,j)] = 1\n if board[i][j]==1 and (count<2 or count>3):\n res[(i,j)] = 0\n for key in res:\n board[key[0]][key[1]] = res[key]", "def advance_board(self):\n board = self.board\n rules = self.energy_rules\n h, w = board.shape\n beta = 1.0 / max(1e-20, self.temperature)\n if len(rules[0]) - 1 == 4:\n neighborhood = np.array([[0,1,0],[1,0,1],[0,1,0]])\n elif len(rules[0]) - 1 == 6:\n neighborhood = np.array([[0,1,1],[1,0,1],[1,1,0]])\n elif len(rules[0]) - 1 == 8:\n neighborhood = np.array([[1,1,1],[1,0,1],[1,1,1]])\n else:\n raise RuntimeError(\"async rules must have length 5, 7, or 9\")\n rng = get_rng()\n for _ in range(int(board.size * self.cells_per_update)):\n x = rng.choice(w)\n y = rng.choice(h)\n if board[y, x] & CellTypes.frozen:\n continue\n neighbors = board.view(wrapping_array)[y-1:y+2, x-1:x+2] * neighborhood\n alive_neighbors = np.sum(neighbors & CellTypes.alive > 0)\n spawn_neighbors = np.sum(neighbors & CellTypes.spawning > 0)\n frozen = np.sum(neighbors & CellTypes.freezing) > 0\n if frozen:\n continue\n if board[y, x] & CellTypes.alive:\n H = rules[0][alive_neighbors]\n else:\n H = rules[1][alive_neighbors]\n\n P = 0.5 + 0.5*np.tanh(H * beta)\n P = 1 - (1-P)*(1-self.spawn_prob)**spawn_neighbors\n board[y, x] = CellTypes.life if coinflip(P) else CellTypes.empty", "def next_states(self, state):\n import copy\n\n ans = []\n current_array = state.board.array\n space_pos = state.board.space\n\n up_pos = [space_pos[0] - 1, space_pos[1]]\n down_pos = [space_pos[0] + 1, space_pos[1]]\n left_pos = [space_pos[0], space_pos[1] - 1]\n right_pos = [space_pos[0], space_pos[1] + 1]\n\n # down position\n if self.__is_valid(down_pos):\n down_array = [copy.copy(row) for row in current_array]\n down_board = Board(array=down_array, space=space_pos.copy())\n down_board.swap(down_pos)\n ans.append(State(board=down_board, came_from=state, move='U'))\n\n # up position\n if self.__is_valid(up_pos):\n up_array = [copy.copy(row) for row in current_array]\n up_board = Board(array=up_array, space=space_pos.copy())\n up_board.swap(up_pos)\n ans.append(State(board=up_board, came_from=state, move='D'))\n\n # right position\n if self.__is_valid(right_pos):\n right_array = [copy.copy(row) for row in current_array]\n right_board = Board(array=right_array, space=space_pos.copy())\n right_board.swap(right_pos)\n ans.append(State(board=right_board, came_from=state, move='L'))\n\n # left position\n if self.__is_valid(left_pos):\n left_array = [copy.copy(row) for row in current_array]\n left_board = Board(array=left_array, space=space_pos.copy())\n left_board.swap(left_pos)\n ans.append(State(board=left_board, came_from=state, move='R'))\n\n return ans", "def next_step(self):\n\n c = 1\n dt = 0.001\n dx = 1 / 20**2\n\n # copy current state first\n next_state = np.copy(self.state)\n\n # iterate over matrix\n for i in range(self.width - 1):\n for j in range(self.height - 1):\n\n if not self.shape == \"circle\" or self.circle[i, j] == 1:\n\n # left bottom corner\n if i == 0 and j == 0:\n next_state[i, j] = ((c * dt)/ 
dx)** 2\\\n * (self.state[i + 1, j] + 0\\\n + 0 + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i, j]\n # right top corner\n elif i == 0 and j == self.height - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (0 + self.state[i + 1, j]\\\n + self.state[i, j - 1] + 0\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n # right bottom corner\n elif i == self.width - 1 and j == 0:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i, j - 1] + 0\\\n + 0 + self.state[i - 1, j]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n # left bottom corner\n elif i == self.width - 1 and j == self.height - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i, j - 1] + self.state[i - 1, j]\\\n + 0 + 0\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif i == 0: # j is not 0\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + 0\\\n + self.state[i, j - 1] + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif j == 0:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + self.state[i - 1, j]\\\n + 0 + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif i == self.width - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (0 + self.state[i - 1, j]\\\n + self.state[i, j - 1] + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif j == self.height - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + self.state[i - 1, j]\\\n + self.state[i, j - 1] + 0\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n else:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + self.state[i - 1, j]\\\n + self.state[i, j - 1] + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n\n self.prev_state = np.copy(self.state)\n self.state = np.copy(next_state)\n\n self.timestep += 1", "def gameOfLife(self, board: List[List[int]]) -> None:\n if (0 == len(board)): return\n ori, l, r = copy.deepcopy(board), len(board), len(board[0])\n for i in range(l):\n for j in range(r):\n live = self.count(ori, i, j)\n if ori[i][j] == 1 and live < 2:\n board[i][j] = 0\n elif ori[i][j] == 1 and live > 3:\n board[i][j] = 0\n elif ori[i][j] == 0 and live == 3:\n board[i][j] = 1", "def start_state():\n return chess.Board()", "def eval_board(self, board):\n\t\ts = 0\n\t\t\n\t\tfor i in board.columns:\n\t\t\tfor j in board.rows:\n\t\t\t\tif board[i+j] == self.color:\n\t\t\t\t\n\t\t\t\t\tif i in ['A', 'H'] or j in ['1', '8']:\n\t\t\t\t\t\tif i + j in ['A1', 'A8', 'H1', 'H8']:\n\t\t\t\t\t\t\ts += 4\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ts += 2\n\t\t\t\t\telse:\n\t\t\t\t\t\ts += 1\n\t\treturn s", "def gameOfLife(self, board: List[List[int]]) -> None:\n if board:\n for i in range(len(board)):\n for j in range(len(board[0])):\n board[i][j] += 1\n for i in range(len(board)):\n for j in range(len(board[0])):\n alive_cnt = 0\n for x, y in [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (1, -1), (-1, 1), (-1, -1)]:\n if 0 <= x + i < len(board) and 0 <= y + j < len(board[0]) and abs(board[x + i][y + j]) == 2:\n alive_cnt += 1\n if alive_cnt <= 1 or alive_cnt >= 4 or (alive_cnt == 2 and abs(board[i][j]) == 1):\n board[i][j] = -1 * board[i][j]\n for i in range(len(board)):\n for j in range(len(board[0])):\n 
board[i][j] = 1 if board[i][j] > 0 else 0", "def _turn_cycle(self):\r\n\r\n #Get current player\r\n cur_player = self.get_current_player()\r\n\r\n #Get board states for current player\r\n choices = self.board.get_states(cur_player)\r\n\r\n #Update board state\r\n self.board = choices[self.get_player(cur_player).choose_state(choices)]\r\n\r\n #Make sure you have the history, original board is added, so we can do it afterwards\r\n self.board_history.append(self.board)\r\n\r\n #Check for win or tie\r\n if self.board.check_win(self.num_to_win, cur_player):\r\n self._end_game(cur_player)\r\n return cur_player\r\n if self.board.check_tie():\r\n self._end_game()\r\n return -1\r\n if self.turn_number >= self.max_turns:\r\n self._end_game()\r\n return -1\r\n\r\n #Incriment turn counter if the game has not ended\r\n self._increment_turn()\r\n\r\n return 0", "def next_life_generation(a):\n w = len(a[0])\n h = len(a)\n new_a = create_board(w, h)\n\n for n in range(h):\n for m in range(w):\n if 0 < n < h - 1 and 0 < m < w - 1:\n count = count_neighbours(n, m, a)\n if count < 2 or count > 3:\n new_a [n][m] = 0\n elif count == 3:\n new_a[n][m] =1\n else:\n new_a[n][m] = a[n][m]\n else:\n new_a[n][m] = 0\n \n return new_a", "def evaluate(self, board):", "def next_move(ttt):\r\n # get board in 2D array form\r\n b = ttt.get_board()\r\n \r\n # if there's a winning move, take it\r\n (cfw, win_move) = check_for_win_lose(b)\r\n if cfw is not None:\r\n if win_move:\r\n print 'COMPUTER WINS!'\r\n return cfw, win_move\r\n # otherwise, pres on with the next best move\r\n\r\n # get \"points\" on board. this tells us not only the move\r\n # but also who went first\r\n board_count = sum(sum(b,[]))\r\n \r\n # IF COMPUTER HAS FIRST TURN\r\n # if 1st move\r\n if board_count == 0:\r\n return (2,2), False # take the center\r\n # this is not best strategy for winning, but\r\n # it the human messes up, the computer can win.\r\n # taking a corner first makes it a little easier\r\n # for the computer to win becase the human only\r\n # has one correct move to make: to take the center\r\n \r\n # if 3rd move, and not a winning one\r\n if board_count == 3:\r\n if b[0][1]==2 or b[1][0]==2 or b[0][0]==2:\r\n return (3,3), False\r\n elif b[0][2]==2:\r\n return (3,1), False\r\n elif b[2][0]==2:\r\n return (1,3), False\r\n else:#elif b[1][2]==2 or b[2][1]==2 or b[2][2]==2:\r\n return (1,1), False\r\n\r\n # if 5th move, and not a winning or losing one\r\n if board_count == 6:\r\n b5 = numpy.array([[0,2,1],[0,1,0],[2,0,0]])\r\n if (b == b5).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (3,1), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (1,3), False\r\n\r\n b5 = numpy.array([[0,0,1],[0,1,2],[2,0,0]])\r\n if (b == b5).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (1,3), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (3,1), False\r\n\r\n # at this point, all possible boards should have been covered\r\n\r\n # if 7th move, and a winning or losing one\r\n if board_count == 9:\r\n # find the row or col with 2 open slots and mark it\r\n for ri in range(3):\r\n r = b[ri]\r\n if sum([1 if i==0 else 0 for i in r]) == 2:\r\n if r[0] == 0:\r\n return (ri+1,1), False\r\n else:\r\n return (ri+1,2), False\r\n for ci in range(3):\r\n c = get_col(b, ci)\r\n if sum([1 if i==0 else 0 for i in c]) == 2:\r\n if c[0] == 0:\r\n return 
(1,ci+1), False\r\n else:\r\n return (2,ci+1), False\r\n\r\n \r\n # IF HUMAN HAS FIRST TURN\r\n # if 2nd move\r\n if board_count == 2:\r\n if b[1][1] == 0:\r\n # if the center is open, computer has\r\n # to take it in order to not lose\r\n return (2,2), False\r\n else:\r\n # otherwise take a corner\r\n return (1,1), False\r\n\r\n # if 4th move\r\n if board_count == 5:\r\n # if we took a corner on move 2 and they\r\n # are using computer's offensive strategy\r\n # when it is first player\r\n b4 = [[1,0,0],[0,2,0],[0,0,2]]\r\n if b==b4:\r\n return (3,1), False\r\n # if we took center on move 2\r\n else:\r\n b4 = numpy.array([[2,0,0],[0,1,0],[0,0,2]])\r\n if (b == b4).all() or (b == numpy.rot90(b4,1)).all():\r\n return (1,2), False\r\n\r\n # overall ELSE -- just find a square\r\n for ri in range(3):\r\n for ci in range(3):\r\n if b[ri][ci] == 0:\r\n return (ri+1,ci+1), False", "def current_state(self):\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n for i in range(8):\n for j in range(8):\n if self.board_value[i][j]==self.current_player:\n square_state[0][i][j]=1\n elif self.board_value[i][j]!=self.current_player and self.board_value[i][j]!= 0:\n square_state[1][i][j]=1\n # indicate the last move location\n square_state[2][self.last_move // self.width, self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]", "def computeSide(self):\n side = 0\n for c in 'abcdefgh':\n side += self.getPieceType(c,1)\n side += self.getPieceType(c,2)\n side -= self.getPieceType(c,7)\n side -= self.getPieceType(c,8) \n rospy.loginfo(\"Computed side value of: %d\" % side)\n if side > 0:\n self.side = self.WHITE # good to go\n else:\n self.side = self.BLACK \n # need to setup board \n temp_board = BoardState(self.side) \n for i in range(8):\n temp_board.setPiece(i, 2, self.makePiece(ChessPiece.WHITE_PAWN, self.getPiece(7-i, 7)) )\n temp_board.setPiece(i, 7, self.makePiece(ChessPiece.BLACK_PAWN, self.getPiece(7-i, 2)) )\n\n temp_board.setPiece('a', 1, self.makePiece(ChessPiece.WHITE_ROOK, self.getPiece('h',8)) )\n temp_board.setPiece('b', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, self.getPiece('g',8)))\n temp_board.setPiece('c', 1, self.makePiece(ChessPiece.WHITE_BISHOP, self.getPiece('f',8)))\n temp_board.setPiece('d', 1, self.makePiece(ChessPiece.WHITE_QUEEN, self.getPiece('e',8)))\n temp_board.setPiece('e', 1, self.makePiece(ChessPiece.WHITE_KING, self.getPiece('d',8)))\n temp_board.setPiece('f', 1, self.makePiece(ChessPiece.WHITE_BISHOP, self.getPiece('c',8)))\n temp_board.setPiece('g', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, self.getPiece('b',8)))\n temp_board.setPiece('h', 1, self.makePiece(ChessPiece.WHITE_ROOK, self.getPiece('a',8)))\n\n temp_board.setPiece('a', 8, self.makePiece(ChessPiece.BLACK_ROOK, self.getPiece('h',1)) )\n temp_board.setPiece('b', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, self.getPiece('g',1)) )\n temp_board.setPiece('c', 8, self.makePiece(ChessPiece.BLACK_BISHOP, self.getPiece('f',1)) )\n temp_board.setPiece('d', 8, self.makePiece(ChessPiece.BLACK_QUEEN, self.getPiece('e',1)) )\n temp_board.setPiece('e', 8, self.makePiece(ChessPiece.BLACK_KING, self.getPiece('d',1)) )\n temp_board.setPiece('f', 8, self.makePiece(ChessPiece.BLACK_BISHOP, self.getPiece('c',1)) )\n temp_board.setPiece('g', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, self.getPiece('b',1)) )\n temp_board.setPiece('h', 8, self.makePiece(ChessPiece.BLACK_ROOK, self.getPiece('a',1)) ) \n\n 
self.values = temp_board.values\n self.printBoard()\n\n self.last_move = \"go\"", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def gameOfLife(self, board: list[list[int]]) -> None:\n def game_of_life_infinite(live: set[tuple[int, int]]) -> set[tuple[int, int]]:\n ctr = Counter((I, J)\n for i, j in live\n for I in range(i - 1, i + 2)\n for J in range(j - 1, j + 2)\n if 
I != i or J != j)\n return {ij\n for ij in ctr\n if ctr[ij] == 3 or ctr[ij] == 2 and ij in live}\n\n live = {(i, j) for i, row in enumerate(board)\n for j, live in enumerate(row) if live}\n live = game_of_life_infinite(live)\n for i, row in enumerate(board):\n for j in range(len(row)):\n row[j] = int((i, j) in live)", "def advance(self, board):", "def init_board():\n\t# Generates a table 10*10 of 0s with -1 around and the initial state\n\t# of the board with 2 whites and 2 blacks in the middle\n\ttable = [[0 if i != 0 and i != 9 else -1 for i in range(10)] if j != 0 and j != 9 else [-1 for i in range(10)] for j in range(10)] #leaves a -1 line around the whole table of 0s\n\t#initial state is drawn and recorded\n\ttable[4][4] = 2\n\ttable[5][5] = 2\n\ttable[4][5] = 1\n\ttable[5][4] = 1\n\tdrawPiece((4,4),2)\n\tdrawPiece((5,5),2)\n\tdrawPiece((4,5),1)\n\tdrawPiece((5,4),1)\n\treturn table", "def _next_state(self, state, action):\n\n # Transition table to define movement for each action\n if self.action_type == 'VonNeumann':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1]}\n elif self.action_type == 'Moore':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1],\n 4: [-1, +1], 5: [+1, +1], 6: [-1, -1], 7: [+1, -1]}\n\n new_state = [state[0] + transitions[action][0], state[1] + transitions[action][1]]\n if self.maze[new_state[0]][new_state[1]] == 1: # Hit wall, stay there\n return state\n else: # Valid move for 0, 2, 3, 4\n return new_state", "def transition(board, player, action):\n if _ENV.is_valid((board, player), action):\n new_board, __ = _ENV.get_next_state((board, player), action)\n return new_board\n return None", "def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 1\n return new_boards", "def gameOfLife(self, board) -> None:\n 
changelist = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.ischange(i, j, board):\n changelist.append([i, j])\n\n for x, y in changelist:\n board[x][y] = ~board[x][y] + 2", "def next_possible_states(path, check_dict, check):\r\n \r\n current_state_tuple = path[-1]\r\n state_container = []\r\n x = current_state_tuple[1][0]\r\n y = current_state_tuple[1][1]\r\n current_state = current_state_tuple[0]\r\n\r\n # Down\r\n if y < 3:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y + 1][x]\r\n new_state[y + 1][x] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x, y + 1)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Up\r\n if y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n if y == 1 and x == 0:\r\n new_state[y][x] = new_state[y - 1][x]\r\n new_state[y - 1][x] = 0\r\n if is_goal(new_state):\r\n new_index = (x, y - 1)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n elif y > 1:\r\n new_state[y][x] = new_state[y - 1][x]\r\n new_state[y - 1][x] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x, y - 1)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Left\r\n if x > 0 and y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y][x - 1]\r\n new_state[y][x - 1] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x - 1, y)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Right\r\n if x < 2 and y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y][x + 1]\r\n new_state[y][x + 1] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x + 1, y)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n return state_container", "def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width, move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width, move_oppo % self.height] = 1.0\n\n # last move indication\n square_state[2][self.last_move // self.width, self.last_move % self.height] = 1.0\n\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0\n\n return square_state[:, ::-1, :]", "def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.state:\n moves, players = np.array(list(zip(*self.state.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.state) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state", "def game_of_life():\n # 3x3 neighbourhood\n offsets = 
[[(y, x) for y in range(-1, 2)] for x in range(-1, 2)]\n\n # Create mappings\n mappings = {}\n for i in range(2 ** 9):\n\n # Determine the initial state (key)\n key = f\"{bin(i)[2:]:0>9}\" # As binary string\n key = tuple(k == \"1\" for k in key) # As tuple of bools\n key = tuple(key[i * 3:i * 3 + 3] for i in range(3)) # Reshape into 2D grid\n\n # Alive counts\n centre = key[1][1]\n others = sum(sum(row) for row in key) - centre\n\n # Skip if state does not evaluate to True\n if centre:\n if others not in (2, 3):\n continue\n\n else:\n if others != 3:\n continue\n\n mappings[key] = True\n\n return Mapping2DRuleset(mappings, offsets)", "def get_state(self,board):\n s = range(board.size())\n return [ board.getCell(x,y) for y in s for x in s]", "def gameOfLife(self, board: List[List[int]]) -> None:\r\n self.board = board\r\n self.l = len(board)\r\n self.w = len(board[0])\r\n status = [[0] * self.w for _ in range(self.l)]\r\n for i in range(self.l):\r\n for j in range(self.w):\r\n status[i][j] = self.statusUpdate(board[i][j], self.countLivingNeighbor([i, j]))\r\n #print(\"prev: \", i, j ,board[i][j], \" count: \", self.countLivingNeighbor([i, j]), \" after:\", status[i][j])\r\n for i in range(self.l):\r\n for j in range(self.w):\r\n board[i][j] = status[i][j]", "def solve(self, board: List[List[str]]) -> None:\n\n def expansion(i,j):\n for di,dj in {(-1,0),(1,0),(0,-1),(0,1)}:\n if -1<i+di<m and -1<j+dj<n and state[i+di][j+dj]=='O':\n return True\n return False\n\n if not board:\n return board\n\n m = len(board)\n n = len(board[0]) \n state = [['X']*n for _ in range(m)]\n\n for j in range(n):\n state[0][j] = board[0][j]\n state[m-1][j] = board[m-1][j]\n \n for i in range(m):\n state[i][0] = board[i][0]\n state[i][n-1] = board[i][n-1]\n \n flag = 1\n\n while flag:\n flag = 0\n\n for k in range(1, (1+min(m,n))//2):\n for j in range(k,n-k):\n if board[k][j]=='O' and state[k][j] == 'X' and expansion(k,j):\n state[k][j] = 'O'\n flag = 1\n \n if board[m-1-k][j]=='O' and state[m-1-k][j] == 'X' and expansion(m-1-k,j):\n state[m-1-k][j] = 'O'\n flag = 1\n \n for i in range(k,m-k):\n if board[i][k]=='O' and state[i][k] == 'X' and expansion(i,k):\n state[i][k] = 'O'\n flag = 1\n \n if board[i][n-1-k]=='O' and state[i][n-1-k] == 'X' and expansion(i,n-1-k):\n state[i][n-1-k] = 'O'\n flag = 1\n\n board[:] = state[:]", "def gameOfLife_1(self, board):\n self.template = [[[i, j] for i in (-1, 0, 1)] for j in (-1, 0, 1)]\n self.template[1][1] = [9999, 9999]\n tmp_board = [[j for j in row] for row in board]\n\n for i in range(len(tmp_board)):\n for j in range(len(tmp_board[i])):\n count = self.countLiveNeighborsCell(tmp_board, i, j)\n if tmp_board[i][j] == 0 and count == 3:\n board[i][j] = 1\n elif tmp_board[i][j] == 1 and count < 2:\n board[i][j] = 0\n # elif tmp_board[i][j] == 1 and count in [2, 3]:\n # board[i][j] = 1\n elif tmp_board[i][j] == 1 and count > 3:\n board[i][j] = 0", "def knight_tours(board: List[List[int]], curr: Tuple[int, int], count: int) -> -1:\n if count == len(board) ** 2:\n return\n\n deltas = [\n (2, 1),\n (1, 2),\n (-2, 1),\n (-1, 2),\n (2, -1),\n (1, -2),\n (-2, -1),\n (-1, -2),\n ]\n\n for delta in deltas:\n next_x, next_y = curr[0] + delta[0], curr[1] + delta[1]\n if not is_valid_coordinate((next_x, next_y), len(board)):\n continue\n\n board[next_x][next_y] = count\n knight_tours(board, (next_x, next_y), count + 1)\n board[next_x][next_y] = -1", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in 
range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def _get_current_game_state(board):\n return np.concatenate((_get_pieces_one_hot(board, color=False),\n _get_pieces_one_hot(board, color=True)),\n axis=-1)", "def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]", "def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]", "def gameOfLife(self, board: List[List[int]]) -> None:\r\n # 通过本例我们将学习如何搜素一共二维数组\r\n if not board or not board[0]:\r\n return\r\n row = len(board)\r\n col = len(board[0])\r\n\r\n def countCeil(x:int,y:int) -> int:\r\n count = 0\r\n for x_offset in range(-1,2):\r\n for y_offset in range(-1,2):\r\n if x_offset == y_offset == 0:\r\n continue\r\n if 0<= x + x_offset < row and 0 <= y + y_offset < col:\r\n count += board[x + x_offset][y+ y_offset] & 0x0F\r\n if count == 3 or (board[x][y] and count == 2):\r\n count = 1\r\n else:\r\n count = 0\r\n board[x][y] |= (count <<4) # |=意思是按位或\r\n\r\n for x in range(row):\r\n for y in range(col):\r\n countCeil(x, y)\r\n for x in 
range(row):\r\n for y in range(col):\r\n board[x][y] = (board[x][y] & 0xF0) >> 4", "def make_move(self, move: Any) -> \"StonehengeState\":\n new_board = deepcopy(self.current_board)\n for index in range(len(self.current_board)):\n if self.current_board[index] == move:\n if self.p1_turn:\n new_board = new_board.replace(\n self.current_board[index], '1')\n else:\n new_board = new_board.replace(\n self.current_board[index], '2')\n new_ley_lines = deepcopy(self.current_ley_lines)\n for item in new_ley_lines:\n for key in item:\n for index in range(len(key[1])):\n if key[1][index] == move:\n if self.p1_turn:\n key[1][index] = '1'\n else:\n key[1][index] = '2'\n change_dict = {}\n for item in new_ley_lines:\n for key in item:\n p1_count = 0\n p2_count = 0\n for string in key[1]:\n if string == '1':\n p1_count += 1\n if string == '2':\n p2_count += 1\n\n\n if p1_count >= len(key[1])/2 and p1_count > p2_count:\n\n change_dict[key[0]] = '1'\n if p2_count >= len(key[1])/2 and p2_count > p1_count:\n\n change_dict[key[0]] = '2'\n for key in change_dict:\n if not (key == '1' or key == '2'):\n if str(key) in new_board:\n new_board = new_board.replace(str(key), change_dict[key])\n for item in new_ley_lines:\n for key1 in item:\n if key == key1[0]:\n key1[0] = change_dict[key]\n\n new_state = StonehengeState(not self.p1_turn, self.side_length,\n new_board, new_ley_lines)\n return new_state", "def __iterate(self):\n\t\tnext_board = []\n\n\t\tfor y, row in enumerate(self.__board):\n\t\t\tnext_board.append([])\n\n\t\t\tfor x, cell in enumerate(row):\n\t\t\t\tneighbors = [\n\t\t\t\t\tself.__get_cell_state(y - 1, x - 1),\n\t\t\t\t\tself.__get_cell_state(y - 1, x),\n\t\t\t\t\tself.__get_cell_state(y - 1, x + 1),\n\t\t\t\t\tself.__get_cell_state(y, x - 1),\n\t\t\t\t\tself.__get_cell_state(y, x + 1),\n\t\t\t\t\tself.__get_cell_state(y + 1, x - 1),\n\t\t\t\t\tself.__get_cell_state(y + 1, x),\n\t\t\t\t\tself.__get_cell_state(y + 1, x + 1)\n\t\t\t\t]\n\t\t\t\tnum_neighbors = sum(neighbors)\n\t\t\t\tstate = get_new_state(cell, num_neighbors)\n\t\t\t\tnext_board[y].append(state)\n\n\t\tself.__board = next_board\n\t\tself.__display(self.__board)", "def __init__(self, n: int):\n        self.rows = [[n, -1] for _ in range(n)]\n        self.cols = [[n, -1] for _ in range(n)]\n        self.diag = [[n, -1], [n, -1]] # 0 for normal, 1 for anti\n        \n    def move(self, row: int, col: int, player: int) -> int:\n        r1, r2 = self.check(self.rows, row, player), self.check(self.cols, col, player)\n        r3, r4 = 0, 0\n        if(row == col):\n            r3 = self.check(self.diag, 0, player)\n        if(row + col == len(self.rows)-1):\n            r4 = self.check(self.diag, 1, player)\n        \n        return max(r1,r2,r3,r4)\n    def check(self, arr, i, player):\n        arr[i][0] -= 1\n        \n        if(arr[i][1] == -1):\n            arr[i][1] = player\n        elif(arr[i][1] != player):\n            arr[i][1] = 0\n        \n        if(arr[i][0] == 0 and arr[i][1] != 0):\n            return player\n        return 0\n        \n        \"\"\"\n       Player {player} makes a move at ({row}, {col}).\n       @param row The row of the board.\n       @param col The column of the board.\n       @param player The player, can be either 1 or 2.\n       @return The current winning condition, can be either:\n               0: No one wins.\n               1: Player 1 wins.\n               2: Player 2 wins.\n       \"\"\"\n        ", "def successorStates(self, state):\n currentState = state[1]\n successors = []\n for action in 
Directions.CARDINAL:\n x, y = state[0] # currentPosition\n print(\"State: {}\".format(state[0]))\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n\n # Implement a successor discovery, check if any corners are satisfied\n # and update values as they are satisfied\n if (not hitsWall):\n successorsState = []\n nextxy = (nextx, nexty)\n if nextxy == self.corners[0]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[0])\n if nextxy == self.corners[1]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[1])\n if nextxy == self.corners[2]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[2])\n if nextxy == self.corners[3]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[3])\n # Put all updated values of 4 corners to a variable\n successorPost = (successorsState[0], successorsState[1],\n successorsState[2], successorsState[3])\n # Append to go to the next move\n successors.append(((nextxy, successorPost), action, 1))\n\n self._numExpanded += 1 # Count the number of nodes expanded\n return successors", "def next_state(self, debug=False):\n\n if self.current_state == 'NoObstacle':\n # First check if any obstacle is in sight\n if self.transitions.next_room_reached():\n self.current_state = 'RoomReached'\n elif self.transitions.obstacle_in_sight():\n self.current_state = 'Obstacle'\n\n elif self.current_state == 'Obstacle':\n # First check if obstacle is still in sight\n if self.transitions.no_obstacle_in_sight() and not self.transitions.obstacle_in_sight():\n self.current_state = 'NoObstacle'\n elif self.transitions.next_room_reached():\n self.current_state = 'RoomReached'\n\n elif self.current_state == 'RoomReached':\n self.current_state = 'InspectCorners'\n\n elif self.current_state == 'InspectCorners':\n if self.transitions.all_corners_inspected():\n if not self.transitions.all_rooms_visited():\n self.current_state = 'RotateToExit'\n else:\n self.current_state = 'Finished'\n\n elif self.current_state == 'RotateToExit':\n if self.transitions.aiming_to_carrot():\n self.current_state = 'NoObstacle'\n\n\n elif self.current_state == 'Finished':\n pass\n\n # DEBUG\n if debug:\n print 'Next state: %s' % self.current_state\n\n if self.current_state is not self.old_state:\n print self.current_state\n\n self.old_state = self.current_state\n\n return self.current_state", "def vidrach_actual(board):\n board_size = len(board)\n\n # coordinates queue - list of (red_pos, blue_pos) tuples\n posqueue = queue.SimpleQueue()\n posqueue.put(((0, 0), (board_size - 1, board_size - 1)))\n\n moves = {((0, 0), (board_size - 1, board_size - 1)): 0}\n\n while not posqueue.empty():\n curr_pos = posqueue.get()\n curr_move = moves[curr_pos]\n\n red_pos = curr_pos[0]\n blue_pos = curr_pos[1]\n\n # if at the swapped position, break/return as this is the fastest\n if red_pos == (board_size - 1, board_size - 1) and blue_pos == (0, 0):\n return curr_move\n\n # check all red moves\n if red_pos != (board_size - 1, board_size - 1):\n move_dist = board[blue_pos[0]][blue_pos[1]]\n\n # up\n new_pos = (red_pos[0], red_pos[1] - move_dist)\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[1] >= 0\n and new_pos != blue_pos\n and (new_pos, blue_pos) not in moves\n ):\n posqueue.put((new_pos, blue_pos))\n moves[(new_pos, blue_pos)] = curr_move + 1\n\n # down\n new_pos = (red_pos[0], red_pos[1] + move_dist)\n if ( # if in-bounds, not 
occupied, and has not been visited before\n new_pos[1] < board_size\n and new_pos != blue_pos\n and (new_pos, blue_pos) not in moves\n ):\n posqueue.put((new_pos, blue_pos))\n moves[(new_pos, blue_pos)] = curr_move + 1\n\n # left\n new_pos = (red_pos[0] - move_dist, red_pos[1])\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[0] >= 0\n and new_pos != blue_pos\n and (new_pos, blue_pos) not in moves\n ):\n posqueue.put((new_pos, blue_pos))\n moves[(new_pos, blue_pos)] = curr_move + 1\n\n # right\n new_pos = (red_pos[0] + move_dist, red_pos[1])\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[0] < board_size\n and new_pos != blue_pos\n and (new_pos, blue_pos) not in moves\n ):\n posqueue.put((new_pos, blue_pos))\n moves[(new_pos, blue_pos)] = curr_move + 1\n\n # check all blue moves\n if blue_pos != (0, 0):\n move_dist = board[red_pos[0]][red_pos[1]]\n\n # up\n new_pos = (blue_pos[0], blue_pos[1] - move_dist)\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[1] >= 0\n and new_pos != red_pos\n and (red_pos, new_pos) not in moves\n ):\n posqueue.put((red_pos, new_pos))\n moves[(red_pos, new_pos)] = curr_move + 1\n\n # down\n new_pos = (blue_pos[0], blue_pos[1] + move_dist)\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[1] < board_size\n and new_pos != red_pos\n and (red_pos, new_pos) not in moves\n ):\n posqueue.put((red_pos, new_pos))\n moves[(red_pos, new_pos)] = curr_move + 1\n\n # left\n new_pos = (blue_pos[0] - move_dist, blue_pos[1])\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[0] >= 0\n and new_pos != red_pos\n and (red_pos, new_pos) not in moves\n ):\n posqueue.put((red_pos, new_pos))\n moves[(red_pos, new_pos)] = curr_move + 1\n\n # right\n new_pos = (blue_pos[0] + move_dist, blue_pos[1])\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[0] < board_size\n and new_pos != red_pos\n and (red_pos, new_pos) not in moves\n ):\n posqueue.put((red_pos, new_pos))\n moves[(red_pos, new_pos)] = curr_move + 1\n\n # if queue has been exhausted and the end has not been reached\n return -1", "def next_round(self, old_round):\n new_round = np.copy(old_round) #copy of the old grid\n # for each square\n for i in range(Lx):\n for j in range(Ly):\n if old_round[i][j] == 0 : #if the cell is dead, it will live if it has 3 living neighbors.\n if self.sum_living_cell(i, j, old_round) == 3:\n new_round[i][j] = 1\n else:\n new_round[i][j] = 0\n if old_round[i][j] == 1 : #if the cell is alive, it won't dead if it has 2 or 3 living neighors.\n square_score = self.sum_living_cell(i, j, old_round)\n if square_score != 2 and square_score != 3 :\n new_round[i][j] = 0\n else:\n new_round[i][j] = 1\n return new_round", "def check_complete_board(start_pos, dim_square, board):\n change = False\n for row in range(8):\n for col in range(8):\n # Grab image on real board\n im = region_grabber((start_pos[0] + col * dim_square[0],\n start_pos[1] - (row + 1.0) * dim_square[1],\n start_pos[0] + (col + 1.0) * dim_square[0],\n start_pos[1] - row * dim_square[1]))\n\n # Check if piece corresponds with piece on board if there is a piece\n if piece_on_pos((row, col), board):\n obj = board[row][col]\n if (row + col) % 2 == 0: # Black background\n pos = imagesearcharea(obj.im_b, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n else: # White background\n pos = imagesearcharea(obj.im_w, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n\n # Else 
--> Go through every possible image\n if (row + col) % 2 == 0: # Black background\n # Pawn\n pos = imagesearcharea(\"Images/PWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n else: # White background\n # Pawn\n pos = imagesearcharea(\"Images/PWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = 
imagesearcharea(\"Images/KBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n\n if change:\n pyautogui.moveTo(start_pos[0] + 4 * dim_square[0],\n start_pos[1] - 4 * dim_square[1], 0.2)\n\n return change", "def player(board):\n if board == initial_state():\n return X\n\n total_x = 0\n total_o = 0\n\n for i in board:\n total_x += i.count(X)\n total_o += i.count(O)\n\n if (total_x + total_o) % 2 == 1:\n return O\n else:\n return X", "def changeCell(self, i, j):\n\t\t#If Cell is on Top row\n\t\tif(i==0):\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[0][1] + self.board[1][0] + self.board[1][1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[0][self.size-2] + self.board[1][self.size-2] + self.board[1][self.size-1]\n\t\t\telse:\n\t\t\t\tn = self.board[0][j-1] + self.board[1][j] + self.board[0][j+1] + self.board[1][j-1] + self.board[1][j+1]\n\t\t\t\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\t#If Cell on Bottom row\n\t\telif(i==(self.size-1)):\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[self.size-1][1] + self.board[self.size-2][0] + self.board[self.size-2][1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[self.size-1][self.size-2] + self.board[self.size-2][self.size-2] + self.board[self.size-2][self.size-1]\n\t\t\telse:\n\t\t\t\tn = self.board[self.size-1][j-1] + self.board[self.size-2][j] + self.board[self.size-1][j+1] + self.board[self.size-2][j-1] + self.board[self.size-2][j+1]\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\t#If Cell is in a middle row\n\t\telse:\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j+1] + self.board[i-1][j+1] + self.board[i+1][j+1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j-1] + self.board[i-1][j-1] + self.board[i+1][j-1]\n\t\t\telse:\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j-1] + self.board[i-1][j-1] + self.board[i+1][j-1] + self.board[i][j+1] + self.board[i-1][j+1] + self.board[i+1][j+1]\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0", "def solve_board(bd):\n if is_solved(bd):\n print_board(bd)\n return\n elif len(next_valid_boards(bd)) == 0:\n return False\n else:\n for board in next_valid_boards(bd):\n solve_board(board)", "def __init__(self, num_rows = 4, num_cols = 4,\n first_mover = \"W\", top_left = \"B\",\n how_to_win = \">\", initial_config=[]):\n # initial_config was made for AI Othello to\n # get around pass-by-reference behavior of lists.\n if (4 > num_rows > 16) or num_rows % 2 != 0:\n raise Exception\n else:\n self._num_rows = num_rows\n if (4 > num_cols > 16) or num_cols % 2 != 0:\n raise Exception\n else:\n self._num_cols = num_cols\n if first_mover != \"B\" and first_mover != \"W\":\n raise Exception\n else:\n self._turn = first_mover\n if top_left != \"B\" and top_left != \"W\":\n raise Exception\n else:\n self._top_left = top_left\n if how_to_win != \">\" and how_to_win != \"<\":\n raise Exception\n else:\n 
self._how_to_win = how_to_win\n\n if initial_config == []:\n self._board = self._make_board(num_rows, num_cols, top_left)\n else:\n self._board = deepcopy(initial_config)\n \n self._game_over = False\n self._winner = \" \"\n self._tl_cell = (0, 0)\n self._tr_cell = (0, num_cols-1)\n self._bl_cell = (num_rows-1, 0)\n self._br_cell = (num_rows-1, num_cols-1)\n self._ls_cells = [(c, 0) for c in range(1, num_rows-1)]\n self._rs_cells = [(c, num_cols-1) for c in range(1, num_rows-1)]\n self._ts_cells = [(0, c) for c in range(1, num_cols-1)]\n self._bs_cells = [(num_rows-1, c) for c in range(1, num_cols-1)]\n #^Note how ranges start from 1 and go to num_rows-1 to avoid corners,\n #which are processed differently", "def ai_3(board: BoardState) -> BoardState:\n cur_piece = board.cpiece\n if cur_piece is not None:\n moved = False\n for (x,y) in board.open_spots:\n move = find_win_spot(cur_piece, board)\n if move:\n board[move] = board.cpiece_id\n moved = True\n break\n if not moved:\n board[choice(list(board.open_spots))] = board.cpiece_id\n board.cpiece_id = choose_none_winable_piece(board)\n else:\n board.cpiece_id = choose_none_winable_piece(board)\n\n if (board.cpiece_id is None) and not board.is_full:\n board.cpiece_id, _ = choice(list(board.unused_game_pieces))\n return board", "def get_new_game_state(self, game_state, line, vector, current_color): \n\n #Determine if the move is parallel to the line\n parallel = False\n \n if len(line) > 1:\n if (line[0][0]+vector[0], line[0][1]+vector[1]) == line[1]:\n parallel = True\n if (line[-1][0]+vector[0], line[-1][1]+vector[1]) == line[-2]:\n parallel = True\n\n\n if parallel:\n\n #Find the rear marble in the motion\n start = line[0] if sum_tuples(line[0], vector) == line[1] else line[-1]\n end = line[-1] if start==line[0] else line[0]\n\n off_end = sum_tuples(end, vector)\n if coord_in_board(off_end) and game_state[off_end] == current_color: return None\n\n counting_others = False\n self_count = 0\n other_count = 0\n current = start\n chain = [2]\n #Put the marbles in chain until an empty space or the edge is reached\n while coord_in_board(current) and game_state[current]!=2:\n\n current_marble = game_state[current]\n if current_marble == current_color:\n if counting_others: \n return None\n else:\n self_count+=1\n else:\n other_count+=1\n counting_others=True\n \n if self_count>3 or other_count > 3 or other_count>=self_count: return None\n\n chain.append(current_marble)\n current = (current[0] + vector[0], current[1]+vector[1])\n\n #Check if ball is being pushed off\n if not counting_others and not coord_in_board(current): \n return None\n \n #Lay down the chain onto the new game state\n new_game_state = game_state.copy()\n current = start\n for marble in chain:\n x,y = current\n if ((1<=x<=9) and (1<=y<=9) and (y-4 <= x <= y+4)):\n new_game_state[current] = marble\n current = current[0]+vector[0], current[1]+vector[1]\n\n return new_game_state\n\n else: #Perpendicular moves\n\n for coord in line:\n move_coord = coord[0]+vector[0], coord[1]+vector[1]\n \n x,y = move_coord\n in_board = ((1<=x<=9) and (1<=y<=9) and (y-4 <= x <= y+4))\n if in_board and game_state[move_coord] != 2:\n return None\n elif not in_board:\n return None\n\n new_game_state = game_state.copy()\n for coord in line:\n new_game_state[coord] = 2\n move_coord = coord[0]+vector[0], coord[1]+vector[1]\n x,y = coord\n if (1<=x<=9) and (1<=y<=9) and (y-4 <= x <= y+4):\n new_game_state[move_coord] = current_color\n\n return new_game_state", "def get_board_state(self):\n\n board_state = 
''\n for i in range(0, 3):\n board_state += ''.join([self.board['{}{}'.format(i, j)] for j in range(0, 3)])\n return board_state", "def assignState(self):\n\t\tblack = ['r', 'n', 'b','q','k','b','n','r']\n\t\twhite = ['R','N','B','Q','K','B','N','R']\n\n\t\tfor i in range(8):\n\t\t\tself.squares[8*i + 0].state = black[i]\n\t\t\tself.squares[8*i + 1].state = 'p'\n\t\t\tself.squares[8*i + 2].state = '.'\n\t\t\tself.squares[8*i + 3].state = '.'\n\t\t\tself.squares[8*i + 4].state = '.'\n\t\t\tself.squares[8*i + 5].state = '.'\n\t\t\tself.squares[8*i + 6].state = 'P'\n\t\t\tself.squares[8*i + 7].state = white[i]\n\n\t\tfor square in self.squares:\n\t\t\tself.boardMatrix.append(square.state)", "def next_move(self, board):\n \n return self.best_move(self.score_columns(board))", "def result(board, action):\n newstate = [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n rows = 3\n columns = 3\n for i in range(rows):\n for j in range(columns):\n newstate[i][j] = board[i][j]\n# print(newstate)\n# print(action)\n ival = action[0]\n jval = action[1]\n if ival > 2:\n raise Exception(\"invalid i action\")\n if jval > 2:\n raise Exception(\"invalid j action\")\n if board[ival][jval] != EMPTY:\n raise Exception(\"invalid action\")\n else:\n if player(board) == X:\n newstate[ival][jval] = X\n else:\n newstate[ival][jval] = O\n return newstate\n\n #raise NotImplementedError", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move" ]
[ "0.8105419", "0.74080914", "0.7388347", "0.73284864", "0.7323322", "0.72788733", "0.7252915", "0.7215804", "0.72030765", "0.72030765", "0.7184667", "0.70937914", "0.70153326", "0.7014133", "0.7008801", "0.7003514", "0.7002293", "0.69419354", "0.693517", "0.69348997", "0.6908507", "0.69000673", "0.6897155", "0.68825406", "0.68706083", "0.68639475", "0.680844", "0.6763261", "0.6758143", "0.6748912", "0.6732388", "0.6730651", "0.67302865", "0.6730218", "0.6716199", "0.67072415", "0.6697106", "0.6686803", "0.6686069", "0.66838014", "0.6683459", "0.6668849", "0.666485", "0.66589147", "0.6657164", "0.66500616", "0.66171855", "0.6596276", "0.6589492", "0.6558643", "0.6552841", "0.65444696", "0.65268177", "0.6514629", "0.6498557", "0.64975005", "0.6497002", "0.6493691", "0.6467423", "0.6466476", "0.64604795", "0.6458959", "0.6444556", "0.64379406", "0.64342237", "0.6428214", "0.64180845", "0.6413758", "0.6405216", "0.63795304", "0.6362181", "0.6361813", "0.6353251", "0.63522816", "0.6344578", "0.6343548", "0.6340937", "0.63388085", "0.6338002", "0.63376856", "0.63376856", "0.6333187", "0.63287544", "0.6322808", "0.6281488", "0.6279367", "0.6252217", "0.6240589", "0.62278295", "0.62258077", "0.6224688", "0.62163687", "0.62117046", "0.6211699", "0.6193729", "0.6190101", "0.616511", "0.6162786", "0.6157419", "0.6154687", "0.6154634" ]
0.0
-1
Solve tile in column zero on specified row (> 1). Updates puzzle and returns a move string
def solve_col0_tile(self, target_row):
    move_str = 'ur'
    self.update_puzzle(move_str)
    cur_row, cur_col = self.current_position(target_row, 0)
    if cur_row == target_row and cur_col == 0:
        move_str += 'r' * (self._width - 2)
    else:
        move_str += self.position_tile(target_row-1, 1, cur_row, cur_col)
        move_str += 'ruldrdlurdluurddlur'
        move_str += 'r' * (self._width - 2)
    self.update_puzzle(move_str[2:])
    return move_str
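As a minimal sketch of the move-string convention assumed above ('u', 'd', 'l', 'r' slide the zero tile up, down, left, and right), the following self-contained helper applies a move string to a small grid; apply_moves and the sample board are illustrative only and are not part of the Puzzle class used in this record.

def apply_moves(grid, moves):
    # Locate the zero tile.
    zero_row, zero_col = next((row, col) for row, vals in enumerate(grid)
                              for col, val in enumerate(vals) if val == 0)
    deltas = {"u": (-1, 0), "d": (1, 0), "l": (0, -1), "r": (0, 1)}
    for move in moves:
        d_row, d_col = deltas[move]
        new_row, new_col = zero_row + d_row, zero_col + d_col
        assert 0 <= new_row < len(grid) and 0 <= new_col < len(grid[0]), "move off grid: " + move
        # Swap the zero tile with its neighbour in the move direction.
        grid[zero_row][zero_col], grid[new_row][new_col] = grid[new_row][new_col], grid[zero_row][zero_col]
        zero_row, zero_col = new_row, new_col
    return grid

board = [[4, 1, 2],
         [3, 5, 6],
         [0, 7, 8]]
# "ur" lifts the zero tile out of column 0, the first step solve_col0_tile takes.
print(apply_moves(board, "ur"))   # [[4, 1, 2], [5, 0, 6], [3, 7, 8]]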
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def solve_col0_tile(self, target_row):\r\n # check if curr_pos (i, 0) where i > 1\r\n assert self.lower_row_invariant(target_row, 0)\r\n move = \"ur\"\r\n self.update_puzzle(move)\r\n row, col = self.current_position(target_row, 0)\r\n if row == target_row and col == 0:\r\n move_to_target = (self.get_width() - 2) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n else:\r\n move_to_target = self.move_to_target(target_row - 1, 1, row, col)\r\n # add solver move to str\r\n move_to_target += \"ruldrdlurdluurddlu\" + (self.get_width() - 1) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)\r\n return move", "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n 
curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n 
current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * (col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 'ruldrdlurdluurddlu'\n elif col_difference == 
0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string", "def solve_col0_tile(self, target_row):\r\n # print '----------------------------------'\r\n # print 'SOLVING ZERO COLOUMN'\r\n assert self.lower_row_invariant(target_row,0), \"Invarian is False\"\r\n whole_move = ''\r\n #current_position = self.current_position(target_row, 0)\r\n current_row, current_col = self.current_position(target_row, 0)\r\n zero_row, zero_col = target_row, 0\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, 0)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ur'\r\n \r\n recomended_move = 'ur'\r\n whole_move += recomended_move\r\n zero_col += len(filter(lambda x: x=='r', recomended_move))\r\n zero_row -= len(filter(lambda x: x=='u', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n if self.current_position(target_row, 0) == \\\r\n (target_row, 0):\r\n # print 'TT stays in correct place after recomended move'\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n self.update_puzzle(zero_to_place_move)\r\n whole_move += zero_to_place_move\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move\r\n #move ZERO tile to the right\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n ### reposition TT to (target_row -1, 1)\r\n ### reposition ZERO tile to (target_row-1,0)\r\n \r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (current_col - zero_col) * 'r'\r\n path_for_zero = (zero_row - current_row) * 'u' + (current_col - zero_col) * 'r'\r\n whole_move += path_for_zero\r\n zero_col += len(filter(lambda x: x=='r', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n counter = 0\r\n while self.current_position(target_row, 0) != (target_row -1, 1) or \\\r\n (zero_row,zero_col) != (target_row-1,0):\r\n cyclic_moves = ''\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n # print 'We are in while loop'\r\n counter += 1\r\n if zero_row < current_row:\r\n # print 'Zero row under current TT '\r\n if self.current_position(target_row, 0) == (target_row -1, 1):\r\n # print 'TT is in the reccomended posiont (target_row -1, 1) \\n and we are movind zero to the left side of TT '\r\n cyclic_moves = 'ld'\r\n whole_move += 
cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n else:\r\n # print 'TT should be one tile down'\r\n cyclic_moves = 'lddru'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n #### Case 1 if ZERO located in the right of\r\n #### target tile (after it)\r\n if zero_col > current_col:\r\n # print ' Look in the up puzzle, zero in the right side'\r\n if current_col != 1:\r\n # print 'not under target place (target_row -1, 1)'\r\n cyclic_moves = 'dllur'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n whole_move += cyclic_moves\r\n # elif current_col == 1 and self._grid[zero_row+1][zero_col] < \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n elif current_col == 1: \r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self._grid[zero_row+1][zero_col] > \\\r\n self._grid[current_position[0]][current_position[1]]:\r\n print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n cyclic_moves = 'ul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n ### Solwing 3x2 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 3x2 puzzle formed recently'\r\n move3x2 = 'ruldrdlurdluurddlur'\r\n whole_move += move3x2\r\n zero_col -= len(filter(lambda x: x=='l', move3x2))\r\n zero_col += len(filter(lambda x: x=='r', move3x2))\r\n zero_row += len(filter(lambda x: x=='d', move3x2))\r\n zero_row -= len(filter(lambda x: x=='u', move3x2))\r\n self.update_puzzle(move3x2)\r\n # print 'Grid afret 3x2 solver move'\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n #####Moving ZERO to the (target_row - 1, n - 1) position where\r\n ##### 'n' is a grid height.\r\n # print self._width-1 - zero_col\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n whole_move += zero_to_place_move\r\n self.update_puzzle(zero_to_place_move)\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', 
self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += 
len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_row0_tile(self, target_col):\n move_str = 'ld'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(0, target_col)\n if cur_row == 0 and cur_col == target_col:\n return move_str\n else:\n move_str 
+= self.position_tile(1, target_col-1, cur_row, cur_col)\n move_str += 'urdlurrdluldrruld'\n self.update_puzzle(move_str[2:])\n return move_str", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def solve_row0_tile(self, target_col):\r\n # move the zero tile from position (0,j) to (1,j−1) \r\n # using the move string \"ld\" \r\n moves_str = \"\"\r\n moves_str += \"ld\"\r\n # check whether target tile is at position (0,j).\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str) \r\n current_row, current_col = temp_grid.current_position(0, target_col)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n \r\n # If target tile is not at position (0,j).\r\n # reposition the target tile to position (1,j−1) \r\n # with tile zero in position (1,j−2).\r\n if current_row != 0 or current_col != target_col:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"urdlurrdluldrruld\"\r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_row0_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, 0), \"tiles to right and below incorrectly ordered\"\n\n # Move zero tile from target position (target_row, 0) to (target_row - 1, 1).\n self.update_puzzle(\"ur\")\n\n move_str = \"\"\n \n # correct_tile's position is determined after moving zero tile \"ur\" \n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(target_row, 0)\n \n # Moves to reposition correct_tile to target position.\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(target_row, 0):\n move_str += str(self.position_tile(target_row - 1, 1, correct_tile))\n move_str += str(\"ruldrdlurdluurddlur\")\n\n # Moves to reposition zero tile to end of column of target_row + 1.\n move_str += str(\"r\" * (self.get_width() - 2)) \n \n self.update_puzzle(move_str)\n\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1) \n \n move_str = \"ur\" + move_str\n return move_str", "def solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = 
target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string", "def solve_row1_tile(self, target_col):\n cur_row, cur_col = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False)\n self.update_puzzle(move_str)\n return move_str", "def solve_row1_tile(self, target_col):\r\n row, col = self.current_position(1, target_col)\r\n move = self.move_to_target(1, target_col, row, col)\r\n # for next move\r\n move += \"ur\"\r\n \r\n self.update_puzzle(move)\r\n return move", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. \r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). 
\r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_row0_tile(self, target_col):\r\n assert self.row0_invariant(target_col)\r\n move = \"ld\"\r\n self.update_puzzle(move)\r\n \r\n row, col = self.current_position(0, target_col)\r\n if row == 0 and col == target_col:\r\n return move\r\n else:\r\n move_to_target = self.move_to_target(1, target_col - 1, row, col)\r\n # 2x3 puzzle solver\r\n move_to_target += \"urdlurrdluldrruld\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n return move", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right 
move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n 
zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def solve_row0_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row0_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n \n # Move zero tile from target position (0, target_col) to (1, target_col - 1) \n self.update_puzzle(\"ld\")\n \n move_str = \"\"\n\n # correct_tile's position is determined after moving zero tile \"ld\"\n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(0, target_col) \n \n # Moves to reposition correct_tile to target position, and\n # the zero tile to (1, target_col - 1).\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(0, target_col):\n move_str += str(self.position_tile(1, target_col - 1, correct_tile))\n move_str += str(\"urdlurrdluldrruld\")\n\n self.update_puzzle(move_str)\n\n assert self.row1_invariant(target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"ld\" + move_str\n return move_str", "def solve_row1_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row1_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n\n # Moves correct_tile to the target position (1, target_col),\n # and the zero tile above the target position at (0, target_col). 
\n correct_tile = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, correct_tile) \n move_str += \"ur\"\n self.update_puzzle(move_str)\n\n assert self.row0_invariant(target_col)\n \n return move_str", "def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"", "def solve_row1_tile(self, target_col):\n # replace with your code\n print target_col\n assert self.row1_invariant(target_col), 'False precondition'\n string = ''\n target = self.current_position(1, target_col)\n row_difference = 1 - target[0]\n col_difference = target_col - target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0:\n string += 'urrdl' * (col_difference - 1)\n string += 'ur'\n elif row_difference == 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n print 'Row 1 Path', string\n self.update_puzzle(string)\n assert self.row0_invariant(target_col), 'False string'\n return string", "def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans += str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else 
statement used when correct_tile is 1+ rows above target position.\n else:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dlu\")\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n return ans", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n 
cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def solve_row1_tile(self, target_col):\n movements = self.solve_interior_tile(1, target_col)\n self.update_puzzle(\"ur\")\n return movements + \"ur\"", "def solve_interior_tile(self, target_row, target_col):\r\n assert self._grid[target_row][target_col] == 0\r\n moves_str = \"\"\r\n target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n \r\n moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_interior_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < 
self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def solve_interior_tile(self, target_row, target_col):\n cur_row, cur_col = self.current_position(target_row, target_col)\n move_str = self.position_tile(target_row, target_col, cur_row, cur_col)\n self.update_puzzle(move_str)\n return move_str", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve_2x2(self):\n cur_row, cur_col = self.current_position(0, 0)\n move_str = 'u' * cur_row + 'l' * cur_col\n self.update_puzzle(move_str)\n if self.check_2x2_solved():\n return move_str\n else:\n while not self.check_2x2_solved():\n move_str += 'rdlu'\n self.update_puzzle('rdlu')\n return move_str", "def solve_2x2(self):\n \n assert self.get_number(1,1) == 0, \"zero tile should be at row 1, col 1\"\n assert self.row1_invariant(1), \"tiles to right and below incorrectly ordered\"\n \n # Moves the zero tile to (0,0).\n self.update_puzzle(\"lu\")\n\n # Repositions the upper left 2x2 part up to 3 times, \n # each time checking whether the puzzle is solved.\n rotation_num = 0\n if self.row0_invariant(0) == False:\n for dummy_rotation in range(3):\n while self.row0_invariant(0) == False:\n rotation_num += 1\n self.update_puzzle(\"rdlu\")\n\n assert self.row0_invariant(0), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"lu\" + (\"rdlu\" * rotation_num) \n return move_str", "def solve_interior_tile(self, target_row, target_col):\n # replace with your code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', col_difference\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n 
self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n 
self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n 
self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def solve_2x2(self):\r\n assert self.row1_invariant(1)\r\n pos_1_0 = self.get_number(1, 0)\r\n pos_0_0 = self.get_number(0, 0)\r\n pos_0_1 = self.get_number(0, 1)\r\n # create grid and solve individual cases\r\n grid = [pos_1_0, pos_0_0, pos_0_1]\r\n if grid == [self.get_width(), 1, self.get_width() + 1]:\r\n move = \"ul\"\r\n elif grid == [1, self.get_width() + 1, self.get_width()]:\r\n move = \"lurdlu\"\r\n elif grid == [self.get_width() + 1, self.get_width(), 1]:\r\n move = \"lu\"\r\n self.update_puzzle(move)\r\n return move", "def solve(self, board: List[List[str]]) -> None:\n n = len(board)\n if n == 0:\n return\n m = len(board[0])\n for i in range(m):\n self.traverse(board, 0, i, n, m)\n self.traverse(board, n - 1, i, n, m)\n for i in range(n):\n self.traverse(board, i, 0, n, m)\n self.traverse(board, i, m - 1, n, m)\n for i in range(n):\n for j in range(m):\n board[i][j] = 'X' if board[i][j] == 'O' else board[i][j]\n for i in range(n):\n for j in range(m):\n board[i][j] = 'O' if board[i][j] == '_' else board[i][j]", "def solve(self, board) -> None:\n if board == [[]] or board == []:\n return\n\n r, c = len(board), len(board[0])\n\n from collections import deque\n queue = deque()\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n queue.append([i, j])\n board[i][j] = 'M'\n\n while queue:\n i, j = queue.popleft()\n for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):\n if 0 <= x <= r - 1 and 0 <= y <= c - 1 and board[x][y] == 'O':\n board[x][y] = 'M'\n queue.append([x, y])\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def 
solve(self, board: List[List[str]]) -> None:\n if board == [] or board == [[]]: # corner case\n return\n\n r, c = len(board), len(board[0])\n\n def dfs(i, j): # visited i, j neighbors and change o to M\n if i < 0 or i > r - 1 or j < 0 or j > c - 1 or board[i][j] == 'X' or board[i][j] == 'M':\n return\n\n board[i][j] = 'M'\n dfs(i - 1, j)\n dfs(i + 1, j)\n dfs(i, j - 1)\n dfs(i, j + 1)\n\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n dfs(i, j)\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n rows = len(board)\n cols = len(board[0])\n\n def explore(i, j):\n if i < 0 or i >= rows:\n return\n if j < 0 or j >= cols:\n return\n if board[i][j] != \"O\":\n return\n board[i][j] = \"Y\"\n explore(i + 1, j)\n explore(i - 1, j)\n explore(i, j + 1)\n explore(i, j - 1)\n\n for i in [0, rows - 1]:\n for j in range(cols):\n explore(i, j)\n\n for j in [0, cols - 1]:\n for i in range(rows):\n explore(i, j)\n\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == \"Y\":\n board[i][j] = \"O\"\n elif board[i][j] == \"O\":\n board[i][j] = \"X\"", "def solve_2x2(self):\r\n moves_str = \"\"\r\n # move zero to the most upper left\r\n zero_row, zero_col = self.current_position(0, 0)\r\n ups = zero_row - 0\r\n lefts = zero_col - 0\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n \r\n # get zero, one, two, three positions\r\n self.update_puzzle(moves_str)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n counter = 0\r\n while counter <= 3 and \\\r\n (zero_row != 0 or zero_col != 0 or \\\r\n one_row!= 0 or one_col != 1 or \\\r\n two_row != 1 or two_col != 0 or \\\r\n three_row != 1 or three_col != 1):\r\n move = CIRCLES[\"RIGHT_CIRCLES\"]\r\n moves_str += move\r\n self.update_puzzle(move)\r\n counter += 1\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n \r\n print \"solve_2x2\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n if board is None or len(board) == 0:\n return \n row, col = len(board), len(board[0])\n for i in range(row):\n self.dfs(board, i, 0)\n self.dfs(board, i, col - 1)\n for j in range(col):\n self.dfs(board, 0, j)\n self.dfs(board, row-1, j)\n for i in range(row):\n for j in range(col):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '-':\n board[i][j] = 'O'", "def position_tile(self, target_row, target_col, current_row, current_col):\r\n moves_str = \"\"\r\n # current target is on the upper of 0\r\n if current_col == target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the left of 0\r\n elif current_row == target_row and current_col < target_col:\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n moves_str += 
CIRCLES[\"UP_CIRCLE\"]\r\n # current target is on the upperleft of 0\r\n elif current_row < target_row and current_col < target_col:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n moves_str += \"dru\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the upperright of 0\r\n elif current_col > target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"dlu\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the right of 0\r\n elif current_col > target_col and current_row == target_row:\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"ulld\"\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n m, n = len(board), len(board[0])\n\n def dfs(x, y):\n if not 0 <= x < m or not 0 <= y < n or board[x][y] != \"O\":\n return\n direction = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n board[x][y] = \"Y\"\n for d in direction:\n dfs(x+d[0], y+d[1])\n\n for i in range(m):\n dfs(i, 0)\n dfs(i, n-1)\n for i in range(1, n-1):\n dfs(0, i)\n dfs(m-1, i)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"Y\":\n board[i][j] = \"O\"", "def human_go(self, board):\r\n coord_pattern = re.compile(\"[0-{}]$\".format(board.shape[1]))\r\n print(\"Enter Column and press enter.\")\r\n input_str = input(\"(from 0-6)\\n\")\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n col = int(input_str)\r\n if board[0][col] != 0:\r\n print(\"That column is already full, please try again\")\r\n self.human_go()\r\n else:\r\n for row in board[::-1]:\r\n if row[col] == 0:\r\n row[col] = -1\r\n return board", "def solve_tour(self):\n\t\tboard = [[-1 for _ in range(self.N)]for _ in range(self.N)]\n\t\tboard[0][0] = 0\n\n\t\tz = self.find_tour(board, 0, 0, 1)\n\t\tif z:\n\t\t\tfor i in range(self.N):\n\t\t\t\tfor j in range(self.N):\n\t\t\t\t\tself.solution.append(board[i][j])\n\t\t\tprint board\n\t\t\treturn self.solution\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"No solution\")", "def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is 
\"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\"", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def solve(self, board: List[List[str]]) -> None:\r\n if not board:\r\n return\r\n m, n = len(board), len(board[0])\r\n board_filter = lambda p: 0 <= p[0] < m and 0 <= p[1] < n and board[p[0]][p[1]] == 'O'\r\n queue = list(filter(board_filter, [(x, y) for r in range(max(m, n)) \r\n for x, y in ((r, 0), (r, n-1), (0, r), (m-1, r))]))\r\n while queue:\r\n x, y = queue.pop()\r\n board[x][y] = 'W'\r\n queue.extend(list(filter(board_filter, ((x-1, y), (x+1, y), (x, y-1), (x, y+1)))))\r\n for row in board:\r\n for i, c in enumerate(row):\r\n row[i] = 'XO'[c=='W']", "def solve(self):\n # If board is filled, board is trivially solved\n if self.check_full_board():\n return self.done\n\n # Iterate over every square in the board\n for row in range(self.num_rows):\n for col in range(self.num_columns):\n\n # If square is empty, begin plugging in possible values\n if self.check_empty_space(row, col):\n for val in range(1, 10):\n if not self.check_row(val, row) and \\\n not self.check_column(val, col) and \\\n not self.check_box(val, self.what_box(row, col)):\n self.board[row][col] = val\n \n if self.solve():\n return self.done()\n \n # Didn't work; undo assigment\n self.board[row][col] = ' '\n\n # Bad path; backtrack\n return False", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def move(self, row, col, player):\r\n if player == 1:\r\n self.mat[row][col] = 1\r\n else:\r\n self.mat[row][col] = -1\r\n if self.checkrow(player,row) or self.checkcol(player,col):\r\n return player\r\n if row == col or row + col == self.size-1:\r\n if self.checkdiag(player):\r\n return player\r\n return 0", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, 
None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def solve(self, board: List[List[str]]) -> None:", "def move(self, row: int, col: int, player: int) -> int:\n self.board[row][col] = 1 if player == 1 else -1\n rowsum = sum(self.board[row])\n colsum = sum([self.board[r][col] for r in range(self.n)])\n diagsum1 = sum([self.board[i][i] for i in range(self.n)])\n diagsum2 = sum([self.board[i][-i-1] for i in range(self.n)])\n if player == 1:\n if rowsum == self.n or colsum == self.n or diagsum1 == self.n or diagsum2 == self.n:\n return 1\n else:\n if rowsum == -self.n or colsum == -self.n or diagsum1 == -self.n or diagsum2 == -self.n:\n return 2\n return 0", "def solve(self, board):\n def dfs(board, r, c):\n if r < 0 or c < 0 or r > rows - 1 or c > cols - 1 or board[r][c] == 'X' or board[r][c] == '#':\n return\n board[r][c] = '#'\n dfs(board, r - 1, c)\n dfs(board, r + 1, c)\n dfs(board, r, c - 1)\n dfs(board, r, c + 1)\n\n if len(board) == 0:\n return;\n rows = len(board)\n cols = len(board[0])\n for i in range(rows):\n for j in range(cols):\n if (i == 0 or j == 0 or i == rows - 1 or j == cols - 1) and board[i][j] == 'O':\n dfs(board, i, j)\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0:\n return\n self.h = len(board)\n self.w = len(board[0])\n self.board = board\n for i in range(self.h):\n for j in range(self.w):\n if i == 0 or i == self.h-1 or j == 0 or j == self.w-1:\n #print (i,j)\n self.dfs((i,j))\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"O\":\n self.board[i][j]=\"X\"\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"#\":\n self.board[i][j]=\"O\"", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return\n\n m = len(board)\n n = len(board[0])\n\n uf = UnionFind(m * n + 1)\n dummy = m * n\n\n # connect 'O' at first and last col with dummy\n for i in range(m):\n if board[i][0] == 'O':\n uf.union(dummy, i * n)\n if board[i][-1] == 'O':\n uf.union(dummy, i * n + n - 1)\n\n # connect 'O' at first and last row with dummy\n for j in range(n):\n if board[0][j] == 'O':\n uf.union(dummy, j)\n if board[-1][j] == 'O':\n uf.union(dummy, n * (m-1) + j)\n\n d = [(1, 0), (0, 1), (0, -1), (-1, 0)]\n\n for i in range(1, m-1):\n for j in range(1, n-1):\n if board[i][j] == 'O':\n for di, dj in d:\n x = i+di\n y = j+dj\n if board[x][y] == 'O':\n uf.union(x*n+y, i*n+j)\n\n # change not connected 'O' with 'X'\n for i in range(1, m-1):\n for j in range(1, n-1):\n if not uf.connected(dummy, i * n + j):\n board[i][j] = 'X'", "def solve(self, board: 'List[List[str]]') -> 'None':\n if not board:\n return\n m, n = len(board), len(board[0])\n save = [ij for k in range(m + n) for ij in ((0, k), (m - 1, k), (k, 0), (k, n - 1))]\n while save:\n i, j = save.pop()\n if -1 < i < m and -1 < j < n and board[i][j] == 'O':\n board[i][j] = 'S'\n save += (i + 1, j), 
(i - 1, j), (i, j + 1), (i, j - 1)\n for row in board:\n for i, c in enumerate(row):\n row[i] = 'XO'[c == 'S']", "def solve(self, board: List[List[str]]) -> None:\n def dfs(board, i, j):\n m = len(board)\n n = len(board[0])\n if i < 0 or i >= m or j < 0 or j >= n: return\n\n if board[i][j] != 'O': return\n\n board[i][j] = '#'\n [dfs(board, i+di, j+dj) for di, dj in [(0, 1), (1, 0), (0, -1), (-1, 0)]]\n\n if len(board) == 0: return\n m = len(board)\n n = len(board[0])\n\n for i in range(m):\n dfs(board, i, 0)\n dfs(board, i, n-1)\n\n for j in range(n):\n dfs(board, 0, j)\n dfs(board, m-1, j)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n\n def expansion(i,j):\n for di,dj in {(-1,0),(1,0),(0,-1),(0,1)}:\n if -1<i+di<m and -1<j+dj<n and state[i+di][j+dj]=='O':\n return True\n return False\n\n if not board:\n return board\n\n m = len(board)\n n = len(board[0]) \n state = [['X']*n for _ in range(m)]\n\n for j in range(n):\n state[0][j] = board[0][j]\n state[m-1][j] = board[m-1][j]\n \n for i in range(m):\n state[i][0] = board[i][0]\n state[i][n-1] = board[i][n-1]\n \n flag = 1\n\n while flag:\n flag = 0\n\n for k in range(1, (1+min(m,n))//2):\n for j in range(k,n-k):\n if board[k][j]=='O' and state[k][j] == 'X' and expansion(k,j):\n state[k][j] = 'O'\n flag = 1\n \n if board[m-1-k][j]=='O' and state[m-1-k][j] == 'X' and expansion(m-1-k,j):\n state[m-1-k][j] = 'O'\n flag = 1\n \n for i in range(k,m-k):\n if board[i][k]=='O' and state[i][k] == 'X' and expansion(i,k):\n state[i][k] = 'O'\n flag = 1\n \n if board[i][n-1-k]=='O' and state[i][n-1-k] == 'X' and expansion(i,n-1-k):\n state[i][n-1-k] = 'O'\n flag = 1\n\n board[:] = state[:]", "def move(self, row: int, col: int, player: int) -> int:\n if player == 1:\n self.newList[row][col] = 1\n self.colSum[col] += 1\n self.rowSum[row] += 1\n if row == col:\n self.diag += 1\n if row + col == (self.n - 1):\n self.revDiag += 1\n if self.rowSum[row] == self.n or self.colSum[col] == self.n or self.diag == self.n or self.revDiag == self.n:\n return 1\n if player == 2:\n self.newList[row][col] = -1\n self.colSum[col] -= 1\n self.rowSum[row] -= 1\n if row == col:\n self.diag -= 1\n if row + col == (self.n - 1):\n self.revDiag -= 1\n if self.rowSum[row] == -self.n or self.colSum[col] == -self.n or self.diag == -self.n or self.revDiag == -self.n:\n return 2\n \n return 0", "def move_to_target(self, target_row, target_col, row, col):\r\n move = \"\"\r\n # typical move to move target tile to target pos.\r\n solver_move = \"druld\"\r\n # move up first\r\n move = (target_row - row) * \"u\"\r\n # conditional statements for moving the tile:\r\n # 1. case curr_pos of tile and target_tile are in same col\r\n if (target_col - col) == 0:\r\n move += \"ld\" + ((target_row - row) - 1) * solver_move\r\n else:\r\n # 2. curr_pos of tile is on the left of target pos\r\n if (target_col - col) > 0:\r\n move += (target_col - col) * \"l\"\r\n if row == 0:\r\n move += (abs(target_col - col) - 1) * \"drrul\"\r\n else:\r\n move += (abs(target_col - col) - 1) * \"urrdl\"\r\n # 3. 
curr_pos of tile is on the right of target pos:\r\n elif (target_col - col) < 0:\r\n move += (abs(target_col - col) - 1) * \"r\"\r\n if row == 0:\r\n move += abs(target_col - col) * \"rdllu\"\r\n else:\r\n move += abs(target_col - col) * \"rulld\"\r\n move += (target_row - row) * solver_move\r\n return move", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] 
== new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def solve(self, board: List[List[str]]) -> None:\n def DFS(board, i, j):\n q = []\n q.append([i, j])\n \n while q:\n x, y = q.pop()\n board[x][y] = \"*\"\n neighbors = ((0, 1), (0, -1), (1, 0), (-1, 0))\n for dx, dy in neighbors:\n if 0 <= x + dx <= len(board) - 1 and 0 <= y + dy <= len(board[0]) - 1 and board[x + dx][y + dy] == \"O\":\n q.append([x + dx, y + dy])\n \n \n # first row\n i = 0\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last row\n i = len(board) - 1\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # first column\n j = 0\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last column\n j = len(board[0]) - 1\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n \n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"*\":\n board[i][j] = \"O\"", "def solve_util(self, board, col):\n try:\n if col == self.N:\n self.print_sol(board)\n return True\n\n # Trying to place this queen in all rows one by one\n res = False\n for i in range(self.N):\n if self.is_safe(board, i, col):\n board[i][col] = 1\n res = self.solve_util(board, col + 1) or res\n if type(res) == dict:\n return res\n board[i][col] = 0 # Backtracking...\n\n # if queen cannot be placed in any row in this col, then alas\n # we return false..\n return res\n except KeyboardInterrupt:\n print('Keyboard Interrupted!')\n return self.Outputs", "def solve(self, board):\r\n if not board or not board[0]:\r\n return\r\n \r\n self.m = len(board)\r\n self.n = len(board[0])\r\n boarder = []\r\n \r\n # Collecting all the 'O' on the boarder\r\n for i in range(self.m):\r\n if board[i][0] == 'O':\r\n boarder.append([i, 0])\r\n if board[i][self.n-1] == 'O':\r\n boarder.append([i, self.n-1])\r\n for j in range(self.n):\r\n if board[0][j] == 'O':\r\n boarder.append([0, j])\r\n if board[self.m-1][j] == 'O':\r\n boarder.append([self.m-1, j])\r\n \r\n for row, col in boarder:\r\n self.BFS(board, row, col)\r\n \r\n for row in range(self.m):\r\n for col in range(self.n):\r\n if board[row][col] == 'O':\r\n board[row][col] = 'X'\r\n elif board[row][col] == 'E':\r\n board[row][col] = 'O'\r\n print(board)", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if 
not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def solve(board) -> None:\n rows = len(board)\n if rows==0:\n return board\n cols = len(board[0])\n \n def is_border(rc):\n (rr, cc) =rc\n if rr<rows and rr< cols and rr>=0 and cc>=0 and board[rr][cc]=='O' and (rr==0 or rr==rows-1 or cc==0 or cc==cols-1):\n return True\n return False\n \n transf = []\n for r in range(rows):\n for c in range(cols):\n if board[r][c]=='O' and not is_border((r,c)) and not any(map(is_border, [(r-1, c), (r+1, c), (r, c-1), (r, c+1)])):\n transf.append((r,c))\n if transf:\n for r,c in transf:\n board[r][c]='X'\n return board", "def move(self, row, col, player):\n offset = player * 2 - 3 # 1 or -1\n self.row[row] += offset\n self.col[col] += offset\n if row == col:\n self.diag += offset\n if row + col == self.n - 1:\n self.anti_diag += offset\n if self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 2\n if -self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 1\n return 0", "def move(self, row: int, col: int, player: int) -> int:\n n = self.n\n if player == 1:\n self.rows_1[row] += 1\n self.cols_1[col] += 1\n if player == 2:\n self.rows_2[row] += 1\n self.cols_2[col] += 1\n if row == col:\n self.diag1[row] = player\n if row + col + 1 == n:\n self.diag2[row] = player\n f = 0\n g = 0\n for i in range(n):\n if self.rows_1[row] == n or self.cols_1[col] == n:\n return 1\n if self.rows_2[row] == n or self.cols_2[col] == n:\n return 2 \n if self.diag1[i] != self.diag1[0]:\n f = 1\n if self.diag2[i] != self.diag2[0]:\n g = 1\n if f == 0:\n return self.diag1[0]\n if g == 0:\n return self.diag2[0]\n return 0", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n self.nRow, self.nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < self.nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < self.nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, self.nRow - 1]:\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, self.nCol - 1]:\n for kr in range(self.nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(self.nRow):\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def minimax(state, depth, player):\n if depth == 9:\n row = choice([0, 1, 2])\n col = choice([0, 1, 2])\n return row, col, ''\n\n if player == COMP:\n best = [-1, -1, float(\"-inf\")]\n else:\n best = [-1, -1, float(\"inf\")]\n\n if depth == 0 or state.has_tic_tac_toe(COMP) or state.has_tic_tac_toe(HUMAN):\n score = heuristic(state, depth)\n return [-1, -1, score]\n \"\"\"\n Checks if any of the player is one away from winning in any board and make the appropriate move.\n \"\"\"\n if player==COMP:\n empty_cells=get_empty_cells(state)\n dangerous_cells=state.is_one_away_from_tic_tac_toe((player%2)+1)\n if dangerous_cells:\n found_dangerous_cells=True\n else:\n found_dangerous_cells=False\n print \"no dangerous local boards\"\n favoring_cells=state.is_one_away_from_tic_tac_toe(player)\n if favoring_cells:\n found_favoring_cells=True\n else:\n found_favoring_cells=False\n print \"no favoring local 
boards\"\n if found_dangerous_cells==False and found_favoring_cells==False:\n pass\n if found_dangerous_cells==False and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in favoring_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==False:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n else:\n empty_cells=get_empty_cells(state)\n for cell in empty_cells:\n row, col = cell[0], cell[1]\n state.board[row][col] = player\n score = minimax(state, depth - 1, (player % 2) + 1)\n state.board[row][col] = 0\n score[0], score[1] = row, col\n if player == COMP:\n if score[2] >= best[2]:\n if score[2]==best[2]:\n \"\"\"\n Favors middle positions over sides or corners\n MIDDLE > CORNERS > SIDES\n \"\"\"\n if (best[0]==0 and best[1]==0) or (best[0]==0 and best[1]==2) or (best[0]==2 and best[1]==0) or (best[0]==2 and best[1]==2):\n if score[0]==0 and score[1]==0: #favoring centre position over diagonal position\n best=score\n print(\"centre position chosen over diagonal positions\")\n else:\n if ((score[0]==0 and score[1]==1) or (score[0]==1 and score[1]==0) or (score[0]==1 and score[1]==2) or (score[0]==2 and score[1]==1))==0:\n best=score #favoring any position over side position as long as the new position is not a side position too\n print(\"diagonal and centre positions chosen over side positions\")\n else:\n best = score\n else:\n bestMoves=[]\n if score[2] < best[2]:\n best=score\n return best", "def make_move(board, picked_column, player):\n row = find_first_free_cell(board, picked_column)\n board[row][picked_column] = player\n return board, row", "def solve_(self, x, y, board, path):\n if self.SOLVED:\n return\n if self.is_done(board):\n self.print_path(path)\n self.SOLVED = True\n return\n for new_x, new_y in self.next_click(x, y, board):\n if new_x is None or new_y is None:\n return\n new_board = self.click(new_x, new_y, board)\n self.solve_(\n x=0, y=0,\n board=new_board,\n path=path + [((new_x, new_y), new_board)]\n )", "def position_tile(self, target_row, target_col, cur_row, cur_col, need_ld=True):\n move_str = ''\n if cur_row == target_row:\n if cur_col < target_col:\n move_str += 'l' * (target_col - cur_col)\n if target_col - cur_col > 1:\n move_str += 'ur'\n move_str += 'druldru' * (target_col - cur_col - 1)\n else:\n move_str += 'ur' if not need_ld else ''\n need_ld = False\n else:\n move_str += 'r' * (cur_col - target_col)\n if cur_col - target_col > 1:\n move_str += 'ul'\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n else:\n need_ld = False\n else:\n move_str += 'u' * (target_row - cur_row)\n if cur_col < target_col:\n move_str += ('l' * (target_col - cur_col) + 'dru')\n move_str += 'druldru' * (target_col - cur_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n elif cur_col > target_col:\n move_str += ('r' * (cur_col - target_col) + 'dlu')\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n else:\n move_str += 'lddru' * (target_row - cur_row - 1)\n if need_ld:\n move_str += 'ld'\n return move_str", "def solve(self, board: 'List[List[str]]') -> 'None':\n\n def dfs(i, j, tmp):\n nonlocal flag\n if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):\n flag = False\n return\n if board[i][j] != 'O' or [i, j] in tmp:\n return\n tmp.append([i, j])\n dfs(i - 1, j, tmp)\n dfs(i + 1, j, tmp)\n 
dfs(i, j + 1, tmp)\n dfs(i, j - 1, tmp)\n return tmp\n\n change = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 'O' and [i, j] not in change:\n tmp = []\n flag = True\n tmp = dfs(i, j, tmp[:])\n if flag:\n for loc in tmp:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'\n\n for loc in change:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'", "def test_perform_move(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertFalse(p.perform_move(\"taco\"))\n self.assertTrue(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,0],[7,8,6]])\n self.assertFalse(p.perform_move('right'))\n p = hw.create_tile_puzzle(2, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('up'))\n self.assertFalse(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,0,4],[5,6,3,7]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertFalse(p.perform_move('down'))\n self.assertFalse(p.perform_move('left'))\n self.assertEqual(p.get_board(), [[0,1,2,3]])", "def move(self, row, col, player):\n if self.winning == True:\n return\n if player == 1:\n val = 1\n else:\n val = -1\n self.row[row] += val\n self.col[col] += val\n if row == col:\n self.diagonal += val\n n = len(self.row)\n if row + col == n - 1:\n self.antidiagonal += val\n if abs(self.row[row]) == n or abs(self.col[col]) == n or abs(self.diagonal) == n or abs(self.antidiagonal) == n:\n self.winning = True\n return player\n return 0", "def move(self, row, col, player):\n toadd = 1 if player == 1 else -1\n \n self.row[row] += toadd\n self.col[col] += toadd\n if row == col: self.diagonal += toadd\n if col == self.n - row -1 : self.antidiag += toadd\n \n if abs(self.row[row]) == self.n or abs(self.col[col]) == self.n or abs(self.diagonal) == self.n or abs(self.antidiag) == self.n:\n return player\n else:\n return 0", "def move(self, row, col, player):\n if self.winning == True:\n return\n self.matrix[row][col] = player\n n = len(self.matrix)\n indicator = True\n for i in range(n):\n if self.matrix[row][i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n \n indicator = True\n for i in range(n):\n if self.matrix[i][col] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n \n if row == col:\n indicator = True\n for i in range(n):\n if self.matrix[i][i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n if row + col == n - 1:\n indicator = True\n for i in range(n):\n if self.matrix[i][n - 1 - i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n return 0", "def solve_step(self,puzzle_grid,x,y):\n self.puzzleGrid = puzzle_grid\n if(self.foundStep == False):\n self.targetCell = self.puzzleGrid.grid[x][y]\n if(self.targetCell.isSolved == False):\n self.calculate_possibilities()\n if len(self.targetCell.possibilities) == 1: #README method 1\n self.targetCell.solve()\n return True\n else:\n return self.check_neighbours() #README method 2", "def solve(board):\r\n \r\n #An O(2mn) time solution; the first O(mn) traversal is to preform a bfs on all tiles attached to edge 'O' tiles (can't convert to 'X's); the second is to convert all remaining 'O's into 'X's\r\n \r\n def bfs(curr,r,c):\r\n if not curr: return\r\n prev = len(curr)\r\n for n in range(0,prev):\r\n i,j = 
curr[n][0],curr[n][1]\r\n board[i][j] = 'A'\r\n for x,y in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n x_n = i+x\r\n y_n = j+y\r\n if x_n >= 0 and x_n < r and y_n >= 0 and y_n < c and board[x_n][y_n] == \"O\":\r\n curr += [(x_n,y_n)]\r\n bfs(curr[prev:],r,c)\r\n\r\n \r\n q,r,c = [],len(board),len(board[0])\r\n if not r or q: return\r\n\r\n for i in range(r):\r\n for j in range(c):\r\n if (i==0 or j==0 or i==r-1 or j==c-1) and board[i][j] == \"O\":\r\n q += [(i,j)]\r\n \r\n bfs(q,r,c)\r\n\r\n for i in range(r):\r\n for j in range(c): \r\n if board[i][j] == \"O\": \r\n board[i][j] = \"X\"\r\n elif board[i][j] == \"A\":\r\n board[i][j] = \"O\"\r\n \r\n return", "def move(self, row: int, col: int, player: int) -> int:\n s = -1 if player == 1 else 1\n\n self.rows[row] += s\n if abs(self.rows[row]) == self.n:\n return player\n\n self.cols[col] += s\n if abs(self.cols[col]) == self.n:\n return player\n\n if row == col:\n self.diagonals[0] += s\n if abs(self.diagonals[0]) == self.n:\n return player\n\n if (row + col) == self.n - 1:\n self.diagonals[1] += s\n if abs(self.diagonals[1]) == self.n:\n return player\n\n return 0", "def solve(self, board) -> None:\n for index in range (1, len(board)-1):\n arr = board[index]\n for ch in range(1, len(arr)-1):\n if arr[ch] is 'O':\n safe = True\n if ch-1 == 0 and arr[ch-1] is 'O':\n safe = False\n if ch +1 == len(arr)-1 and arr[ch+1] is 'O':\n safe = False\n if index -1 == 0 and board[index-1][ch] is 'O':\n safe = False\n if index + 1 == len(board)-1 and board[index + 1][ch] is 'O':\n safe = False\n if safe:\n arr[ch] = 'X'", "def solve(self, board) -> None:\n coords = []\n board_len = len(board)\n row_len = len(board[0]) - 1\n # top\n # coords.append([[0, i] for i, q in enumerate(board[0]) if q == \"O\"])\n # # bottom\n # coords.append(\n # [[board_len, i] for i, q in enumerate(board[board_len]) if q == \"O\"]\n # )\n for i in range(board_len):\n row_coord = [[i,indx] for indx, q in enumerate(board[i]) if q == \"O\"]\n # import pdb; pdb.set_trace()\n for x in row_coord:\n coords.append(x)\n for x in coords:\n if len(x) == 0:\n continue\n if x[0] == 0:\n print(\"top border\")\n elif x[0] == board_len - 1:\n print(\"bottom border\")\n elif x[1] == 0:\n print(\"left border\")\n elif x[1] == row_len:\n prin(\"right border\")", "def make_move(self, column):\r\n trans_board = numpy.transpose(self.__board[::1]) # transpose the\r\n # board so that columns are now arrays\r\n if 0 not in trans_board[column] or self.get_winner() or column >= \\\r\n self.BOARD_COLUMNS or column < 0:\r\n # column is full, illegal or the game is already finished\r\n return self.ILLEGAL_MOVE # exception?\r\n else:\r\n reversed_col = list(reversed(trans_board[column]))\r\n for hole in reversed_col:\r\n if hole == 0:\r\n row_i = self.BOARD_ROWS - 1 - reversed_col.index(hole)\r\n self.__board[row_i][column] = self.__cur_player\r\n winner = self.get_winner()\r\n if winner: # is not none\r\n return winner\r\n self.__switch_player()", "def solve_soduku(sudoku, screen):\n\n myfont = pygame.font.SysFont('Times New Roman', 30)\n\n # Creates a copy of the sudoku board so that we don't mess up the original board\n solved_board = sudoku.board\n\n # Stores the index of the next number that should be tried (the index will be used with the possible_nums list)\n try_new_nums = [[0] * 9 for y in range(9)]\n\n # Creates a list that will act like a stack for the depth first search (stores tuples (row, col) for each unsolved square)\n nodes = [sudoku.find_next_empty_node((0, -1))]\n\n done = False\n\n # Keeps 
running until the puzzle is either solved or runs out of possible combinations\n while len(nodes) != 0:\n\n time.sleep(.001)\n\n if not done:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n\n pygame.display.update()\n\n # finds all possible numbers that can go into the current unsolved square\n one = set(sudoku.check_vertically(nodes[len(nodes) - 1], solved_board))\n two = set(sudoku.check_horizontally(nodes[len(nodes) - 1], solved_board))\n three = set(sudoku.check_box(nodes[len(nodes) - 1], solved_board))\n possible_nums = list(one.intersection(two).intersection(three))\n\n # Determines if there is a number that can be put into the current unsolved square\n if len(possible_nums) > 0:\n\n # Stores the current number in the current unsolved square\n curr_num = solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]]\n\n # Stores the next number that will be tried in the current unsolved square\n possible_next_num = possible_nums[\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] % len(possible_nums)]\n\n # Makes sure that the code doesn't get stuck trying the same combos\n if try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] == len(possible_nums):\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Makes sure that the code doesn't get stuck on trying the same number\n if possible_next_num == curr_num:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Sets the unsolved square to the next number that is to be tried\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = possible_next_num\n\n # Changes which index will be used to find a different number if the new number does not work\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] += 1\n\n # if there are no possible numbers for the current square, it backtracks to the last number that can change\n else:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Determines if there is still an empty unsolved square left\n if sudoku.has_next_emtpy_node(nodes[len(nodes) - 1]):\n nodes.append(sudoku.find_next_empty_node(nodes[len(nodes) - 1]))\n else:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n done = True" ]
[ "0.80090946", "0.8002715", "0.79543203", "0.7940283", "0.7870043", "0.7847743", "0.7818097", "0.77486104", "0.774018", "0.7731879", "0.77268267", "0.7707487", "0.77027786", "0.76879483", "0.7634697", "0.76258874", "0.7590558", "0.7560754", "0.7522215", "0.75079787", "0.75023305", "0.74233866", "0.7398959", "0.73937315", "0.738067", "0.736368", "0.7357382", "0.7326966", "0.731067", "0.7291728", "0.71680576", "0.71333486", "0.7080731", "0.7066218", "0.70588666", "0.7035129", "0.6974126", "0.69386816", "0.68907726", "0.6720271", "0.6720271", "0.6720271", "0.6713385", "0.6713165", "0.6713165", "0.667646", "0.665483", "0.6640822", "0.65920466", "0.6521621", "0.6511639", "0.6478555", "0.6446402", "0.644615", "0.6430197", "0.6418134", "0.63970447", "0.63651913", "0.635803", "0.6357214", "0.6356915", "0.6327227", "0.630201", "0.6282386", "0.6250894", "0.62454027", "0.6231285", "0.62211096", "0.6216771", "0.6215996", "0.62067497", "0.6201949", "0.61826205", "0.6179529", "0.617567", "0.61739886", "0.6143129", "0.6139901", "0.6138184", "0.61225945", "0.61218303", "0.61206347", "0.6099728", "0.60626864", "0.6058793", "0.60554326", "0.6046122", "0.6045858", "0.6039182", "0.6038449", "0.6036707", "0.6032714", "0.6029427", "0.602873", "0.602484", "0.60149074", "0.5987895", "0.5981511", "0.5981155", "0.59749615" ]
0.777891
7
Solve the tile in row zero at the specified column. Updates the puzzle and returns a move string
def solve_row0_tile(self, target_col):
    move_str = 'ld'
    self.update_puzzle(move_str)
    cur_row, cur_col = self.current_position(0, target_col)
    if cur_row == 0 and cur_col == target_col:
        return move_str
    else:
        move_str += self.position_tile(1, target_col-1, cur_row, cur_col)
        move_str += 'urdlurrdluldrruld'
        self.update_puzzle(move_str[2:])
        return move_str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\r\n # check if curr_pos (i, 0) where i > 1\r\n assert self.lower_row_invariant(target_row, 0)\r\n move = \"ur\"\r\n self.update_puzzle(move)\r\n row, col = self.current_position(target_row, 0)\r\n if row == target_row and col == 0:\r\n move_to_target = (self.get_width() - 2) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n else:\r\n move_to_target = self.move_to_target(target_row - 1, 1, row, col)\r\n # add solver move to str\r\n move_to_target += \"ruldrdlurdluurddlu\" + (self.get_width() - 1) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)\r\n return move", "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += 
self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def solve_col0_tile(self, target_row):\n move_str = 'ur'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(target_row, 0)\n if cur_row == target_row and cur_col == 0:\n move_str += 'r' * (self._width - 2)\n else:\n move_str += self.position_tile(target_row-1, 1, cur_row, cur_col)\n move_str += 'ruldrdlurdluurddlur'\n move_str += 'r' * (self._width - 2)\n self.update_puzzle(move_str[2:])\n return move_str", "def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * (col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 'ruldrdlurdluurddlu'\n elif col_difference == 0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string", "def solve_col0_tile(self, target_row):\r\n # print '----------------------------------'\r\n # print 'SOLVING ZERO COLOUMN'\r\n assert self.lower_row_invariant(target_row,0), \"Invarian is False\"\r\n whole_move = ''\r\n #current_position = self.current_position(target_row, 0)\r\n current_row, current_col = self.current_position(target_row, 0)\r\n zero_row, zero_col = target_row, 0\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, 0)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ur'\r\n \r\n recomended_move = 'ur'\r\n whole_move += recomended_move\r\n zero_col += len(filter(lambda x: x=='r', recomended_move))\r\n zero_row -= len(filter(lambda x: x=='u', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n if self.current_position(target_row, 0) == \\\r\n (target_row, 0):\r\n # print 'TT stays in correct place after recomended move'\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n self.update_puzzle(zero_to_place_move)\r\n whole_move += zero_to_place_move\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move\r\n #move ZERO tile to the right\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n ### reposition TT to (target_row -1, 1)\r\n ### reposition ZERO tile to (target_row-1,0)\r\n \r\n ######Moving zero tile to the target tile\r\n #path_up = 
(zero_row - current_row) * 'u'\r\n #path_side = (current_col - zero_col) * 'r'\r\n path_for_zero = (zero_row - current_row) * 'u' + (current_col - zero_col) * 'r'\r\n whole_move += path_for_zero\r\n zero_col += len(filter(lambda x: x=='r', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n counter = 0\r\n while self.current_position(target_row, 0) != (target_row -1, 1) or \\\r\n (zero_row,zero_col) != (target_row-1,0):\r\n cyclic_moves = ''\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n # print 'We are in while loop'\r\n counter += 1\r\n if zero_row < current_row:\r\n # print 'Zero row under current TT '\r\n if self.current_position(target_row, 0) == (target_row -1, 1):\r\n # print 'TT is in the reccomended posiont (target_row -1, 1) \\n and we are movind zero to the left side of TT '\r\n cyclic_moves = 'ld'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n else:\r\n # print 'TT should be one tile down'\r\n cyclic_moves = 'lddru'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n #### Case 1 if ZERO located in the right of\r\n #### target tile (after it)\r\n if zero_col > current_col:\r\n # print ' Look in the up puzzle, zero in the right side'\r\n if current_col != 1:\r\n # print 'not under target place (target_row -1, 1)'\r\n cyclic_moves = 'dllur'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n whole_move += cyclic_moves\r\n # elif current_col == 1 and self._grid[zero_row+1][zero_col] < \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n elif current_col == 1: \r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self._grid[zero_row+1][zero_col] > \\\r\n self._grid[current_position[0]][current_position[1]]:\r\n print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n cyclic_moves = 'ul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n ### Solwing 3x2 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 3x2 puzzle formed recently'\r\n move3x2 = 'ruldrdlurdluurddlur'\r\n whole_move += move3x2\r\n zero_col -= len(filter(lambda x: x=='l', move3x2))\r\n zero_col += len(filter(lambda x: x=='r', move3x2))\r\n zero_row += len(filter(lambda x: x=='d', move3x2))\r\n zero_row -= len(filter(lambda x: 
x=='u', move3x2))\r\n self.update_puzzle(move3x2)\r\n # print 'Grid afret 3x2 solver move'\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n #####Moving ZERO to the (target_row - 1, n - 1) position where\r\n ##### 'n' is a grid height.\r\n # print self._width-1 - zero_col\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n whole_move += zero_to_place_move\r\n self.update_puzzle(zero_to_place_move)\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 
'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: 
x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, 0), \"tiles to right and below incorrectly ordered\"\n\n # Move zero tile from target position (target_row, 0) to (target_row - 1, 1).\n self.update_puzzle(\"ur\")\n\n move_str = \"\"\n \n # correct_tile's position is determined after moving zero tile \"ur\" \n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(target_row, 0)\n \n # Moves to reposition correct_tile to target position.\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(target_row, 0):\n move_str += str(self.position_tile(target_row - 1, 1, correct_tile))\n move_str += str(\"ruldrdlurdluurddlur\")\n\n # Moves to reposition zero tile to end of column of target_row + 1.\n move_str += str(\"r\" * (self.get_width() - 2)) \n \n self.update_puzzle(move_str)\n\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1) \n \n move_str = \"ur\" + move_str\n return move_str", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - 
current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', 
move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def solve_row0_tile(self, target_col):\r\n # move the zero tile from position (0,j) to (1,j−1) \r\n # using the move string \"ld\" \r\n moves_str = \"\"\r\n moves_str += \"ld\"\r\n # check whether target tile is at position (0,j).\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str) \r\n current_row, current_col = temp_grid.current_position(0, target_col)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n \r\n # If target tile is not at position (0,j).\r\n # reposition the target tile to position (1,j−1) \r\n # with tile zero in position (1,j−2).\r\n if current_row != 0 or current_col != target_col:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"urdlurrdluldrruld\"\r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_row0_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans += str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else statement used when correct_tile is 1+ rows above target position.\n else:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dlu\")\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n return ans", "def 
solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string", "def solve_row0_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row0_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n \n # Move zero tile from target position (0, target_col) to (1, target_col - 1) \n self.update_puzzle(\"ld\")\n \n move_str = \"\"\n\n # correct_tile's position is determined after moving zero tile \"ld\"\n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(0, target_col) \n \n # Moves to reposition correct_tile to target position, and\n # the zero tile to (1, target_col - 1).\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(0, target_col):\n move_str += str(self.position_tile(1, target_col - 1, correct_tile))\n move_str += str(\"urdlurrdluldrruld\")\n\n self.update_puzzle(move_str)\n\n assert self.row1_invariant(target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"ld\" + move_str\n return move_str", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert 
self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def solve_row0_tile(self, target_col):\r\n assert self.row0_invariant(target_col)\r\n move = \"ld\"\r\n self.update_puzzle(move)\r\n \r\n row, col = self.current_position(0, target_col)\r\n if row == 0 and col == target_col:\r\n return move\r\n else:\r\n move_to_target = self.move_to_target(1, target_col - 1, row, col)\r\n # 2x3 puzzle solver\r\n move_to_target += \"urdlurrdluldrruld\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n return move", "def solve_row1_tile(self, target_col):\n cur_row, cur_col = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False)\n self.update_puzzle(move_str)\n return move_str", "def solve_row1_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row1_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n\n # Moves correct_tile to the target position (1, target_col),\n # and the zero tile above the target position at (0, target_col). \n correct_tile = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, correct_tile) \n move_str += \"ur\"\n self.update_puzzle(move_str)\n\n assert self.row0_invariant(target_col)\n \n return move_str", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. \r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). 
\r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + 
path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', 
cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"", "def solve_row1_tile(self, target_col):\r\n row, col = self.current_position(1, target_col)\r\n move = self.move_to_target(1, target_col, row, col)\r\n # for next move\r\n move += \"ur\"\r\n \r\n self.update_puzzle(move)\r\n return move", "def solve_row1_tile(self, target_col):\n # replace with your code\n print target_col\n assert self.row1_invariant(target_col), 'False precondition'\n string = ''\n target = self.current_position(1, target_col)\n row_difference = 1 - target[0]\n col_difference = target_col - target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0:\n string += 'urrdl' * (col_difference - 1)\n string += 'ur'\n elif row_difference == 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n print 'Row 1 Path', string\n self.update_puzzle(string)\n assert self.row0_invariant(target_col), 'False string'\n return string", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n 
target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def solve_interior_tile(self, target_row, target_col):\r\n assert self._grid[target_row][target_col] == 0\r\n moves_str = \"\"\r\n target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n \r\n moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_interior_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_row1_tile(self, target_col):\n movements = self.solve_interior_tile(1, target_col)\n self.update_puzzle(\"ur\")\n return movements + \"ur\"", "def solve_interior_tile(self, target_row, target_col):\n cur_row, cur_col = self.current_position(target_row, target_col)\n move_str = self.position_tile(target_row, target_col, cur_row, cur_col)\n self.update_puzzle(move_str)\n return move_str", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve_2x2(self):\n cur_row, cur_col = self.current_position(0, 0)\n move_str = 'u' * cur_row + 'l' * cur_col\n self.update_puzzle(move_str)\n if self.check_2x2_solved():\n return move_str\n else:\n while not self.check_2x2_solved():\n move_str += 'rdlu'\n self.update_puzzle('rdlu')\n return move_str", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 
'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def solve(self, board: List[List[str]]) -> None:\n n = len(board)\n if n == 0:\n return\n m = len(board[0])\n for i in range(m):\n self.traverse(board, 0, i, n, m)\n self.traverse(board, n - 1, i, n, m)\n for i in range(n):\n self.traverse(board, i, 0, n, m)\n self.traverse(board, i, m - 1, n, m)\n for i in range(n):\n for j in range(m):\n board[i][j] = 'X' if board[i][j] == 'O' else board[i][j]\n for i in range(n):\n for j in range(m):\n board[i][j] = 'O' if board[i][j] == '_' else board[i][j]", "def solve_2x2(self):\n \n assert self.get_number(1,1) == 0, \"zero tile should be at row 1, col 1\"\n assert self.row1_invariant(1), \"tiles to right and below incorrectly ordered\"\n \n # Moves the zero tile to (0,0).\n self.update_puzzle(\"lu\")\n\n # Repositions the upper left 2x2 part up to 3 times, \n # each time checking whether the puzzle is solved.\n rotation_num = 0\n if self.row0_invariant(0) == False:\n for dummy_rotation in range(3):\n while self.row0_invariant(0) == False:\n rotation_num += 1\n self.update_puzzle(\"rdlu\")\n\n assert self.row0_invariant(0), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"lu\" + (\"rdlu\" 
* rotation_num) \n return move_str", "def solve_interior_tile(self, target_row, target_col):\n # replace with your code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', col_difference\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def solve(self, board) -> None:\n if board == [[]] or board == []:\n return\n\n r, c = len(board), len(board[0])\n\n from collections import deque\n queue = deque()\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) 
and board[i][j] == 'O':\n queue.append([i, j])\n board[i][j] = 'M'\n\n while queue:\n i, j = queue.popleft()\n for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):\n if 0 <= x <= r - 1 and 0 <= y <= c - 1 and board[x][y] == 'O':\n board[x][y] = 'M'\n queue.append([x, y])\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def human_go(self, board):\r\n coord_pattern = re.compile(\"[0-{}]$\".format(board.shape[1]))\r\n print(\"Enter Column and press enter.\")\r\n input_str = input(\"(from 0-6)\\n\")\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n col = int(input_str)\r\n if board[0][col] != 0:\r\n print(\"That column is already full, please try again\")\r\n self.human_go()\r\n else:\r\n for row in board[::-1]:\r\n if row[col] == 0:\r\n row[col] = -1\r\n return board", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move 
off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 
1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def solve(self, board: List[List[str]]) -> None:\n if board == [] or board == [[]]: # corner case\n return\n\n r, c = len(board), len(board[0])\n\n def dfs(i, j): # visited i, j neighbors and change o to M\n if i < 0 or i > r - 1 or j < 0 or j > c - 1 or board[i][j] == 'X' or board[i][j] == 'M':\n return\n\n board[i][j] = 'M'\n dfs(i - 1, j)\n dfs(i + 1, j)\n dfs(i, j - 1)\n dfs(i, j + 1)\n\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n dfs(i, j)\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if board is None or len(board) == 0:\n return \n row, col = len(board), len(board[0])\n for i in range(row):\n self.dfs(board, i, 0)\n self.dfs(board, i, col - 1)\n for j in range(col):\n self.dfs(board, 0, j)\n self.dfs(board, row-1, j)\n for i in range(row):\n for j in range(col):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '-':\n board[i][j] = 'O'", "def position_tile(self, target_row, target_col, current_row, current_col):\r\n moves_str = \"\"\r\n # current target is on the upper of 0\r\n if current_col == target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the left of 0\r\n elif current_row == target_row and current_col < target_col:\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n # current target is on the upperleft of 0\r\n elif current_row < target_row and current_col < target_col:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n moves_str += \"dru\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the upperright of 0\r\n elif current_col > target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"dlu\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the right of 0\r\n elif current_col > target_col and current_row == target_row:\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if 
current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"ulld\"\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n m, n = len(board), len(board[0])\n\n def dfs(x, y):\n if not 0 <= x < m or not 0 <= y < n or board[x][y] != \"O\":\n return\n direction = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n board[x][y] = \"Y\"\n for d in direction:\n dfs(x+d[0], y+d[1])\n\n for i in range(m):\n dfs(i, 0)\n dfs(i, n-1)\n for i in range(1, n-1):\n dfs(0, i)\n dfs(m-1, i)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"Y\":\n board[i][j] = \"O\"", "def solve_2x2(self):\r\n moves_str = \"\"\r\n # move zero to the most upper left\r\n zero_row, zero_col = self.current_position(0, 0)\r\n ups = zero_row - 0\r\n lefts = zero_col - 0\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n \r\n # get zero, one, two, three positions\r\n self.update_puzzle(moves_str)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n counter = 0\r\n while counter <= 3 and \\\r\n (zero_row != 0 or zero_col != 0 or \\\r\n one_row!= 0 or one_col != 1 or \\\r\n two_row != 1 or two_col != 0 or \\\r\n three_row != 1 or three_col != 1):\r\n move = CIRCLES[\"RIGHT_CIRCLES\"]\r\n moves_str += move\r\n self.update_puzzle(move)\r\n counter += 1\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n \r\n print \"solve_2x2\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n rows = len(board)\n cols = len(board[0])\n\n def explore(i, j):\n if i < 0 or i >= rows:\n return\n if j < 0 or j >= cols:\n return\n if board[i][j] != \"O\":\n return\n board[i][j] = \"Y\"\n explore(i + 1, j)\n explore(i - 1, j)\n explore(i, j + 1)\n explore(i, j - 1)\n\n for i in [0, rows - 1]:\n for j in range(cols):\n explore(i, j)\n\n for j in [0, cols - 1]:\n for i in range(rows):\n explore(i, j)\n\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == \"Y\":\n board[i][j] = \"O\"\n elif board[i][j] == \"O\":\n board[i][j] = \"X\"", "def solve_2x2(self):\r\n assert self.row1_invariant(1)\r\n pos_1_0 = self.get_number(1, 0)\r\n pos_0_0 = self.get_number(0, 0)\r\n pos_0_1 = self.get_number(0, 1)\r\n # create grid and solve individual cases\r\n grid = [pos_1_0, pos_0_0, pos_0_1]\r\n if grid == [self.get_width(), 1, self.get_width() + 1]:\r\n move = \"ul\"\r\n elif grid == [1, self.get_width() + 1, self.get_width()]:\r\n move = \"lurdlu\"\r\n elif grid == [self.get_width() + 1, self.get_width(), 1]:\r\n move = \"lu\"\r\n self.update_puzzle(move)\r\n return move", "def solve(self, board: List[List[str]]) -> None:\r\n if not board:\r\n return\r\n m, n = len(board), len(board[0])\r\n board_filter = lambda p: 0 <= p[0] < m and 0 <= p[1] < n and board[p[0]][p[1]] == 'O'\r\n queue = list(filter(board_filter, [(x, y) for r in range(max(m, n)) \r\n for x, y in ((r, 0), (r, n-1), (0, r), (m-1, r))]))\r\n while queue:\r\n x, y = queue.pop()\r\n board[x][y] = 'W'\r\n queue.extend(list(filter(board_filter, ((x-1, y), (x+1, y), 
(x, y-1), (x, y+1)))))\r\n for row in board:\r\n for i, c in enumerate(row):\r\n row[i] = 'XO'[c=='W']", "def solve(self, board: List[List[str]]) -> None:", "def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is \"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\"", "def solve(self, board):\r\n if not board or not board[0]:\r\n return\r\n \r\n self.m = len(board)\r\n self.n = len(board[0])\r\n boarder = []\r\n \r\n # Collecting all the 'O' on the boarder\r\n for i in range(self.m):\r\n if board[i][0] == 'O':\r\n boarder.append([i, 0])\r\n if board[i][self.n-1] == 'O':\r\n boarder.append([i, self.n-1])\r\n for j in range(self.n):\r\n if board[0][j] == 'O':\r\n boarder.append([0, j])\r\n if board[self.m-1][j] == 'O':\r\n boarder.append([self.m-1, j])\r\n \r\n for row, col in boarder:\r\n self.BFS(board, row, col)\r\n \r\n for row in range(self.m):\r\n for col in range(self.n):\r\n if board[row][col] == 'O':\r\n board[row][col] = 'X'\r\n elif board[row][col] == 'E':\r\n board[row][col] = 'O'\r\n print(board)", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0:\n return\n self.h = len(board)\n self.w = len(board[0])\n self.board = board\n for i in range(self.h):\n for j in range(self.w):\n if i == 0 or i == self.h-1 or j == 0 or j == self.w-1:\n #print (i,j)\n self.dfs((i,j))\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"O\":\n self.board[i][j]=\"X\"\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"#\":\n self.board[i][j]=\"O\"", "def solve(self, board: List[List[str]]) -> None:\n def dfs(board, i, j):\n m = len(board)\n n = len(board[0])\n if i < 0 or i >= m or j < 0 or j >= n: return\n\n if board[i][j] != 'O': return\n\n board[i][j] = '#'\n [dfs(board, i+di, j+dj) for di, dj in [(0, 1), (1, 0), (0, -1), (-1, 0)]]\n\n if len(board) == 0: return\n m = len(board)\n n = len(board[0])\n\n for i in range(m):\n dfs(board, i, 0)\n dfs(board, i, n-1)\n\n for j in range(n):\n dfs(board, 0, j)\n dfs(board, m-1, j)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n self.nRow, self.nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < self.nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < self.nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, self.nRow - 1]:\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, self.nCol - 1]:\n for kr in range(self.nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(self.nRow):\n 
for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def solve(self, board):\n def dfs(board, r, c):\n if r < 0 or c < 0 or r > rows - 1 or c > cols - 1 or board[r][c] == 'X' or board[r][c] == '#':\n return\n board[r][c] = '#'\n dfs(board, r - 1, c)\n dfs(board, r + 1, c)\n dfs(board, r, c - 1)\n dfs(board, r, c + 1)\n\n if len(board) == 0:\n return;\n rows = len(board)\n cols = len(board[0])\n for i in range(rows):\n for j in range(cols):\n if (i == 0 or j == 0 or i == rows - 1 or j == cols - 1) and board[i][j] == 'O':\n dfs(board, i, j)\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return\n\n m = len(board)\n n = len(board[0])\n\n uf = UnionFind(m * n + 1)\n dummy = m * n\n\n # connect 'O' at first and last col with dummy\n for i in range(m):\n if board[i][0] == 'O':\n uf.union(dummy, i * n)\n if board[i][-1] == 'O':\n uf.union(dummy, i * n + n - 1)\n\n # connect 'O' at first and last row with dummy\n for j in range(n):\n if board[0][j] == 'O':\n uf.union(dummy, j)\n if board[-1][j] == 'O':\n uf.union(dummy, n * (m-1) + j)\n\n d = [(1, 0), (0, 1), (0, -1), (-1, 0)]\n\n for i in range(1, m-1):\n for j in range(1, n-1):\n if board[i][j] == 'O':\n for di, dj in d:\n x = i+di\n y = j+dj\n if board[x][y] == 'O':\n uf.union(x*n+y, i*n+j)\n\n # change not connected 'O' with 'X'\n for i in range(1, m-1):\n for j in range(1, n-1):\n if not uf.connected(dummy, i * n + j):\n board[i][j] = 'X'", "def solve(self, board: 'List[List[str]]') -> 'None':\n if not board:\n return\n m, n = len(board), len(board[0])\n save = [ij for k in range(m + n) for ij in ((0, k), (m - 1, k), (k, 0), (k, n - 1))]\n while save:\n i, j = save.pop()\n if -1 < i < m and -1 < j < n and board[i][j] == 'O':\n board[i][j] = 'S'\n save += (i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)\n for row in board:\n for i, c in enumerate(row):\n row[i] = 'XO'[c == 'S']", "def make_move(self, column):\r\n trans_board = numpy.transpose(self.__board[::1]) # transpose the\r\n # board so that columns are now arrays\r\n if 0 not in trans_board[column] or self.get_winner() or column >= \\\r\n self.BOARD_COLUMNS or column < 0:\r\n # column is full, illegal or the game is already finished\r\n return self.ILLEGAL_MOVE # exception?\r\n else:\r\n reversed_col = list(reversed(trans_board[column]))\r\n for hole in reversed_col:\r\n if hole == 0:\r\n row_i = self.BOARD_ROWS - 1 - reversed_col.index(hole)\r\n self.__board[row_i][column] = self.__cur_player\r\n winner = self.get_winner()\r\n if winner: # is not none\r\n return winner\r\n self.__switch_player()", "def move_to_target(self, target_row, target_col, row, col):\r\n move = \"\"\r\n # typical move to move target tile to target pos.\r\n solver_move = \"druld\"\r\n # move up first\r\n move = (target_row - row) * \"u\"\r\n # conditional statements for moving the tile:\r\n # 1. case curr_pos of tile and target_tile are in same col\r\n if (target_col - col) == 0:\r\n move += \"ld\" + ((target_row - row) - 1) * solver_move\r\n else:\r\n # 2. curr_pos of tile is on the left of target pos\r\n if (target_col - col) > 0:\r\n move += (target_col - col) * \"l\"\r\n if row == 0:\r\n move += (abs(target_col - col) - 1) * \"drrul\"\r\n else:\r\n move += (abs(target_col - col) - 1) * \"urrdl\"\r\n # 3. 
curr_pos of tile is on the right of target pos:\r\n elif (target_col - col) < 0:\r\n move += (abs(target_col - col) - 1) * \"r\"\r\n if row == 0:\r\n move += abs(target_col - col) * \"rdllu\"\r\n else:\r\n move += abs(target_col - col) * \"rulld\"\r\n move += (target_row - row) * solver_move\r\n return move", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n nRow, nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, nRow - 1]:\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, nCol - 1]:\n for kr in range(nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(nRow):\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def move(self, row, col, player):\r\n if player == 1:\r\n self.mat[row][col] = 1\r\n else:\r\n self.mat[row][col] = -1\r\n if self.checkrow(player,row) or self.checkcol(player,col):\r\n return player\r\n if row == col or row + col == self.size-1:\r\n if self.checkdiag(player):\r\n return player\r\n return 0", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return", "def solve(self, board: List[List[str]]) -> None:\n def DFS(board, i, j):\n q = []\n q.append([i, j])\n \n while q:\n x, y = q.pop()\n board[x][y] = \"*\"\n neighbors = ((0, 1), (0, -1), (1, 0), (-1, 0))\n for dx, dy in neighbors:\n if 0 <= x + dx <= len(board) - 1 and 0 <= y + dy <= len(board[0]) - 1 and board[x + dx][y + dy] == \"O\":\n q.append([x + dx, y + dy])\n \n \n # first row\n i = 0\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last row\n i = len(board) - 1\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # first column\n j = 0\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last column\n j = len(board[0]) - 1\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n \n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"*\":\n board[i][j] = \"O\"", "def move(self, row: int, 
col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def solve(self, board: List[List[str]]) -> None:\n\n def expansion(i,j):\n for di,dj in {(-1,0),(1,0),(0,-1),(0,1)}:\n if -1<i+di<m and -1<j+dj<n and state[i+di][j+dj]=='O':\n return True\n return False\n\n if not board:\n return board\n\n m = len(board)\n n = len(board[0]) \n state = [['X']*n for _ in range(m)]\n\n for j in range(n):\n state[0][j] = board[0][j]\n state[m-1][j] = board[m-1][j]\n \n for i in range(m):\n state[i][0] = board[i][0]\n state[i][n-1] = board[i][n-1]\n \n flag = 1\n\n while flag:\n flag = 0\n\n for k in range(1, (1+min(m,n))//2):\n for j in range(k,n-k):\n if board[k][j]=='O' and state[k][j] == 'X' and expansion(k,j):\n state[k][j] = 'O'\n flag = 1\n \n if board[m-1-k][j]=='O' and state[m-1-k][j] == 'X' and expansion(m-1-k,j):\n state[m-1-k][j] = 'O'\n flag = 1\n \n for i in range(k,m-k):\n if board[i][k]=='O' and state[i][k] == 'X' and expansion(i,k):\n state[i][k] = 'O'\n flag = 1\n \n if board[i][n-1-k]=='O' and state[i][n-1-k] == 'X' and expansion(i,n-1-k):\n state[i][n-1-k] = 'O'\n flag = 1\n\n board[:] = state[:]", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column 
one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # 
This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def solve(self, board: 'List[List[str]]') -> 'None':\n\n def dfs(i, j, tmp):\n nonlocal flag\n if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):\n flag = False\n return\n if board[i][j] != 'O' or [i, j] in tmp:\n return\n tmp.append([i, j])\n dfs(i - 1, j, tmp)\n dfs(i + 1, j, tmp)\n dfs(i, j + 1, tmp)\n dfs(i, j - 1, tmp)\n return tmp\n\n change = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 'O' and [i, j] not in change:\n tmp = []\n flag = True\n tmp = dfs(i, j, tmp[:])\n if flag:\n for loc in tmp:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'\n\n for loc in change:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'", "def move(self, row: int, col: int, player: int) -> int:\n self.board[row][col] = 1 if player == 1 else -1\n rowsum = sum(self.board[row])\n colsum = sum([self.board[r][col] for r in range(self.n)])\n diagsum1 = sum([self.board[i][i] for i in range(self.n)])\n diagsum2 = sum([self.board[i][-i-1] for i in range(self.n)])\n if player == 1:\n if rowsum == self.n or colsum == self.n or diagsum1 == self.n or diagsum2 == self.n:\n return 1\n else:\n if rowsum == -self.n or colsum == -self.n or diagsum1 == -self.n or diagsum2 == -self.n:\n return 2\n return 0", "def solve(board):\r\n \r\n #An O(2mn) time solution; the first O(mn) traversal is to preform a bfs on all tiles attached to edge 'O' tiles (can't convert to 'X's); the second is to convert all remaining 'O's into 'X's\r\n \r\n def bfs(curr,r,c):\r\n if not curr: return\r\n prev = len(curr)\r\n for n in range(0,prev):\r\n i,j = curr[n][0],curr[n][1]\r\n board[i][j] = 'A'\r\n for x,y in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n x_n = i+x\r\n y_n = j+y\r\n if x_n >= 0 and x_n < r and y_n >= 0 and y_n < c and board[x_n][y_n] == \"O\":\r\n curr += [(x_n,y_n)]\r\n bfs(curr[prev:],r,c)\r\n\r\n \r\n q,r,c = [],len(board),len(board[0])\r\n if not r or q: return\r\n\r\n for i in range(r):\r\n for j in range(c):\r\n if (i==0 or j==0 or i==r-1 or j==c-1) and board[i][j] == \"O\":\r\n q += [(i,j)]\r\n \r\n bfs(q,r,c)\r\n\r\n for i in range(r):\r\n for j in range(c): \r\n if board[i][j] == \"O\": \r\n board[i][j] = \"X\"\r\n elif board[i][j] == \"A\":\r\n board[i][j] = \"O\"\r\n \r\n return", "def solve(board) -> None:\n rows = len(board)\n if rows==0:\n return board\n cols = len(board[0])\n \n def is_border(rc):\n (rr, cc) =rc\n if rr<rows and rr< cols and rr>=0 and cc>=0 and board[rr][cc]=='O' and (rr==0 or rr==rows-1 or cc==0 or cc==cols-1):\n return True\n return False\n \n transf = []\n for r in range(rows):\n for c in range(cols):\n if board[r][c]=='O' and not is_border((r,c)) and not any(map(is_border, [(r-1, c), (r+1, c), (r, c-1), (r, c+1)])):\n transf.append((r,c))\n if transf:\n for r,c in transf:\n board[r][c]='X'\n return board", "def execute(self, row, col, action=None):\n assert action is not None, \"No action selected!\"\n\n if action == 'north':\n if (row-1) < 0 or self.board[row-1, col] == '*':\n return row, col\n elif action == 'south':\n if (row+1) >= self.N or self.board[row+1, col] == '*':\n return row, col\n elif action == 'east':\n if (col+1) >= self.M or self.board[row, col+1] == '*':\n return row, col\n elif action == 'west':\n if (col-1) < 0 or self.board[row, col-1] == '*':\n return row, col\n\n return row + self.step_row[action], col 
+ self.step_col[action]", "def check_move(self, col):\n\n for i in range(len(self.board) - 1, -1, -1):\n if self.board[i][col] == 0:\n return i\n\n return \"Full\"", "def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def get_move(board, player):\n row, col = 0, 0\n return row, col", "def move(self, row, col, player):\n offset = player * 2 - 3 # 1 or -1\n self.row[row] += offset\n self.col[col] += offset\n if row == col:\n self.diag += offset\n if row + col == self.n - 1:\n self.anti_diag += offset\n if self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 2\n if -self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 1\n return 0", "def solve(self, board: List[List[str]]) -> None:\n # New Solution: DFS on boarder (140ms: 89.07%)\n if not board or not board[0]: return\n def dfs(i, j):\n if board[i][j]=='O':\n board[i][j] = '*'\n if i-1>=0:\n dfs(i-1, j)\n if i+1<len(board):\n dfs(i+1, j)\n if j-1>=0:\n dfs(i, j-1)\n if j+1<len(board[0]):\n dfs(i, j+1)\n height, width = len(board), len(board[0])\n for i in range(width):\n if board[0][i]=='O':\n dfs(0, i)\n if board[height-1][i]=='O':\n dfs(height-1, i)\n for i in range(height):\n if board[i][0]=='O':\n dfs(i, 0)\n if board[i][width-1]=='O':\n dfs(i, width-1)\n for i in range(height):\n for j in range(width):\n if board[i][j]=='O':\n board[i][j] = 'X'\n elif board[i][j]=='*':\n board[i][j] = 'O'", "def make_move(board, picked_column, player):\n row = find_first_free_cell(board, picked_column)\n board[row][picked_column] = player\n return board, row", "def make_move(board: Connect4Board) -> \"(row, col)\":\r\n\r\n while True:\r\n\r\n try:\r\n\r\n print('\\nPlease Specify your move. Enter the number column of a cell on the board.')\r\n print('-'*85)\r\n \r\n col = Connect4GameUI.move_col(board)\r\n row = Connect4GameUI._get_valid_row(board, col)\r\n print(row,col)\r\n return row, col\r\n\r\n break\r\n\r\n except:\r\n print('\\nInvalid move!!!')\r\n print('Please try it again.')", "def solve(self, board: List[List[str]]) -> None:\n def _dfs(i, j):\n if i < 0 or j < 0 or i >= len(board) or j >= len(board[0]) or board[i][j] in ['X', '#']: return\n board[i][j] = '#'\n _dfs(i-1, j)\n _dfs(i+1, j)\n _dfs(i, j-1)\n _dfs(i, j+1)\n\n if not board or not board[0]: return\n m, n = len(board), len(board[0])\n for i in range(0, m):\n for j in range(0, n):\n is_edge = i == 0 or j == 0 or i == m-1 or j == n-1\n if is_edge and board[i][j] == 'O':\n _dfs(i, j)\n print(board)\n\n for i in range(0, m):\n for j in range(0, n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board) -> None:\n for index in range (1, len(board)-1):\n arr = board[index]\n for ch in range(1, len(arr)-1):\n if arr[ch] is 'O':\n safe = True\n if ch-1 == 0 and arr[ch-1] is 'O':\n safe = False\n if ch +1 == len(arr)-1 and arr[ch+1] is 'O':\n safe = False\n if index -1 == 0 and board[index-1][ch] is 'O':\n safe = False\n if index + 1 == len(board)-1 and board[index + 1][ch] is 'O':\n safe = False\n if safe:\n arr[ch] = 'X'", "def move(self, row: int, col: int, player: int) -> int:\n if player == 1:\n self.newList[row][col] = 1\n self.colSum[col] += 1\n self.rowSum[row] += 1\n if row == col:\n self.diag += 1\n if row + col == (self.n - 1):\n self.revDiag += 1\n if self.rowSum[row] == self.n or self.colSum[col] == self.n or self.diag == self.n or self.revDiag == self.n:\n return 1\n if player == 2:\n self.newList[row][col] = -1\n self.colSum[col] -= 1\n 
self.rowSum[row] -= 1\n if row == col:\n self.diag -= 1\n if row + col == (self.n - 1):\n self.revDiag -= 1\n if self.rowSum[row] == -self.n or self.colSum[col] == -self.n or self.diag == -self.n or self.revDiag == -self.n:\n return 2\n \n return 0", "def move(self, row: int, col: int, player: int) -> int:\n n = self.n\n if player == 1:\n self.rows_1[row] += 1\n self.cols_1[col] += 1\n if player == 2:\n self.rows_2[row] += 1\n self.cols_2[col] += 1\n if row == col:\n self.diag1[row] = player\n if row + col + 1 == n:\n self.diag2[row] = player\n f = 0\n g = 0\n for i in range(n):\n if self.rows_1[row] == n or self.cols_1[col] == n:\n return 1\n if self.rows_2[row] == n or self.cols_2[col] == n:\n return 2 \n if self.diag1[i] != self.diag1[0]:\n f = 1\n if self.diag2[i] != self.diag2[0]:\n g = 1\n if f == 0:\n return self.diag1[0]\n if g == 0:\n return self.diag2[0]\n return 0", "def make_move(self, row, column):\n\t\tif self.board[int(row)][int(column)] == '-':\n\t\t\tself.board[int(row)][int(column)] = self.marker\n\t\telse:\n\t\t\tprint(\"That spot is occupied, you messed up, you lose your turn for doing bad things\")", "def solve(self, board) -> None:\n coords = []\n board_len = len(board)\n row_len = len(board[0]) - 1\n # top\n # coords.append([[0, i] for i, q in enumerate(board[0]) if q == \"O\"])\n # # bottom\n # coords.append(\n # [[board_len, i] for i, q in enumerate(board[board_len]) if q == \"O\"]\n # )\n for i in range(board_len):\n row_coord = [[i,indx] for indx, q in enumerate(board[i]) if q == \"O\"]\n # import pdb; pdb.set_trace()\n for x in row_coord:\n coords.append(x)\n for x in coords:\n if len(x) == 0:\n continue\n if x[0] == 0:\n print(\"top border\")\n elif x[0] == board_len - 1:\n print(\"bottom border\")\n elif x[1] == 0:\n print(\"left border\")\n elif x[1] == row_len:\n prin(\"right border\")", "def solve(self, board: List[List[str]]) -> None:\n visited = [[False for x in range(len(board[0]))] for y in range(len(board))]\n for i in range(len(board)):\n for j in range(len(board[i])):\n if not visited[i][j] and board[i][j] == 'O':\n res = []\n result = self.gatherO(board, i, j, res, visited)\n if not result:\n for coordinate in res:\n board[coordinate[0]][coordinate[1]] = 'X'", "def solve(self, board) -> None:\n x_length = len(board)\n if x_length == 0: \n return\n\n y_length = len(board[0])\n confirmed = set()\n dfs = []\n for i in range(x_length):\n if board[i][0] == 'O':\n board[i][0] = 'temp'\n dfs.append((i, 0))\n if board[i][y_length - 1] == 'O':\n board[i][y_length - 1] = 'temp'\n dfs.append((i, y_length - 1))\n for j in range(y_length):\n if board[0][j] == 'O':\n board[0][j] = 'temp'\n dfs.append((0, j))\n if board[x_length - 1][j] == 'O':\n board[x_length - 1][j] = 'temp'\n dfs.append((x_length - 1, j))\n while dfs:\n i, j = dfs.pop()\n confirmed.add((i, j))\n if i+1 < x_length and board[i+1][j] == 'O':\n board[i+1][j] = 'temp'\n dfs.append((i + 1, j))\n if i > 0 and board[i-1][j] == 'O':\n board[i-1][j] = 'temp'\n dfs.append((i-1, j))\n if j+1 < y_length and board[i][j+1] == 'O':\n board[i][j+1] = 'temp'\n dfs.append((i, j + 1))\n if j > 0 and board[i][j-1] == 'O':\n board[i][j-1] = 'temp'\n dfs.append((i, j-1))\n for i in range(x_length):\n for j in range(y_length):\n if (i, j) in confirmed:\n board[i][j] = 'O'\n else:\n board[i][j] = 'X'\n return", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, 
value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def solveSudoku(self, board):\n\n digits = { str(i) for i in range(1, 10) }\n rows = [ digits.copy() for _ in range(9) ]\n cols = [ digits.copy() for _ in range(9) ]\n boxs = [ [ digits.copy() for _ in range(3) ] for _ in range(3) ]\n unoccupied = set()\n\n def __recursiveSolver():\n if not unoccupied:\n return\n\n choices = digits.copy()\n for row, col in unoccupied:\n possible_moves = rows[row] & cols[col] & boxs[row // 3][col // 3]\n if len(possible_moves) < len(choices):\n action_pos = (row, col)\n choices = possible_moves\n if len(choices) == 1:\n break\n\n for choice in choices:\n (row, col) = action_pos\n\n unoccupied.remove(action_pos)\n board[row][col] = choice\n rows[row].remove(choice)\n cols[col].remove(choice)\n boxs[row // 3][col // 3].remove(choice)\n\n __recursiveSolver()\n if not unoccupied: return\n\n unoccupied.add(action_pos)\n board[row][col] = '.'\n rows[row].add(choice)\n cols[col].add(choice)\n boxs[row // 3][col // 3].add(choice)\n\n for row in range(9):\n for col in range(9):\n ch = board[row][col]\n if ch == '.':\n unoccupied.add((row, col))\n else:\n rows[row].remove(ch)\n cols[col].remove(ch)\n boxs[row // 3][col // 3].remove(ch)\n\n __recursiveSolver()", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return []\n nr = len(board)\n nc = len(board[0])\n\n # begin dfs from boundaries with letter \"O\"\n for r in range(nr):\n for c in range(nc):\n if r == 0 or r == nr-1 or c == 0 or c == nc-1:\n if board[r][c] == \"O\":\n self.dfs(board, r, c)\n\n # change \"O\" to \"X\" and \"#\" to \"O\"\n for r in range(nr):\n for c in range(nc):\n if board[r][c] == \"O\":\n board[r][c] = \"X\"\n elif board[r][c] == \"#\":\n board[r][c] = \"O\"" ]
[ "0.7689822", "0.76361793", "0.7602665", "0.759841", "0.7547982", "0.75370145", "0.75116783", "0.7487752", "0.7478812", "0.74497175", "0.7435299", "0.7407118", "0.7397174", "0.73776317", "0.73732615", "0.7324699", "0.72756827", "0.72559106", "0.7206573", "0.71551996", "0.7140375", "0.71336603", "0.7130972", "0.7064705", "0.7057786", "0.70349383", "0.7009467", "0.70032966", "0.6981267", "0.692232", "0.6843114", "0.68304265", "0.68043596", "0.6793013", "0.6661478", "0.66392213", "0.66384995", "0.6629243", "0.6624516", "0.6562946", "0.6547551", "0.65120554", "0.64690065", "0.6463337", "0.6463337", "0.6463337", "0.6461038", "0.6461038", "0.6442287", "0.6416701", "0.6415396", "0.6406427", "0.6388689", "0.6321458", "0.6317301", "0.62179285", "0.6161388", "0.6149291", "0.61431324", "0.612199", "0.60966474", "0.6071344", "0.6065953", "0.6054412", "0.6033915", "0.6023434", "0.6011581", "0.5990514", "0.59699047", "0.5951187", "0.59380174", "0.5924024", "0.59170175", "0.58692485", "0.58520216", "0.584852", "0.5847602", "0.58435917", "0.58354825", "0.58347327", "0.58335406", "0.5826953", "0.5814613", "0.57892853", "0.5788401", "0.5785238", "0.5765685", "0.57636666", "0.57590806", "0.5756696", "0.57558256", "0.5753852", "0.5751244", "0.5747492", "0.57417387", "0.5741668", "0.57338166", "0.5722073", "0.57192683", "0.5709736" ]
0.745573
9
Solve the tile in row one at the specified column Updates puzzle and returns a move string
def solve_row1_tile(self, target_col): cur_row, cur_col = self.current_position(1, target_col) move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False) self.update_puzzle(move_str) return move_str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def solve_col0_tile(self, target_row):\r\n # check if curr_pos (i, 0) where i > 1\r\n assert self.lower_row_invariant(target_row, 0)\r\n move = \"ur\"\r\n self.update_puzzle(move)\r\n row, col = self.current_position(target_row, 0)\r\n if row == target_row and col == 0:\r\n move_to_target = (self.get_width() - 2) * \"r\"\r\n 
self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n else:\r\n move_to_target = self.move_to_target(target_row - 1, 1, row, col)\r\n # add solver move to str\r\n move_to_target += \"ruldrdlurdluurddlu\" + (self.get_width() - 1) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)\r\n return move", "def solve_row1_tile(self, target_col):\r\n row, col = self.current_position(1, target_col)\r\n move = self.move_to_target(1, target_col, row, col)\r\n # for next move\r\n move += \"ur\"\r\n \r\n self.update_puzzle(move)\r\n return move", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def solve_row1_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row1_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n\n # Moves correct_tile to the target position (1, target_col),\n # and the zero tile above the target position at (0, target_col). \n correct_tile = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, correct_tile) \n move_str += \"ur\"\n self.update_puzzle(move_str)\n\n assert self.row0_invariant(target_col)\n \n return move_str", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def solve_row1_tile(self, target_col):\n # replace with your code\n print target_col\n assert self.row1_invariant(target_col), 'False precondition'\n string = ''\n target = self.current_position(1, target_col)\n row_difference = 1 - target[0]\n col_difference = target_col - target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0:\n string += 'urrdl' * (col_difference - 1)\n string += 'ur'\n elif row_difference == 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n print 'Row 1 Path', string\n 
self.update_puzzle(string)\n assert self.row0_invariant(target_col), 'False string'\n return string", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\n move_str = 'ur'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(target_row, 0)\n if cur_row == target_row and cur_col == 0:\n move_str += 'r' * (self._width - 2)\n else:\n move_str += self.position_tile(target_row-1, 1, cur_row, cur_col)\n move_str += 'ruldrdlurdluurddlur'\n move_str += 'r' * (self._width - 2)\n self.update_puzzle(move_str[2:])\n return move_str", "def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * 
(col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 'ruldrdlurdluurddlu'\n elif col_difference == 0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string", "def solve_row0_tile(self, target_col):\n move_str = 'ld'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(0, target_col)\n if cur_row == 0 and cur_col == target_col:\n return move_str\n else:\n move_str += self.position_tile(1, target_col-1, cur_row, cur_col)\n move_str += 'urdlurrdluldrruld'\n self.update_puzzle(move_str[2:])\n return move_str", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n 
cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def solve_row1_tile(self, target_col):\n movements = self.solve_interior_tile(1, target_col)\n self.update_puzzle(\"ur\")\n return movements + \"ur\"", "def solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n 
elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile 
position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def solve_col0_tile(self, target_row):\r\n # print '----------------------------------'\r\n # print 'SOLVING ZERO COLOUMN'\r\n assert self.lower_row_invariant(target_row,0), 
\"Invarian is False\"\r\n whole_move = ''\r\n #current_position = self.current_position(target_row, 0)\r\n current_row, current_col = self.current_position(target_row, 0)\r\n zero_row, zero_col = target_row, 0\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, 0)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ur'\r\n \r\n recomended_move = 'ur'\r\n whole_move += recomended_move\r\n zero_col += len(filter(lambda x: x=='r', recomended_move))\r\n zero_row -= len(filter(lambda x: x=='u', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n if self.current_position(target_row, 0) == \\\r\n (target_row, 0):\r\n # print 'TT stays in correct place after recomended move'\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n self.update_puzzle(zero_to_place_move)\r\n whole_move += zero_to_place_move\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move\r\n #move ZERO tile to the right\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n ### reposition TT to (target_row -1, 1)\r\n ### reposition ZERO tile to (target_row-1,0)\r\n \r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (current_col - zero_col) * 'r'\r\n path_for_zero = (zero_row - current_row) * 'u' + (current_col - zero_col) * 'r'\r\n whole_move += path_for_zero\r\n zero_col += len(filter(lambda x: x=='r', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n counter = 0\r\n while self.current_position(target_row, 0) != (target_row -1, 1) or \\\r\n (zero_row,zero_col) != (target_row-1,0):\r\n cyclic_moves = ''\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n # print 'We are in while loop'\r\n counter += 1\r\n if zero_row < current_row:\r\n # print 'Zero row under current TT '\r\n if self.current_position(target_row, 0) == (target_row -1, 1):\r\n # print 'TT is in the reccomended posiont (target_row -1, 1) \\n and we are movind zero to the left side of TT '\r\n cyclic_moves = 'ld'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n else:\r\n # print 'TT should be one tile down'\r\n cyclic_moves = 'lddru'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n #### Case 1 if ZERO located in 
the right of\r\n #### target tile (after it)\r\n if zero_col > current_col:\r\n # print ' Look in the up puzzle, zero in the right side'\r\n if current_col != 1:\r\n # print 'not under target place (target_row -1, 1)'\r\n cyclic_moves = 'dllur'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n whole_move += cyclic_moves\r\n # elif current_col == 1 and self._grid[zero_row+1][zero_col] < \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n elif current_col == 1: \r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self._grid[zero_row+1][zero_col] > \\\r\n self._grid[current_position[0]][current_position[1]]:\r\n print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n cyclic_moves = 'ul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n ### Solwing 3x2 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 3x2 puzzle formed recently'\r\n move3x2 = 'ruldrdlurdluurddlur'\r\n whole_move += move3x2\r\n zero_col -= len(filter(lambda x: x=='l', move3x2))\r\n zero_col += len(filter(lambda x: x=='r', move3x2))\r\n zero_row += len(filter(lambda x: x=='d', move3x2))\r\n zero_row -= len(filter(lambda x: x=='u', move3x2))\r\n self.update_puzzle(move3x2)\r\n # print 'Grid afret 3x2 solver move'\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n #####Moving ZERO to the (target_row - 1, n - 1) position where\r\n ##### 'n' is a grid height.\r\n # print self._width-1 - zero_col\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n whole_move += zero_to_place_move\r\n self.update_puzzle(zero_to_place_move)\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move", "def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"", "def solve_interior_tile(self, target_row, target_col):\r\n assert self._grid[target_row][target_col] == 0\r\n moves_str = \"\"\r\n target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n \r\n moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_interior_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_2x2(self):\n cur_row, cur_col = self.current_position(0, 0)\n move_str = 'u' * cur_row + 'l' * cur_col\n self.update_puzzle(move_str)\n if self.check_2x2_solved():\n return move_str\n else:\n while not self.check_2x2_solved():\n move_str += 'rdlu'\n 
self.update_puzzle('rdlu')\n return move_str", "def solve_row0_tile(self, target_col):\r\n # move the zero tile from position (0,j) to (1,j−1) \r\n # using the move string \"ld\" \r\n moves_str = \"\"\r\n moves_str += \"ld\"\r\n # check whether target tile is at position (0,j).\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str) \r\n current_row, current_col = temp_grid.current_position(0, target_col)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n \r\n # If target tile is not at position (0,j).\r\n # reposition the target tile to position (1,j−1) \r\n # with tile zero in position (1,j−2).\r\n if current_row != 0 or current_col != target_col:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"urdlurrdluldrruld\"\r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_row0_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, 0), \"tiles to right and below incorrectly ordered\"\n\n # Move zero tile from target position (target_row, 0) to (target_row - 1, 1).\n self.update_puzzle(\"ur\")\n\n move_str = \"\"\n \n # correct_tile's position is determined after moving zero tile \"ur\" \n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(target_row, 0)\n \n # Moves to reposition correct_tile to target position.\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(target_row, 0):\n move_str += str(self.position_tile(target_row - 1, 1, correct_tile))\n move_str += str(\"ruldrdlurdluurddlur\")\n\n # Moves to reposition zero tile to end of column of target_row + 1.\n move_str += str(\"r\" * (self.get_width() - 2)) \n \n self.update_puzzle(move_str)\n\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1) \n \n move_str = \"ur\" + move_str\n return move_str", "def solve_interior_tile(self, target_row, target_col):\n cur_row, cur_col = self.current_position(target_row, target_col)\n move_str = self.position_tile(target_row, target_col, cur_row, cur_col)\n self.update_puzzle(move_str)\n return move_str", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. 
\r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). \r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_row0_tile(self, target_col):\r\n assert self.row0_invariant(target_col)\r\n move = \"ld\"\r\n self.update_puzzle(move)\r\n \r\n row, col = self.current_position(0, target_col)\r\n if row == 0 and col == target_col:\r\n return move\r\n else:\r\n move_to_target = self.move_to_target(1, target_col - 1, row, col)\r\n # 2x3 puzzle solver\r\n move_to_target += \"urdlurrdluldrruld\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n return move", "def solve_row0_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row0_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n \n # Move zero tile from target position (0, target_col) to (1, target_col - 1) \n self.update_puzzle(\"ld\")\n \n move_str = \"\"\n\n # correct_tile's position is determined after moving zero tile \"ld\"\n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(0, target_col) \n \n # Moves to reposition correct_tile to target position, and\n # the zero tile to (1, target_col - 1).\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(0, target_col):\n move_str += str(self.position_tile(1, target_col - 1, correct_tile))\n move_str += str(\"urdlurrdluldrruld\")\n\n self.update_puzzle(move_str)\n\n assert self.row1_invariant(target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"ld\" + move_str\n return move_str", "def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of 
target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans += str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else statement used when correct_tile is 1+ rows above target position.\n else:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dlu\")\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n return ans", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number 
-=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def solve_interior_tile(self, target_row, target_col):\n # replace with your code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', col_difference\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n 
self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def solve_2x2(self):\n \n assert self.get_number(1,1) == 0, \"zero tile should be at row 1, col 1\"\n assert self.row1_invariant(1), \"tiles to right and below incorrectly ordered\"\n \n # Moves the zero tile to (0,0).\n self.update_puzzle(\"lu\")\n\n # Repositions the upper left 2x2 part up to 3 times, \n # each time checking whether the puzzle is solved.\n rotation_num = 0\n if self.row0_invariant(0) == False:\n for dummy_rotation in range(3):\n while self.row0_invariant(0) == False:\n rotation_num += 1\n self.update_puzzle(\"rdlu\")\n\n assert self.row0_invariant(0), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"lu\" + (\"rdlu\" * rotation_num) \n return move_str", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - 
zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO 
is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def solve(self, board: List[List[str]]) -> None:\n n = len(board)\n if n == 0:\n return\n m = len(board[0])\n for i in range(m):\n self.traverse(board, 0, i, n, m)\n self.traverse(board, n - 1, i, n, m)\n for i in range(n):\n self.traverse(board, i, 0, n, m)\n self.traverse(board, i, m - 1, n, m)\n for i in range(n):\n for j in range(m):\n board[i][j] = 'X' if board[i][j] == 'O' else board[i][j]\n for i in range(n):\n for j in range(m):\n board[i][j] = 'O' if board[i][j] == '_' else board[i][j]", "def solve_2x2(self):\r\n assert self.row1_invariant(1)\r\n pos_1_0 = self.get_number(1, 0)\r\n pos_0_0 = self.get_number(0, 0)\r\n pos_0_1 = self.get_number(0, 1)\r\n # create grid and solve individual cases\r\n grid = [pos_1_0, pos_0_0, pos_0_1]\r\n if grid == [self.get_width(), 1, self.get_width() + 1]:\r\n move = \"ul\"\r\n elif grid == [1, self.get_width() + 1, self.get_width()]:\r\n move = \"lurdlu\"\r\n elif grid == [self.get_width() + 1, self.get_width(), 1]:\r\n move = \"lu\"\r\n 
self.update_puzzle(move)\r\n return move", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n 
self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def solve(self, board) -> None:\n if board == [[]] or board == []:\n return\n\n r, c = len(board), len(board[0])\n\n from collections import deque\n queue = deque()\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n queue.append([i, j])\n board[i][j] = 'M'\n\n while queue:\n i, j = queue.popleft()\n for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):\n if 0 <= x <= r - 1 and 0 <= y <= c - 1 and board[x][y] == 'O':\n board[x][y] = 'M'\n queue.append([x, y])\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 
'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve_2x2(self):\r\n moves_str = \"\"\r\n # move zero to the most upper left\r\n zero_row, zero_col = self.current_position(0, 0)\r\n ups = zero_row - 0\r\n lefts = zero_col - 0\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n \r\n # get zero, one, two, three positions\r\n self.update_puzzle(moves_str)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n counter = 0\r\n while counter <= 3 and \\\r\n (zero_row != 0 or zero_col != 0 or \\\r\n one_row!= 0 or one_col != 1 or \\\r\n two_row != 1 or two_col != 0 or \\\r\n three_row != 1 or three_col != 1):\r\n move = CIRCLES[\"RIGHT_CIRCLES\"]\r\n moves_str += move\r\n self.update_puzzle(move)\r\n counter += 1\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n \r\n print \"solve_2x2\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def human_go(self, board):\r\n coord_pattern = re.compile(\"[0-{}]$\".format(board.shape[1]))\r\n print(\"Enter Column and press enter.\")\r\n input_str = input(\"(from 0-6)\\n\")\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n col = int(input_str)\r\n if board[0][col] != 0:\r\n print(\"That column is already full, please try again\")\r\n self.human_go()\r\n else:\r\n for row in board[::-1]:\r\n if row[col] == 0:\r\n row[col] = -1\r\n return board", "def make_move(self, column):\r\n trans_board = numpy.transpose(self.__board[::1]) # transpose the\r\n # board so that columns are now arrays\r\n if 0 not in trans_board[column] or self.get_winner() or column >= \\\r\n self.BOARD_COLUMNS or column < 0:\r\n # column is full, illegal or the game is already finished\r\n return self.ILLEGAL_MOVE # exception?\r\n else:\r\n reversed_col = list(reversed(trans_board[column]))\r\n for hole in reversed_col:\r\n if hole == 0:\r\n row_i = self.BOARD_ROWS - 1 - reversed_col.index(hole)\r\n self.__board[row_i][column] = self.__cur_player\r\n winner = self.get_winner()\r\n if winner: # is not none\r\n return winner\r\n self.__switch_player()", "def solve(self, board: List[List[str]]) -> None:\n if board == [] or board == [[]]: # corner case\n return\n\n r, c = len(board), len(board[0])\n\n def dfs(i, j): # visited i, j neighbors and change o to M\n if i < 0 or i > r - 1 or j < 0 or j > c - 1 or board[i][j] == 'X' or board[i][j] == 'M':\n return\n\n board[i][j] = 'M'\n dfs(i - 1, j)\n dfs(i + 1, j)\n dfs(i, j - 1)\n dfs(i, j + 1)\n\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n dfs(i, j)\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n rows = len(board)\n cols = len(board[0])\n\n def explore(i, j):\n if i < 0 or i >= rows:\n return\n if j < 0 or j >= cols:\n return\n if board[i][j] != \"O\":\n return\n board[i][j] = \"Y\"\n explore(i + 1, j)\n explore(i - 1, j)\n explore(i, j + 1)\n explore(i, j - 1)\n\n for i in [0, rows - 1]:\n for j in 
range(cols):\n explore(i, j)\n\n for j in [0, cols - 1]:\n for i in range(rows):\n explore(i, j)\n\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == \"Y\":\n board[i][j] = \"O\"\n elif board[i][j] == \"O\":\n board[i][j] = \"X\"", "def solve(self, board: List[List[str]]) -> None:\n if board is None or len(board) == 0:\n return \n row, col = len(board), len(board[0])\n for i in range(row):\n self.dfs(board, i, 0)\n self.dfs(board, i, col - 1)\n for j in range(col):\n self.dfs(board, 0, j)\n self.dfs(board, row-1, j)\n for i in range(row):\n for j in range(col):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '-':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n m, n = len(board), len(board[0])\n\n def dfs(x, y):\n if not 0 <= x < m or not 0 <= y < n or board[x][y] != \"O\":\n return\n direction = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n board[x][y] = \"Y\"\n for d in direction:\n dfs(x+d[0], y+d[1])\n\n for i in range(m):\n dfs(i, 0)\n dfs(i, n-1)\n for i in range(1, n-1):\n dfs(0, i)\n dfs(m-1, i)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"Y\":\n board[i][j] = \"O\"", "def solve(self, board: List[List[str]]) -> None:", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def solve(self, board: List[List[str]]) -> None:\r\n if not board:\r\n return\r\n m, n = len(board), len(board[0])\r\n board_filter = lambda p: 0 <= p[0] < m and 0 <= p[1] < n and board[p[0]][p[1]] == 'O'\r\n queue = list(filter(board_filter, [(x, y) for r in range(max(m, n)) \r\n for x, y in ((r, 0), (r, n-1), (0, r), (m-1, r))]))\r\n while queue:\r\n x, y = queue.pop()\r\n board[x][y] = 'W'\r\n queue.extend(list(filter(board_filter, ((x-1, y), (x+1, y), (x, y-1), (x, y+1)))))\r\n for row in board:\r\n for i, c in enumerate(row):\r\n row[i] = 'XO'[c=='W']", "def position_tile(self, target_row, target_col, current_row, current_col):\r\n moves_str = \"\"\r\n # current target is on the upper of 0\r\n if current_col == target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the left of 0\r\n elif current_row == target_row and current_col < target_col:\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += 
\"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n # current target is on the upperleft of 0\r\n elif current_row < target_row and current_col < target_col:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n moves_str += \"dru\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the upperright of 0\r\n elif current_col > target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"dlu\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the right of 0\r\n elif current_col > target_col and current_row == target_row:\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"ulld\"\r\n return moves_str", "def move_to_target(self, target_row, target_col, row, col):\r\n move = \"\"\r\n # typical move to move target tile to target pos.\r\n solver_move = \"druld\"\r\n # move up first\r\n move = (target_row - row) * \"u\"\r\n # conditional statements for moving the tile:\r\n # 1. case curr_pos of tile and target_tile are in same col\r\n if (target_col - col) == 0:\r\n move += \"ld\" + ((target_row - row) - 1) * solver_move\r\n else:\r\n # 2. curr_pos of tile is on the left of target pos\r\n if (target_col - col) > 0:\r\n move += (target_col - col) * \"l\"\r\n if row == 0:\r\n move += (abs(target_col - col) - 1) * \"drrul\"\r\n else:\r\n move += (abs(target_col - col) - 1) * \"urrdl\"\r\n # 3. 
curr_pos of tile is on the right of target pos:\r\n elif (target_col - col) < 0:\r\n move += (abs(target_col - col) - 1) * \"r\"\r\n if row == 0:\r\n move += abs(target_col - col) * \"rdllu\"\r\n else:\r\n move += abs(target_col - col) * \"rulld\"\r\n move += (target_row - row) * solver_move\r\n return move", "def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is \"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\"", "def make_move(board, picked_column, player):\n row = find_first_free_cell(board, picked_column)\n board[row][picked_column] = player\n return board, row", "def move(self, row, col, player):\r\n if player == 1:\r\n self.mat[row][col] = 1\r\n else:\r\n self.mat[row][col] = -1\r\n if self.checkrow(player,row) or self.checkcol(player,col):\r\n return player\r\n if row == col or row + col == self.size-1:\r\n if self.checkdiag(player):\r\n return player\r\n return 0", "def execute(self, row, col, action=None):\n assert action is not None, \"No action selected!\"\n\n if action == 'north':\n if (row-1) < 0 or self.board[row-1, col] == '*':\n return row, col\n elif action == 'south':\n if (row+1) >= self.N or self.board[row+1, col] == '*':\n return row, col\n elif action == 'east':\n if (col+1) >= self.M or self.board[row, col+1] == '*':\n return row, col\n elif action == 'west':\n if (col-1) < 0 or self.board[row, col-1] == '*':\n return row, col\n\n return row + self.step_row[action], col + self.step_col[action]", "def solve(self, board):\n def dfs(board, r, c):\n if r < 0 or c < 0 or r > rows - 1 or c > cols - 1 or board[r][c] == 'X' or board[r][c] == '#':\n return\n board[r][c] = '#'\n dfs(board, r - 1, c)\n dfs(board, r + 1, c)\n dfs(board, r, c - 1)\n dfs(board, r, c + 1)\n\n if len(board) == 0:\n return;\n rows = len(board)\n cols = len(board[0])\n for i in range(rows):\n for j in range(cols):\n if (i == 0 or j == 0 or i == rows - 1 or j == cols - 1) and board[i][j] == 'O':\n dfs(board, i, j)\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == '#':\n board[i][j] = 'O'", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == 
new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def move(self, row: int, col: int, player: int) -> int:\n if player == 1:\n self.newList[row][col] = 1\n self.colSum[col] += 1\n self.rowSum[row] += 1\n if row == col:\n self.diag += 1\n if row + col == (self.n - 1):\n self.revDiag += 1\n if self.rowSum[row] == self.n or self.colSum[col] == self.n or self.diag == self.n or self.revDiag == self.n:\n return 1\n if player == 2:\n self.newList[row][col] = -1\n self.colSum[col] -= 1\n self.rowSum[row] -= 1\n if row == col:\n self.diag -= 1\n if row + col == (self.n - 1):\n self.revDiag -= 1\n if self.rowSum[row] == -self.n or self.colSum[col] == -self.n or self.diag == -self.n or self.revDiag == -self.n:\n return 2\n \n return 0", "def solve(self, board):\r\n if not board or not board[0]:\r\n return\r\n \r\n self.m = len(board)\r\n self.n = len(board[0])\r\n boarder = []\r\n \r\n # Collecting all the 'O' on the boarder\r\n for i in range(self.m):\r\n if board[i][0] == 'O':\r\n boarder.append([i, 0])\r\n if board[i][self.n-1] == 'O':\r\n boarder.append([i, self.n-1])\r\n for j in range(self.n):\r\n if board[0][j] == 'O':\r\n boarder.append([0, j])\r\n if board[self.m-1][j] == 'O':\r\n 
boarder.append([self.m-1, j])\r\n \r\n for row, col in boarder:\r\n self.BFS(board, row, col)\r\n \r\n for row in range(self.m):\r\n for col in range(self.n):\r\n if board[row][col] == 'O':\r\n board[row][col] = 'X'\r\n elif board[row][col] == 'E':\r\n board[row][col] = 'O'\r\n print(board)", "def move(self, row: int, col: int, player: int) -> int:\n self.board[row][col] = 1 if player == 1 else -1\n rowsum = sum(self.board[row])\n colsum = sum([self.board[r][col] for r in range(self.n)])\n diagsum1 = sum([self.board[i][i] for i in range(self.n)])\n diagsum2 = sum([self.board[i][-i-1] for i in range(self.n)])\n if player == 1:\n if rowsum == self.n or colsum == self.n or diagsum1 == self.n or diagsum2 == self.n:\n return 1\n else:\n if rowsum == -self.n or colsum == -self.n or diagsum1 == -self.n or diagsum2 == -self.n:\n return 2\n return 0", "def make_move(board: Connect4Board) -> \"(row, col)\":\r\n\r\n while True:\r\n\r\n try:\r\n\r\n print('\\nPlease Specify your move. Enter the number column of a cell on the board.')\r\n print('-'*85)\r\n \r\n col = Connect4GameUI.move_col(board)\r\n row = Connect4GameUI._get_valid_row(board, col)\r\n print(row,col)\r\n return row, col\r\n\r\n break\r\n\r\n except:\r\n print('\\nInvalid move!!!')\r\n print('Please try it again.')", "def move(self, row: int, col: int, player: int) -> int:\n n = self.n\n if player == 1:\n self.rows_1[row] += 1\n self.cols_1[col] += 1\n if player == 2:\n self.rows_2[row] += 1\n self.cols_2[col] += 1\n if row == col:\n self.diag1[row] = player\n if row + col + 1 == n:\n self.diag2[row] = player\n f = 0\n g = 0\n for i in range(n):\n if self.rows_1[row] == n or self.cols_1[col] == n:\n return 1\n if self.rows_2[row] == n or self.cols_2[col] == n:\n return 2 \n if self.diag1[i] != self.diag1[0]:\n f = 1\n if self.diag2[i] != self.diag2[0]:\n g = 1\n if f == 0:\n return self.diag1[0]\n if g == 0:\n return self.diag2[0]\n return 0", "def solve(self, board: List[List[str]]) -> None:\n def dfs(board, i, j):\n m = len(board)\n n = len(board[0])\n if i < 0 or i >= m or j < 0 or j >= n: return\n\n if board[i][j] != 'O': return\n\n board[i][j] = '#'\n [dfs(board, i+di, j+dj) for di, dj in [(0, 1), (1, 0), (0, -1), (-1, 0)]]\n\n if len(board) == 0: return\n m = len(board)\n n = len(board[0])\n\n for i in range(m):\n dfs(board, i, 0)\n dfs(board, i, n-1)\n\n for j in range(n):\n dfs(board, 0, j)\n dfs(board, m-1, j)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n self.nRow, self.nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < self.nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < self.nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, self.nRow - 1]:\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, self.nCol - 1]:\n for kr in range(self.nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(self.nRow):\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0:\n return\n self.h = 
len(board)\n self.w = len(board[0])\n self.board = board\n for i in range(self.h):\n for j in range(self.w):\n if i == 0 or i == self.h-1 or j == 0 or j == self.w-1:\n #print (i,j)\n self.dfs((i,j))\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"O\":\n self.board[i][j]=\"X\"\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"#\":\n self.board[i][j]=\"O\"", "def move(self, row, col, player):\n offset = player * 2 - 3 # 1 or -1\n self.row[row] += offset\n self.col[col] += offset\n if row == col:\n self.diag += offset\n if row + col == self.n - 1:\n self.anti_diag += offset\n if self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 2\n if -self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 1\n return 0", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return", "def solve_util(self, board, col):\n try:\n if col == self.N:\n self.print_sol(board)\n return True\n\n # Trying to place this queen in all rows one by one\n res = False\n for i in range(self.N):\n if self.is_safe(board, i, col):\n board[i][col] = 1\n res = self.solve_util(board, col + 1) or res\n if type(res) == dict:\n return res\n board[i][col] = 0 # Backtracking...\n\n # if queen cannot be placed in any row in this col, then alas\n # we return false..\n return res\n except KeyboardInterrupt:\n print('Keyboard Interrupted!')\n return self.Outputs", "def position_tile(self, target_row, target_col, cur_row, cur_col, need_ld=True):\n move_str = ''\n if cur_row == target_row:\n if cur_col < target_col:\n move_str += 'l' * (target_col - cur_col)\n if target_col - cur_col > 1:\n move_str += 'ur'\n move_str += 'druldru' * (target_col - cur_col - 1)\n else:\n move_str += 'ur' if not need_ld else ''\n need_ld = False\n else:\n move_str += 'r' * (cur_col - target_col)\n if cur_col - target_col > 1:\n move_str += 'ul'\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n else:\n need_ld = False\n else:\n move_str += 'u' * (target_row - cur_row)\n if cur_col < target_col:\n move_str += ('l' * (target_col - cur_col) + 'dru')\n move_str += 'druldru' * (target_col - cur_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n elif cur_col > target_col:\n move_str += ('r' * (cur_col - target_col) + 'dlu')\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n else:\n move_str += 'lddru' * (target_row - cur_row - 1)\n if need_ld:\n move_str += 'ld'\n return 
move_str", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def make_move(self, row, column):\n\t\tif self.board[int(row)][int(column)] == '-':\n\t\t\tself.board[int(row)][int(column)] = self.marker\n\t\telse:\n\t\t\tprint(\"That spot is occupied, you messed up, you lose your turn for doing bad things\")", "def solve(self, board: 'List[List[str]]') -> 'None':\n if not board:\n return\n m, n = len(board), len(board[0])\n save = [ij for k in range(m + n) for ij in ((0, k), (m - 1, k), (k, 0), (k, n - 1))]\n while save:\n i, j = save.pop()\n if -1 < i < m and -1 < j < n and board[i][j] == 'O':\n board[i][j] = 'S'\n save += (i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)\n for row in board:\n for i, c in enumerate(row):\n row[i] = 'XO'[c == 'S']", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n nRow, nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, nRow - 1]:\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, nCol - 1]:\n for kr in range(nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(nRow):\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return\n\n m = len(board)\n n = len(board[0])\n\n uf = UnionFind(m * n + 1)\n dummy = m * n\n\n # connect 'O' at first and last col with dummy\n for i in range(m):\n if board[i][0] == 'O':\n uf.union(dummy, i * n)\n if board[i][-1] == 'O':\n uf.union(dummy, i * n + n - 1)\n\n # connect 'O' at first and last row with dummy\n for j in range(n):\n if board[0][j] == 'O':\n uf.union(dummy, j)\n if board[-1][j] == 'O':\n uf.union(dummy, n * (m-1) + j)\n\n d = [(1, 0), (0, 1), (0, -1), (-1, 0)]\n\n for i in range(1, m-1):\n for j in range(1, n-1):\n if board[i][j] == 'O':\n for di, dj in d:\n x = i+di\n y = j+dj\n if board[x][y] == 'O':\n uf.union(x*n+y, i*n+j)\n\n # change not connected 'O' with 'X'\n for i in range(1, m-1):\n for j in range(1, n-1):\n if not uf.connected(dummy, i * n + j):\n board[i][j] = 'X'", "def result(self, row, col, move):\n start = (row, col)\n end = self.updateCell(row, col, move)\n\n return self.change(start, end)", "def move_x(self, row, column):\n\n #returns false if game has already been won\n if self._game_state != \"UNFINISHED\":\n return False\n\n # checks if x tries to move out of bounds\n if row not in range(8) or column not in range(8):\n return False\n\n # returns false/invalid move if x tries to move more than one row at a time or\n # non diagonal\n if (row - self._current_x_row) > 1 or (column - self._current_x_column) > 1 or (\n self._current_x_row - row) > 1 or (self._current_x_column - column) > 1:\n return False\n\n if self._current_x_column == column:\n return False\n\n if self._current_x_row == row:\n return False\n\n if \"o\" in self._board[row][column]:\n 
return False\n\n #places x in the specified row and column if the move is legal\n else:\n self._board[self._current_x_row].remove(\"x\")\n self._board[self._current_x_row].append(\"\")\n self._board[row][column] = \"x\"\n self._current_x_row = row\n self._current_x_column = column\n self._current_row += 1\n self._lower_right = (self._current_x_row + 1, self._current_x_column + 1)\n self._lower_left = (self._current_x_row + 1, self._current_x_column - 1)\n self._upper_right = (self._current_x_row - 1, self._current_x_column + 1)\n self._upper_left = (self._current_x_row - 1, self._current_x_column - 1)\n self._row1 = (\n self._board[0][0],\n self._board[1][0],\n self._board[2][0],\n self._board[3][0],\n self._board[4][0],\n self._board[5][0],\n self._board[6][0],\n self._board[7][0])\n\n self._row7 = (\n self._board[0][7],\n self._board[1][7],\n self._board[2][7],\n self._board[3][7],\n self._board[4][7],\n self._board[5][7],\n self._board[6][7],\n self._board[7][7])\n\n\n # checks if four \"o\" pieces surrounds x, if so, then x has no more moves and o wins\n if \"x\" not in self._board[7]:\n if \"o\" in self._board[self._lower_right[0]][self._lower_right[1]] and \"o\" in \\\n self._board[self._lower_left[0]][\n self._lower_left[1]] and \"o\" in self._board[self._upper_right[0]][\n self._upper_right[1]] and \"o\" in \\\n self._board[self._upper_left[0]][self._upper_left[1]]:\n self._game_state = \"O_WON\"\n\n # checks if x is in the last column and o pieces surrounds x, x loses\n if \"x\" in self._row7 and \"o\" in self._board[self._lower_left[0]][self._lower_left[1]] and \"o\" in \\\n self._board[self._upper_left[0]][self._upper_left[1]]:\n self._game_state = \"O_WON\"\n\n # checks if x is in the first row and o surrounds x, x loses\n if \"x\" in self._board[0] and \"o\" in self._board[self._lower_right[0]][self._lower_right[1]] and \"o\" in \\\n self._board[self._lower_left[0]][self._lower_left[1]]:\n self._game_state = \"O_WON\"\n\n # checks if x is in the first column and o pieces surrounds x, x loses\n if \"x\" in self._row1 and \"o\" in self._board[self._lower_right[0]][self._lower_right[1]] and \"o\" in \\\n self._board[self._upper_right[0]][self._upper_right[1]]:\n self._game_state = \"O_WON\"\n\n # winning condition for \"x\" piece upon reaching last row\n if \"x\" in self._board[7]:\n self._game_state = \"X_WON\"\n\n return True", "def move(self, row: int, col: int, player: int) -> int:\n s = -1 if player == 1 else 1\n\n self.rows[row] += s\n if abs(self.rows[row]) == self.n:\n return player\n\n self.cols[col] += s\n if abs(self.cols[col]) == self.n:\n return player\n\n if row == col:\n self.diagonals[0] += s\n if abs(self.diagonals[0]) == self.n:\n return player\n\n if (row + col) == self.n - 1:\n self.diagonals[1] += s\n if abs(self.diagonals[1]) == self.n:\n return player\n\n return 0", "def solve(self, board: List[List[str]]) -> None:\n\n def expansion(i,j):\n for di,dj in {(-1,0),(1,0),(0,-1),(0,1)}:\n if -1<i+di<m and -1<j+dj<n and state[i+di][j+dj]=='O':\n return True\n return False\n\n if not board:\n return board\n\n m = len(board)\n n = len(board[0]) \n state = [['X']*n for _ in range(m)]\n\n for j in range(n):\n state[0][j] = board[0][j]\n state[m-1][j] = board[m-1][j]\n \n for i in range(m):\n state[i][0] = board[i][0]\n state[i][n-1] = board[i][n-1]\n \n flag = 1\n\n while flag:\n flag = 0\n\n for k in range(1, (1+min(m,n))//2):\n for j in range(k,n-k):\n if board[k][j]=='O' and state[k][j] == 'X' and expansion(k,j):\n state[k][j] = 'O'\n flag = 1\n \n if 
board[m-1-k][j]=='O' and state[m-1-k][j] == 'X' and expansion(m-1-k,j):\n state[m-1-k][j] = 'O'\n flag = 1\n \n for i in range(k,m-k):\n if board[i][k]=='O' and state[i][k] == 'X' and expansion(i,k):\n state[i][k] = 'O'\n flag = 1\n \n if board[i][n-1-k]=='O' and state[i][n-1-k] == 'X' and expansion(i,n-1-k):\n state[i][n-1-k] = 'O'\n flag = 1\n\n board[:] = state[:]", "def move(self, row, column, symbol):\n game_state = self.determine_game_state()\n if game_state not in (GameState.GAME_NOT_STARTED, GameState.GAME_IN_PROGRESS):\n return MoveResults.MOVE_INVALID\n\n # check for initial move\n if self.board == BLANK_BOARD and symbol == O_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n # check for invalid row and column\n if row < 0 or row > 2 or column < 0 or column > 2:\n return MoveResults.MOVE_INVALID\n\n # make sure the game piece is valid\n if symbol != X_SYMBOL and symbol != O_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n # make sure the game piece isn't moving out of turn\n x_moves = self.board.count(X_SYMBOL)\n o_moves = self.board.count(O_SYMBOL)\n if symbol == X_SYMBOL and x_moves > o_moves:\n return MoveResults.MOVE_INVALID\n elif symbol == O_SYMBOL and o_moves >= x_moves:\n # note that x always goes first.\n return MoveResults.MOVE_INVALID \n\n # figure out position.\n position = (3 * row) + column\n\n # make sure there's not already a piece there.\n if self.board[position] != EMPTY_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n self.board = self.board[:position] + symbol + self.board[position+1:] \n return MoveResults.MOVE_VALID", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def solve(self, board: List[List[str]]) -> None:\n def DFS(board, i, j):\n q = []\n q.append([i, j])\n \n while q:\n x, y = q.pop()\n board[x][y] = \"*\"\n neighbors = ((0, 1), (0, -1), (1, 0), (-1, 0))\n for dx, dy in neighbors:\n if 0 <= x + dx <= len(board) - 1 and 0 <= y + dy <= len(board[0]) - 1 and board[x + dx][y + dy] == \"O\":\n q.append([x + 
dx, y + dy])\n \n \n # first row\n i = 0\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last row\n i = len(board) - 1\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # first column\n j = 0\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last column\n j = len(board[0]) - 1\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n \n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"*\":\n board[i][j] = \"O\"", "def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def put(self, column):\n column -= 1\n if self.occupied[column] >= 4 or self.won:\n return 'ERROR'\n self.history.append(column + 1)\n row = self.occupied[column]\n # assign 1 to player 1, and -1 to player 2\n if len(self.history) % 2 == 1:\n player = 1\n self.board[3 - row][column] = 1\n else:\n player = -1\n self.board[3 - row][column] = 2\n # add player score to column, row and diagonal scores\n self.columnScore[column] += player\n self.rowScore[row] += player\n self.occupied[column] += 1;\n if column == row:\n self.diagonal += player\n if column + row == 3:\n self.antidiagonal += player\n # check column, row and diagonal scores\n # if absolute value of one of them is 4\n # which means the original value is either 4 or -4\n # and one of the player has occupied all 4 of them\n # which means the player has won in that row/column/diagonal\n # and thus return \"WIN\"\n if (abs(self.rowScore[row]) == 4 or abs(self.columnScore[column]) == 4\n or abs(self.diagonal) == 4 or abs(self.antidiagonal) == 4):\n self.won = True\n return 'WIN'\n # check if there is still non-full columns\n # in other words check if the board is full\n for i in range(0, self.size):\n # if board is not full return \"OK\"\n if self.occupied[i] < 4:\n return 'OK'\n # if the board is full, return \"DRAW\"\n return 'DRAW'", "def move(self, row, col, player):\n if self.winning == True:\n return\n self.matrix[row][col] = player\n n = len(self.matrix)\n indicator = True\n for i in range(n):\n if self.matrix[row][i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n \n indicator = True\n for i in range(n):\n if self.matrix[i][col] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n \n if row == col:\n indicator = True\n for i in range(n):\n if self.matrix[i][i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n if row + col == n - 1:\n indicator = True\n for i in range(n):\n if self.matrix[i][n - 1 - i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n return 0", "def solve(self, board: 'List[List[str]]') -> 'None':\n\n def dfs(i, j, tmp):\n nonlocal flag\n if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):\n flag = False\n return\n if board[i][j] != 'O' or [i, j] in tmp:\n return\n tmp.append([i, j])\n dfs(i - 1, j, tmp)\n dfs(i + 1, j, tmp)\n dfs(i, j + 1, tmp)\n dfs(i, j - 1, tmp)\n return tmp\n\n change = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 'O' and [i, j] not in change:\n tmp = []\n flag = True\n tmp = dfs(i, j, tmp[:])\n if flag:\n for loc in tmp:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'\n\n for loc in change:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'", "def 
solve_(self, x, y, board, path):\n if self.SOLVED:\n return\n if self.is_done(board):\n self.print_path(path)\n self.SOLVED = True\n return\n for new_x, new_y in self.next_click(x, y, board):\n if new_x is None or new_y is None:\n return\n new_board = self.click(new_x, new_y, board)\n self.solve_(\n x=0, y=0,\n board=new_board,\n path=path + [((new_x, new_y), new_board)]\n )", "def move(self, row, col, player):\n toadd = 1 if player == 1 else -1\n \n self.row[row] += toadd\n self.col[col] += toadd\n if row == col: self.diagonal += toadd\n if col == self.n - row -1 : self.antidiag += toadd\n \n if abs(self.row[row]) == self.n or abs(self.col[col]) == self.n or abs(self.diagonal) == self.n or abs(self.antidiag) == self.n:\n return player\n else:\n return 0", "def move(self, row, col, player):", "def get_move(board, player):\n row, col = 0, 0\n return row, col", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def test_perform_move(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertFalse(p.perform_move(\"taco\"))\n self.assertTrue(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,0],[7,8,6]])\n self.assertFalse(p.perform_move('right'))\n p = hw.create_tile_puzzle(2, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('up'))\n self.assertFalse(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,0,4],[5,6,3,7]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertFalse(p.perform_move('down'))\n self.assertFalse(p.perform_move('left'))\n self.assertEqual(p.get_board(), [[0,1,2,3]])" ]
[ "0.7611342", "0.7530595", "0.7489633", "0.74769557", "0.7346029", "0.7312759", "0.72872084", "0.72763294", "0.72613305", "0.72458375", "0.72417307", "0.7236731", "0.72223794", "0.7210395", "0.71957016", "0.71925163", "0.7142041", "0.71206504", "0.70840454", "0.7064091", "0.7038805", "0.7027271", "0.69623935", "0.695526", "0.69471264", "0.694509", "0.69337404", "0.69234896", "0.6916079", "0.6878287", "0.68757343", "0.68463266", "0.6837197", "0.67863864", "0.6742842", "0.6735732", "0.67058396", "0.66578364", "0.66284144", "0.6625063", "0.6580358", "0.65620995", "0.65620995", "0.65620995", "0.65583694", "0.6551443", "0.6551443", "0.645354", "0.64497983", "0.6408909", "0.6379721", "0.6374615", "0.63398385", "0.633639", "0.6325701", "0.62638223", "0.6257799", "0.62237", "0.62125415", "0.61983734", "0.6138262", "0.60704345", "0.6063678", "0.6057803", "0.6055227", "0.6052307", "0.6052291", "0.60510117", "0.60505867", "0.60382706", "0.6028564", "0.60173017", "0.6004885", "0.6001108", "0.59939766", "0.5991939", "0.59812343", "0.5976079", "0.59581846", "0.5937005", "0.5924132", "0.59235555", "0.59208554", "0.591132", "0.59080476", "0.5894667", "0.5890435", "0.5885432", "0.5883782", "0.5882655", "0.5874512", "0.58741593", "0.5873913", "0.5868474", "0.58598614", "0.5855099", "0.584924", "0.58490455", "0.58463395", "0.58241105" ]
0.74047565
4
Solve the upper left 2x2 part of the puzzle. Updates the puzzle and returns a move string
def solve_2x2(self): cur_row, cur_col = self.current_position(0, 0) move_str = 'u' * cur_row + 'l' * cur_col self.update_puzzle(move_str) if self.check_2x2_solved(): return move_str else: while not self.check_2x2_solved(): move_str += 'rdlu' self.update_puzzle('rdlu') return move_str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. \r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). 
\r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"", "def solve_2x2(self):\r\n moves_str = \"\"\r\n # move zero to the most upper left\r\n zero_row, zero_col = self.current_position(0, 0)\r\n ups = zero_row - 0\r\n lefts = zero_col - 0\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n \r\n # get zero, one, two, three positions\r\n self.update_puzzle(moves_str)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n counter = 0\r\n while counter <= 3 and \\\r\n (zero_row != 0 or zero_col != 0 or \\\r\n one_row!= 0 or one_col != 1 or \\\r\n two_row != 1 or two_col != 0 or \\\r\n three_row != 1 or three_col != 1):\r\n move = CIRCLES[\"RIGHT_CIRCLES\"]\r\n moves_str += move\r\n self.update_puzzle(move)\r\n counter += 1\r\n zero_row, zero_col = 
self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n \r\n print \"solve_2x2\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def solve_2x2(self):\n \n assert self.get_number(1,1) == 0, \"zero tile should be at row 1, col 1\"\n assert self.row1_invariant(1), \"tiles to right and below incorrectly ordered\"\n \n # Moves the zero tile to (0,0).\n self.update_puzzle(\"lu\")\n\n # Repositions the upper left 2x2 part up to 3 times, \n # each time checking whether the puzzle is solved.\n rotation_num = 0\n if self.row0_invariant(0) == False:\n for dummy_rotation in range(3):\n while self.row0_invariant(0) == False:\n rotation_num += 1\n self.update_puzzle(\"rdlu\")\n\n assert self.row0_invariant(0), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"lu\" + (\"rdlu\" * rotation_num) \n return move_str", "def solve_2x2(self):\r\n assert self.row1_invariant(1)\r\n pos_1_0 = self.get_number(1, 0)\r\n pos_0_0 = self.get_number(0, 0)\r\n pos_0_1 = self.get_number(0, 1)\r\n # create grid and solve individual cases\r\n grid = [pos_1_0, pos_0_0, pos_0_1]\r\n if grid == [self.get_width(), 1, self.get_width() + 1]:\r\n move = \"ul\"\r\n elif grid == [1, self.get_width() + 1, self.get_width()]:\r\n move = \"lurdlu\"\r\n elif grid == [self.get_width() + 1, self.get_width(), 1]:\r\n move = \"lu\"\r\n self.update_puzzle(move)\r\n return move", "def 
solve_2x2(self):\n # replace with your code\n string = ''\n num1 = self.get_number(0, 0)\n num2 = self.get_number(0, 1)\n num3 = self.get_number(1, 0)\n max_num = max([num1, num2, num3])\n min_num = min([num1, num2, num3])\n if num1 == min_num and num2 == max_num:\n string += 'ul'\n elif num1 == max_num and num3 == min_num:\n string += 'ul'\n string += 'rdlu' * 2\n elif num2 == min_num and num3 == max_num:\n string += 'ul'\n string += 'rdlu'\n print '2x2 Path', string\n self.update_puzzle(string)\n return string", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, 
len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n 
self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n 
self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n n = len(board)\n if n == 0:\n return\n m = len(board[0])\n for i in range(m):\n self.traverse(board, 0, i, n, m)\n self.traverse(board, n - 1, i, n, m)\n for i in range(n):\n self.traverse(board, i, 0, n, m)\n self.traverse(board, i, m - 1, n, m)\n for i in range(n):\n for j in range(m):\n board[i][j] = 'X' if board[i][j] == 'O' else board[i][j]\n for i in range(n):\n for j in range(m):\n board[i][j] = 'O' if board[i][j] == '_' else board[i][j]", "def solve_interior_tile(self, target_row, target_col):\r\n assert self._grid[target_row][target_col] == 0\r\n moves_str = \"\"\r\n target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n \r\n moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_interior_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # 
Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def solve(self, board: List[List[str]]) -> None:\n rows = len(board)\n cols = len(board[0])\n\n def explore(i, j):\n if i < 0 or i >= rows:\n return\n if j < 0 or j >= cols:\n return\n if board[i][j] != \"O\":\n return\n board[i][j] = \"Y\"\n explore(i + 1, j)\n explore(i - 1, j)\n explore(i, j + 1)\n explore(i, j - 1)\n\n for i in [0, rows - 1]:\n for j in range(cols):\n explore(i, j)\n\n for j in [0, cols - 1]:\n for i in range(rows):\n explore(i, j)\n\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == \"Y\":\n board[i][j] = \"O\"\n elif board[i][j] == \"O\":\n board[i][j] = \"X\"", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n m, n = len(board), len(board[0])\n\n def dfs(x, y):\n if not 0 <= x < m or not 0 <= y < n or board[x][y] != \"O\":\n return\n direction = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n board[x][y] = \"Y\"\n for d in direction:\n dfs(x+d[0], y+d[1])\n\n for i in range(m):\n dfs(i, 0)\n dfs(i, n-1)\n for i in range(1, n-1):\n dfs(0, i)\n dfs(m-1, i)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"Y\":\n board[i][j] = \"O\"", "def solve(self, board) -> None:\n if board == [[]] or board == []:\n return\n\n r, c = len(board), len(board[0])\n\n from collections import deque\n queue = deque()\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n queue.append([i, j])\n board[i][j] = 'M'\n\n while queue:\n i, j = queue.popleft()\n for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):\n if 0 <= x <= r - 1 and 0 <= y <= c - 1 and board[x][y] == 'O':\n board[x][y] = 'M'\n queue.append([x, y])\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" 
* (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def solve_interior_tile(self, target_row, target_col):\n cur_row, cur_col = self.current_position(target_row, target_col)\n move_str = self.position_tile(target_row, target_col, cur_row, cur_col)\n self.update_puzzle(move_str)\n return move_str", "def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is \"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\"", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve(self, board: List[List[str]]) -> None:", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n 
queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return", "def solve(self, board):\r\n if not board or not board[0]:\r\n return\r\n \r\n self.m = len(board)\r\n self.n = len(board[0])\r\n boarder = []\r\n \r\n # Collecting all the 'O' on the boarder\r\n for i in range(self.m):\r\n if board[i][0] == 'O':\r\n boarder.append([i, 0])\r\n if board[i][self.n-1] == 'O':\r\n boarder.append([i, self.n-1])\r\n for j in range(self.n):\r\n if board[0][j] == 'O':\r\n boarder.append([0, j])\r\n if board[self.m-1][j] == 'O':\r\n boarder.append([self.m-1, j])\r\n \r\n for row, col in boarder:\r\n self.BFS(board, row, col)\r\n \r\n for row in range(self.m):\r\n for col in range(self.n):\r\n if board[row][col] == 'O':\r\n board[row][col] = 'X'\r\n elif board[row][col] == 'E':\r\n board[row][col] = 'O'\r\n print(board)", "def solve(self, board: List[List[str]]) -> None:\n if board == [] or board == [[]]: # corner case\n return\n\n r, c = len(board), len(board[0])\n\n def dfs(i, j): # visited i, j neighbors and change o to M\n if i < 0 or i > r - 1 or j < 0 or j > c - 1 or board[i][j] == 'X' or board[i][j] == 'M':\n return\n\n board[i][j] = 'M'\n dfs(i - 1, j)\n dfs(i + 1, j)\n dfs(i, j - 1)\n dfs(i, j + 1)\n\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n dfs(i, j)\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = 
(zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: 
x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', 
cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def solve(self, board: List[List[str]]) -> None:\r\n if not board:\r\n return\r\n m, n = len(board), len(board[0])\r\n board_filter = lambda p: 0 <= p[0] < m and 0 <= p[1] < n and board[p[0]][p[1]] == 'O'\r\n queue = list(filter(board_filter, [(x, y) for r in range(max(m, n)) \r\n for x, y in ((r, 0), (r, n-1), (0, r), (m-1, r))]))\r\n while queue:\r\n x, y = queue.pop()\r\n board[x][y] = 'W'\r\n queue.extend(list(filter(board_filter, ((x-1, y), (x+1, y), (x, y-1), (x, y+1)))))\r\n for row in board:\r\n for i, c in enumerate(row):\r\n row[i] = 'XO'[c=='W']", "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we 
suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\r\n # check if curr_pos (i, 0) where i > 1\r\n assert self.lower_row_invariant(target_row, 0)\r\n move = \"ur\"\r\n self.update_puzzle(move)\r\n row, col = self.current_position(target_row, 0)\r\n if row == target_row and col == 0:\r\n move_to_target = (self.get_width() - 2) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n else:\r\n move_to_target = self.move_to_target(target_row - 1, 1, row, col)\r\n # add solver move to str\r\n move_to_target += \"ruldrdlurdluurddlu\" + (self.get_width() - 1) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)\r\n return move", "def solve_col0_tile(self, target_row):\n move_str = 'ur'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(target_row, 0)\n if cur_row == target_row and cur_col == 0:\n move_str += 'r' * (self._width - 2)\n else:\n move_str += self.position_tile(target_row-1, 1, cur_row, cur_col)\n move_str += 'ruldrdlurdluurddlur'\n move_str += 'r' * (self._width - 2)\n self.update_puzzle(move_str[2:])\n return move_str", "def solve(self, board: List[List[str]]) -> None:\n\n def expansion(i,j):\n for di,dj in {(-1,0),(1,0),(0,-1),(0,1)}:\n if -1<i+di<m and -1<j+dj<n and state[i+di][j+dj]=='O':\n return True\n return False\n\n if not board:\n return board\n\n m = len(board)\n n = len(board[0]) \n state = [['X']*n for _ in range(m)]\n\n for j in range(n):\n state[0][j] = board[0][j]\n state[m-1][j] = board[m-1][j]\n \n for i in range(m):\n state[i][0] = board[i][0]\n state[i][n-1] = board[i][n-1]\n \n flag = 1\n\n while flag:\n flag = 0\n\n for k in range(1, (1+min(m,n))//2):\n for j in range(k,n-k):\n if board[k][j]=='O' and state[k][j] == 'X' and expansion(k,j):\n state[k][j] = 'O'\n flag = 1\n \n if board[m-1-k][j]=='O' and state[m-1-k][j] == 'X' and expansion(m-1-k,j):\n state[m-1-k][j] = 'O'\n flag = 1\n \n for i in range(k,m-k):\n if board[i][k]=='O' and state[i][k] == 'X' and expansion(i,k):\n state[i][k] = 'O'\n flag = 1\n \n if board[i][n-1-k]=='O' and state[i][n-1-k] == 'X' and expansion(i,n-1-k):\n state[i][n-1-k] = 'O'\n flag = 1\n\n board[:] = state[:]", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n self.nRow, self.nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < self.nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < self.nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, self.nRow - 1]:\n for kc in 
range(self.nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, self.nCol - 1]:\n for kr in range(self.nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(self.nRow):\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def solve_col0_tile(self, target_row):\r\n # print '----------------------------------'\r\n # print 'SOLVING ZERO COLOUMN'\r\n assert self.lower_row_invariant(target_row,0), \"Invarian is False\"\r\n whole_move = ''\r\n #current_position = self.current_position(target_row, 0)\r\n current_row, current_col = self.current_position(target_row, 0)\r\n zero_row, zero_col = target_row, 0\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, 0)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ur'\r\n \r\n recomended_move = 'ur'\r\n whole_move += recomended_move\r\n zero_col += len(filter(lambda x: x=='r', recomended_move))\r\n zero_row -= len(filter(lambda x: x=='u', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n if self.current_position(target_row, 0) == \\\r\n (target_row, 0):\r\n # print 'TT stays in correct place after recomended move'\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n self.update_puzzle(zero_to_place_move)\r\n whole_move += zero_to_place_move\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move\r\n #move ZERO tile to the right\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n ### reposition TT to (target_row -1, 1)\r\n ### reposition ZERO tile to (target_row-1,0)\r\n \r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (current_col - zero_col) * 'r'\r\n path_for_zero = (zero_row - current_row) * 'u' + (current_col - zero_col) * 'r'\r\n whole_move += path_for_zero\r\n zero_col += len(filter(lambda x: x=='r', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n counter = 0\r\n while self.current_position(target_row, 0) != (target_row -1, 1) or \\\r\n (zero_row,zero_col) != (target_row-1,0):\r\n cyclic_moves = ''\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n # print 'We are in while loop'\r\n counter += 1\r\n if zero_row < current_row:\r\n # print 'Zero row under current TT '\r\n if self.current_position(target_row, 0) == (target_row -1, 1):\r\n # print 'TT is in the reccomended posiont (target_row -1, 1) \\n and we are movind zero to the left side of TT '\r\n cyclic_moves = 'ld'\r\n whole_move += 
cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n else:\r\n # print 'TT should be one tile down'\r\n cyclic_moves = 'lddru'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n #### Case 1 if ZERO located in the right of\r\n #### target tile (after it)\r\n if zero_col > current_col:\r\n # print ' Look in the up puzzle, zero in the right side'\r\n if current_col != 1:\r\n # print 'not under target place (target_row -1, 1)'\r\n cyclic_moves = 'dllur'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n whole_move += cyclic_moves\r\n # elif current_col == 1 and self._grid[zero_row+1][zero_col] < \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n elif current_col == 1: \r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self._grid[zero_row+1][zero_col] > \\\r\n self._grid[current_position[0]][current_position[1]]:\r\n print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n cyclic_moves = 'ul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n ### Solwing 3x2 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 3x2 puzzle formed recently'\r\n move3x2 = 'ruldrdlurdluurddlur'\r\n whole_move += move3x2\r\n zero_col -= len(filter(lambda x: x=='l', move3x2))\r\n zero_col += len(filter(lambda x: x=='r', move3x2))\r\n zero_row += len(filter(lambda x: x=='d', move3x2))\r\n zero_row -= len(filter(lambda x: x=='u', move3x2))\r\n self.update_puzzle(move3x2)\r\n # print 'Grid afret 3x2 solver move'\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n #####Moving ZERO to the (target_row - 1, n - 1) position where\r\n ##### 'n' is a grid height.\r\n # print self._width-1 - zero_col\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n whole_move += zero_to_place_move\r\n self.update_puzzle(zero_to_place_move)\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move", "def solve(self, board: 'List[List[str]]') -> 'None':\n if not board:\n return\n m, n = len(board), len(board[0])\n save = [ij for k in range(m + n) for ij in ((0, k), (m - 1, k), (k, 0), (k, n - 1))]\n while save:\n i, j = save.pop()\n if -1 < i < m and -1 < j < n and board[i][j] == 'O':\n board[i][j] = 'S'\n save += (i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)\n for row in board:\n for i, c in enumerate(row):\n row[i] = 'XO'[c == 'S']", "def solve_interior_tile(self, target_row, target_col):\n # replace with your 
code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', col_difference\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def solve_row1_tile(self, target_col):\n cur_row, cur_col = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False)\n self.update_puzzle(move_str)\n return move_str", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - 
len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the 
left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * (col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 'ruldrdlurdluurddlu'\n elif col_difference == 0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n nRow, nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, nRow - 1]:\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, nCol - 1]:\n for kr in range(nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(nRow):\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def solve(self, board) -> None:\n x_length = len(board)\n if x_length == 0: \n return\n\n y_length = len(board[0])\n confirmed = set()\n dfs = []\n for i in range(x_length):\n if board[i][0] == 'O':\n board[i][0] = 'temp'\n dfs.append((i, 0))\n if board[i][y_length - 1] == 'O':\n board[i][y_length - 1] = 'temp'\n dfs.append((i, y_length - 1))\n for j in range(y_length):\n if board[0][j] == 'O':\n board[0][j] = 'temp'\n dfs.append((0, j))\n if board[x_length - 1][j] == 'O':\n board[x_length - 1][j] = 'temp'\n dfs.append((x_length - 1, j))\n while dfs:\n i, j = dfs.pop()\n confirmed.add((i, j))\n if i+1 < x_length and board[i+1][j] == 'O':\n board[i+1][j] = 'temp'\n dfs.append((i + 1, j))\n if i > 0 and board[i-1][j] == 'O':\n board[i-1][j] = 'temp'\n dfs.append((i-1, j))\n if j+1 < y_length and board[i][j+1] == 'O':\n board[i][j+1] = 'temp'\n dfs.append((i, j + 1))\n if j > 0 and board[i][j-1] == 'O':\n board[i][j-1] = 'temp'\n 
dfs.append((i, j-1))\n for i in range(x_length):\n for j in range(y_length):\n if (i, j) in confirmed:\n board[i][j] = 'O'\n else:\n board[i][j] = 'X'\n return", "def solve(board):\r\n \r\n #An O(2mn) time solution; the first O(mn) traversal is to preform a bfs on all tiles attached to edge 'O' tiles (can't convert to 'X's); the second is to convert all remaining 'O's into 'X's\r\n \r\n def bfs(curr,r,c):\r\n if not curr: return\r\n prev = len(curr)\r\n for n in range(0,prev):\r\n i,j = curr[n][0],curr[n][1]\r\n board[i][j] = 'A'\r\n for x,y in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n x_n = i+x\r\n y_n = j+y\r\n if x_n >= 0 and x_n < r and y_n >= 0 and y_n < c and board[x_n][y_n] == \"O\":\r\n curr += [(x_n,y_n)]\r\n bfs(curr[prev:],r,c)\r\n\r\n \r\n q,r,c = [],len(board),len(board[0])\r\n if not r or q: return\r\n\r\n for i in range(r):\r\n for j in range(c):\r\n if (i==0 or j==0 or i==r-1 or j==c-1) and board[i][j] == \"O\":\r\n q += [(i,j)]\r\n \r\n bfs(q,r,c)\r\n\r\n for i in range(r):\r\n for j in range(c): \r\n if board[i][j] == \"O\": \r\n board[i][j] = \"X\"\r\n elif board[i][j] == \"A\":\r\n board[i][j] = \"O\"\r\n \r\n return", "def solve(self, board: List[List[str]]) -> None:\n if board is None or len(board) == 0:\n return \n row, col = len(board), len(board[0])\n for i in range(row):\n self.dfs(board, i, 0)\n self.dfs(board, i, col - 1)\n for j in range(col):\n self.dfs(board, 0, j)\n self.dfs(board, row-1, j)\n for i in range(row):\n for j in range(col):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '-':\n board[i][j] = 'O'", "def solve_row0_tile(self, target_col):\n move_str = 'ld'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(0, target_col)\n if cur_row == 0 and cur_col == target_col:\n return move_str\n else:\n move_str += self.position_tile(1, target_col-1, cur_row, cur_col)\n move_str += 'urdlurrdluldrruld'\n self.update_puzzle(move_str[2:])\n return move_str", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return\n\n m = len(board)\n n = len(board[0])\n\n uf = UnionFind(m * n + 1)\n dummy = m * n\n\n # connect 'O' at first and last col with dummy\n for i in range(m):\n if board[i][0] == 'O':\n uf.union(dummy, i * n)\n if board[i][-1] == 'O':\n uf.union(dummy, i * n + n - 1)\n\n # connect 'O' at first and last row with dummy\n for j in range(n):\n if board[0][j] == 'O':\n uf.union(dummy, j)\n if board[-1][j] == 'O':\n uf.union(dummy, n * (m-1) + j)\n\n d = [(1, 0), (0, 1), (0, -1), (-1, 0)]\n\n for i in range(1, m-1):\n for j in range(1, n-1):\n if board[i][j] == 'O':\n for di, dj in d:\n x = i+di\n y = j+dj\n if board[x][y] == 'O':\n uf.union(x*n+y, i*n+j)\n\n # change not connected 'O' with 'X'\n for i in range(1, m-1):\n for j in range(1, n-1):\n if not uf.connected(dummy, i * n + j):\n board[i][j] = 'X'", "def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans 
+= str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else statement used when correct_tile is 1+ rows above target position.\n else:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dlu\")\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n return ans", "def half_turn_solve(db, position):\n print('- Table Lookup: ', end='')\n solve_sequence = half_turn_lookup.lookup_position(db, position)\n temp_cube = Cube(position)\n for move in solve_sequence:\n dyn_move(temp_cube, move)\n print(move.name, end=' ')\n print()\n return solve_sequence", "def solve_puzzle(self):\n moves = self.moves\n peg_pos = self.peg_pos\n move_z = self.move_to_height\n \n print('Solving Tower of Hanoi:')\n for i, move in enumerate(moves):\n des_peg = move[0]\n des_peg_pos = peg_pos[des_peg]\n \n #move to peg\n print(' Moving to peg: '+str(des_peg)+' at: '+str(des_peg_pos))\n self.move_to(des_peg_pos[0], des_peg_pos[1], move_z)\n \n #if index is even, pickup disk, else drop disk\n if i % 2 == 0:\n print(' Picking up disk at height: '+str(move[1]))\n self.pick(move[1])\n else:\n print(' Dropping disk')\n self.drop()\n print('Finished solving puzzle')", "def solve(board) -> None:\n rows = len(board)\n if rows==0:\n return board\n cols = len(board[0])\n \n def is_border(rc):\n (rr, cc) =rc\n if rr<rows and rr< cols and rr>=0 and cc>=0 and board[rr][cc]=='O' and (rr==0 or rr==rows-1 or cc==0 or cc==cols-1):\n return True\n return False\n \n transf = []\n for r in range(rows):\n for c in range(cols):\n if board[r][c]=='O' and not is_border((r,c)) and not any(map(is_border, [(r-1, c), (r+1, c), (r, c-1), (r, c+1)])):\n transf.append((r,c))\n if transf:\n for r,c in transf:\n board[r][c]='X'\n return board", "def solve(self, board: List[List[str]]) -> None:\n def dfs(board, i, j):\n m = len(board)\n n = len(board[0])\n if i < 0 or i >= m or j < 0 or j >= n: return\n\n if board[i][j] != 'O': return\n\n board[i][j] = '#'\n [dfs(board, i+di, j+dj) for di, dj in [(0, 1), (1, 0), (0, -1), (-1, 0)]]\n\n if len(board) == 0: return\n m = len(board)\n n = len(board[0])\n\n for i in range(m):\n dfs(board, i, 0)\n dfs(board, i, 
n-1)\n\n for j in range(n):\n dfs(board, 0, j)\n dfs(board, m-1, j)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve_tour(self):\n\t\tboard = [[-1 for _ in range(self.N)]for _ in range(self.N)]\n\t\tboard[0][0] = 0\n\n\t\tz = self.find_tour(board, 0, 0, 1)\n\t\tif z:\n\t\t\tfor i in range(self.N):\n\t\t\t\tfor j in range(self.N):\n\t\t\t\t\tself.solution.append(board[i][j])\n\t\t\tprint board\n\t\t\treturn self.solution\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"No solution\")", "def solve(self, board) -> None:\n coords = []\n board_len = len(board)\n row_len = len(board[0]) - 1\n # top\n # coords.append([[0, i] for i, q in enumerate(board[0]) if q == \"O\"])\n # # bottom\n # coords.append(\n # [[board_len, i] for i, q in enumerate(board[board_len]) if q == \"O\"]\n # )\n for i in range(board_len):\n row_coord = [[i,indx] for indx, q in enumerate(board[i]) if q == \"O\"]\n # import pdb; pdb.set_trace()\n for x in row_coord:\n coords.append(x)\n for x in coords:\n if len(x) == 0:\n continue\n if x[0] == 0:\n print(\"top border\")\n elif x[0] == board_len - 1:\n print(\"bottom border\")\n elif x[1] == 0:\n print(\"left border\")\n elif x[1] == row_len:\n prin(\"right border\")", "def solve_row1_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row1_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n\n # Moves correct_tile to the target position (1, target_col),\n # and the zero tile above the target position at (0, target_col). \n correct_tile = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, correct_tile) \n move_str += \"ur\"\n self.update_puzzle(move_str)\n\n assert self.row0_invariant(target_col)\n \n return move_str", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def solve(self, board) -> None:\n for index in range (1, len(board)-1):\n arr = board[index]\n for ch in range(1, len(arr)-1):\n if arr[ch] is 'O':\n safe = True\n if ch-1 == 0 and arr[ch-1] is 'O':\n safe = False\n if ch +1 == len(arr)-1 and arr[ch+1] is 'O':\n safe = False\n if index -1 == 0 and board[index-1][ch] is 'O':\n safe = False\n if index + 1 == len(board)-1 and board[index + 1][ch] is 'O':\n safe = False\n if safe:\n arr[ch] = 'X'", "def solve_row1_tile(self, target_col):\r\n row, col = self.current_position(1, target_col)\r\n move = self.move_to_target(1, target_col, row, col)\r\n # for next move\r\n move += \"ur\"\r\n \r\n self.update_puzzle(move)\r\n return move", "def solve(self):\n # If board is filled, board is trivially solved\n if self.check_full_board():\n return self.done\n\n # Iterate over every square in the board\n for row in range(self.num_rows):\n for col in range(self.num_columns):\n\n # If square is empty, begin plugging in possible values\n if self.check_empty_space(row, col):\n for val in range(1, 10):\n if not self.check_row(val, row) and \\\n not self.check_column(val, col) and \\\n not self.check_box(val, self.what_box(row, col)):\n self.board[row][col] = val\n \n if self.solve():\n return self.done()\n \n # Didn't work; undo assigment\n self.board[row][col] = ' '\n\n # Bad path; backtrack\n return False", "def solve(self, board: 
'List[List[str]]') -> 'None':\n\n def dfs(i, j, tmp):\n nonlocal flag\n if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):\n flag = False\n return\n if board[i][j] != 'O' or [i, j] in tmp:\n return\n tmp.append([i, j])\n dfs(i - 1, j, tmp)\n dfs(i + 1, j, tmp)\n dfs(i, j + 1, tmp)\n dfs(i, j - 1, tmp)\n return tmp\n\n change = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 'O' and [i, j] not in change:\n tmp = []\n flag = True\n tmp = dfs(i, j, tmp[:])\n if flag:\n for loc in tmp:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'\n\n for loc in change:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] 
== \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def solve(self, board):\n def dfs(board, r, c):\n if r < 0 or c < 0 or r > rows - 1 or c > cols - 1 or board[r][c] == 'X' or board[r][c] == '#':\n return\n board[r][c] = '#'\n dfs(board, r - 1, c)\n dfs(board, r + 1, c)\n dfs(board, r, c - 1)\n dfs(board, r, c + 1)\n\n if len(board) == 0:\n return;\n rows = len(board)\n cols = len(board[0])\n for i in range(rows):\n for j in range(cols):\n if (i == 0 or j == 0 or i == rows - 1 or j == cols - 1) and board[i][j] == 'O':\n dfs(board, i, j)\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == '#':\n board[i][j] = 'O'", "def solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0:\n return\n self.h = len(board)\n self.w = len(board[0])\n self.board = board\n for i in range(self.h):\n for j in range(self.w):\n if i == 0 or i == self.h-1 or j == 0 or j == self.w-1:\n #print (i,j)\n self.dfs((i,j))\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"O\":\n self.board[i][j]=\"X\"\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"#\":\n self.board[i][j]=\"O\"", "def solve(self, board: List[List[str]]) -> None:\n def _dfs(i, j):\n if i < 0 or j < 0 or i >= len(board) or j >= len(board[0]) or board[i][j] in ['X', '#']: return\n board[i][j] = '#'\n _dfs(i-1, j)\n _dfs(i+1, j)\n _dfs(i, j-1)\n _dfs(i, j+1)\n\n if not board or not board[0]: return\n m, n = len(board), len(board[0])\n for i in range(0, m):\n for j in range(0, n):\n is_edge = i == 0 or j == 0 or i == m-1 or j == n-1\n if is_edge and board[i][j] == 'O':\n _dfs(i, j)\n print(board)\n\n for i in range(0, m):\n for j in range(0, n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self):\n while self.character.path[-1] != 88:\n n = self.next_move()\n if n is None:\n self.character.path += ['Error: Could not find full path (budget does not suffice or unreachable).']\n break\n self.character.path += [n]\n self.updated_occupied_locations()\n self.currentTurn += 1", "def solve(self, board: List[List[str]]) -> None:\n def DFS(board, i, j):\n q = []\n q.append([i, j])\n \n while q:\n x, y = q.pop()\n board[x][y] = \"*\"\n neighbors = ((0, 1), (0, -1), (1, 0), (-1, 0))\n for dx, dy in neighbors:\n if 0 <= x + dx <= len(board) - 1 and 0 <= y + 
dy <= len(board[0]) - 1 and board[x + dx][y + dy] == \"O\":\n q.append([x + dx, y + dy])\n \n \n # first row\n i = 0\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last row\n i = len(board) - 1\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # first column\n j = 0\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last column\n j = len(board[0]) - 1\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n \n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"*\":\n board[i][j] = \"O\"", "def solve (M, cpos, move): \n if move == 64:\n print (\"\\n\\nmove: \", move)\n print (\"sum: \", sum(M))\n pprint (M)\n #exit()\n for next in get_moves(cpos, M):\n solve(ulist(M, next, move+1), next, move+1)", "def solve_row1_tile(self, target_col):\n # replace with your code\n print target_col\n assert self.row1_invariant(target_col), 'False precondition'\n string = ''\n target = self.current_position(1, target_col)\n row_difference = 1 - target[0]\n col_difference = target_col - target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0:\n string += 'urrdl' * (col_difference - 1)\n string += 'ur'\n elif row_difference == 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n print 'Row 1 Path', string\n self.update_puzzle(string)\n assert self.row0_invariant(target_col), 'False string'\n return string", "def solve(self, board: List[List[str]]) -> None:\n m = len(board)\n if m == 0:\n return\n n = len(board[0])\n uf = UnionFold(m*n+1)\n dummy = m*n\n # 搜索与边界联通的 “O”\n for i in range(0, m):\n for j in range(0, n):\n # the boundary\n if (i==0 or i == m-1 or j == 0 or j == n-1) and board[i][j] == \"O\":\n uf.union(i*n+j, dummy)\n elif board[i][j] == \"O\":\n for l in [[1,0], [0, 1], [-1, 0], [0, -1]]:\n x, y = l[0]+i, l[1]+j\n if board[x][y] == \"O\":\n uf.union(i*n+j, x*n+y)\n # 所有不和 dummy 连通的 O,都要被替换\n for i in range(1, m-1):\n for j in range(1, n-1):\n if not uf.connected(dummy, i*n+j):\n board[i][j] = \"X\"", "def solve_col0_tile(self, target_row):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, 0), \"tiles to right and below incorrectly ordered\"\n\n # Move zero tile from target position (target_row, 0) to (target_row - 1, 1).\n self.update_puzzle(\"ur\")\n\n move_str = \"\"\n \n # correct_tile's position is determined after moving zero tile \"ur\" \n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(target_row, 0)\n \n # Moves to reposition correct_tile to target position.\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(target_row, 0):\n move_str += str(self.position_tile(target_row - 1, 1, correct_tile))\n move_str += str(\"ruldrdlurdluurddlur\")\n\n # Moves to reposition zero tile to end of column of target_row + 1.\n move_str += str(\"r\" * (self.get_width() - 2)) \n \n self.update_puzzle(move_str)\n\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1) \n \n move_str = \"ur\" + move_str\n return move_str", "def solve_row0_tile(self, target_col):\r\n # move the zero tile from position 
(0,j) to (1,j−1) \r\n # using the move string \"ld\" \r\n moves_str = \"\"\r\n moves_str += \"ld\"\r\n # check whether target tile is at position (0,j).\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str) \r\n current_row, current_col = temp_grid.current_position(0, target_col)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n \r\n # If target tile is not at position (0,j).\r\n # reposition the target tile to position (1,j−1) \r\n # with tile zero in position (1,j−2).\r\n if current_row != 0 or current_col != target_col:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"urdlurrdluldrruld\"\r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_row0_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n # New Solution: DFS on boarder (140ms: 89.07%)\n if not board or not board[0]: return\n def dfs(i, j):\n if board[i][j]=='O':\n board[i][j] = '*'\n if i-1>=0:\n dfs(i-1, j)\n if i+1<len(board):\n dfs(i+1, j)\n if j-1>=0:\n dfs(i, j-1)\n if j+1<len(board[0]):\n dfs(i, j+1)\n height, width = len(board), len(board[0])\n for i in range(width):\n if board[0][i]=='O':\n dfs(0, i)\n if board[height-1][i]=='O':\n dfs(height-1, i)\n for i in range(height):\n if board[i][0]=='O':\n dfs(i, 0)\n if board[i][width-1]=='O':\n dfs(i, width-1)\n for i in range(height):\n for j in range(width):\n if board[i][j]=='O':\n board[i][j] = 'X'\n elif board[i][j]=='*':\n board[i][j] = 'O'", "def solve_row1_tile(self, target_col):\n movements = self.solve_interior_tile(1, target_col)\n self.update_puzzle(\"ur\")\n return movements + \"ur\"", "def solve_(self, x, y, board, path):\n if self.SOLVED:\n return\n if self.is_done(board):\n self.print_path(path)\n self.SOLVED = True\n return\n for new_x, new_y in self.next_click(x, y, board):\n if new_x is None or new_y is None:\n return\n new_board = self.click(new_x, new_y, board)\n self.solve_(\n x=0, y=0,\n board=new_board,\n path=path + [((new_x, new_y), new_board)]\n )", "def solve(self, board: List[List[str]]) -> None:\n visited = [[False for x in range(len(board[0]))] for y in range(len(board))]\n for i in range(len(board)):\n for j in range(len(board[i])):\n if not visited[i][j] and board[i][j] == 'O':\n res = []\n result = self.gatherO(board, i, j, res, visited)\n if not result:\n for coordinate in res:\n board[coordinate[0]][coordinate[1]] = 'X'", "def solveSudoku(self, board):\n\n digits = { str(i) for i in range(1, 10) }\n rows = [ digits.copy() for _ in range(9) ]\n cols = [ digits.copy() for _ in range(9) ]\n boxs = [ [ digits.copy() for _ in range(3) ] for _ in range(3) ]\n unoccupied = set()\n\n def __recursiveSolver():\n if not unoccupied:\n return\n\n choices = digits.copy()\n for row, col in unoccupied:\n possible_moves = rows[row] & cols[col] & boxs[row // 3][col // 3]\n if len(possible_moves) < len(choices):\n action_pos = (row, col)\n choices = possible_moves\n if len(choices) == 1:\n break\n\n for choice in choices:\n (row, col) = action_pos\n\n unoccupied.remove(action_pos)\n board[row][col] = choice\n rows[row].remove(choice)\n cols[col].remove(choice)\n boxs[row // 3][col // 3].remove(choice)\n\n __recursiveSolver()\n if not unoccupied: return\n\n unoccupied.add(action_pos)\n board[row][col] = '.'\n rows[row].add(choice)\n cols[col].add(choice)\n boxs[row // 3][col // 3].add(choice)\n\n for row in range(9):\n for col in range(9):\n ch = board[row][col]\n if ch == '.':\n 
unoccupied.add((row, col))\n else:\n rows[row].remove(ch)\n cols[col].remove(ch)\n boxs[row // 3][col // 3].remove(ch)\n\n __recursiveSolver()", "def part1_2(puzzle_input):\n [initial_state_string, configurations] = puzzle_input.split('\\n\\n')\n initial_state = re.sub('initial state: ', '', initial_state_string)\n rules_arr = configurations.split('\\n')\n rules = [re.split(' => ', line) for line in rules_arr]\n rules = {t[0]: t[1] for t in rules}\n current_state = '..........' + initial_state + '...............................................................................................................................................'\n for i in range(100): # After 100th cycle, the only change is that there is a '#' that shifts right\n next_generation_string = \"\"\n for index, pot in enumerate(current_state):\n if index == 0:\n temp_string = '..' + current_state[:3]\n elif index == 1:\n temp_string = '.' + current_state[:4]\n elif index == len(current_state) - 2:\n temp_string = current_state[-4:] + '.'\n elif index == len(current_state) - 1:\n temp_string = current_state[-3:] + '..'\n else:\n temp_string = current_state[index-2:index+3]\n if temp_string in rules:\n next_generation_string += rules[temp_string]\n else:\n next_generation_string += pot\n current_state = next_generation_string\n\n # For part 1\n part1_sum = 0\n if i == 19:\n for index, pot in enumerate(current_state):\n if pot == '#':\n part1_sum += index - 10\n print(part1_sum)\n\n # Part 2\n part2_sum = 0\n for index, pot in enumerate(current_state):\n if pot == '#':\n part2_sum += index - 10 + 50000000000 - 100\n print(part2_sum)", "def solve(self, g: List[List[str]]) -> None:\n n = len(g)\n m = len(g[0])\n for i in range(n):\n for j in range(m):\n if g[i][j] == 'O':\n g[i][j] = ' '\n def dfs(x, y):\n g[x][y]='O'\n for nx, ny in (x+1,y),(x-1,y),(x,y+1),(x,y-1):\n if 0<=nx<n and 0<=ny<m and g[nx][ny]==' ':\n dfs(nx, ny)\n for i in range(n):\n if g[i][0]==' ':\n dfs(i,0)\n if g[i][m-1]==' ':\n dfs(i,m-1)\n for i in range(m):\n if g[0][i]==' ':\n dfs(0,i)\n if g[n-1][i]==' ':\n dfs(n-1,i)\n for i in range(n):\n for j in range(m):\n if g[i][j]==' ':\n g[i][j]='X'\n return g", "def solve_puzzle(self):\n\n # for each word in the words list\n # ...for each row in the game board\n # ......for each column in each row\n for word in self.words:\n for y, row in enumerate(self.board):\n for x, col in enumerate(row):\n \n # for each direction\n # try to find a word in said direction\n for dir in self.directions:\n self.scan_word(word, y, x, dir)", "def move(self, direction):\r\n direc = list(OFFSETS[direction])\r\n line = []\r\n dummy_board = self.board[:]\r\n if direction == 3:\r\n for i in range(self.height):\r\n self.board[i] = merge(self.board[i])\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n elif direction == 4:\r\n for i in range(self.height):\r\n line = self.board[i][::-1]\r\n self.board[i] = merge(line)\r\n self.board[i] = self.board[i][::-1]\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n \r\n elif direction == 1 or 2:\r\n dummy_board = str(self.board[:])\r\n if direction == 1:\r\n tile = [0,0]\r\n elif direction == 2:\r\n tile = [self.height - 1, 0]\r\n for i in range(self.width):\r\n tile2 = tile[:]\r\n while len(line) < self.height:\r\n line.append(self.get_tile(*tile2))\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n line = merge(line)\r\n tile2 = tile[:]\r\n for i in range(self.height):\r\n self.set_tile(*(tile2+[line[0]]))\r\n line.remove(line[0])\r\n tile2 = [x+y for x,y in zip(direc, 
tile2)]\r\n tile = [x+y for x,y in zip(tile, [0,1])]\r\n if dummy_board != self.__str__():\r\n self.new_tile()\r\n return self.board", "def solve(self, board):\n self.memo={}\n def changeVal(board, i, j,memo):\n key =str(i)+','+str(j)\n print(i,j)\n print(memo)\n if key in self.memo.keys():\n return self.memo[key]\n if ((i == 0 or j == 0 or i == len(board) - 1 or j == len(board[0]) - 1) and board[i][j] == 'O'):\n self.memo[key] = False\n return False\n if board[i][j] == 'X':\n self.memo[key] = True\n return True\n if board[i][j] == 'O':\n board[i][j] = 'X'\n ans1 = changeVal(board, i + 1, j,self.memo)\n ans2 = changeVal(board, i - 1, j,self.memo)\n ans3 = changeVal(board, i,j + 1,self.memo)\n ans4 = changeVal(board, i, j - 1,self.memo)\n\n ans = ans1 and ans2 and ans3 and ans4\n self.memo[key] = ans\n if ans == False:\n board[i][j] = 'O'\n return ans\n\n for i in range(0, len(board)):\n for j in range(0, len(board[0])):\n if board[i][j] == 'O':\n changeVal(board, i, j,self.memo)\n\n return board", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def action(self):\r\n\r\n\r\n #have we just started?\r\n if self.player_information[\"us\"][\"nTokens\"] == 0:\r\n move = generate_starting_move(self.player_information[\"us\"][\"player_side\"], self.board_array)\r\n return move\r\n\r\n #otherwise do minimax \r\n \r\n #start off with some shallow depth:\r\n if self.turn_no < 5:\r\n depth = 3\r\n else:\r\n depth = 2\r\n \r\n #set a constraint for search depth\r\n if self.total_tokens_on_board < 6:\r\n depth = 3\r\n elif self.total_tokens_on_board < 10:\r\n depth = 2\r\n else:\r\n depth = 1\r\n \r\n #have a time reference\r\n print(f'nthrows: {self.player_information[\"us\"][\"nThrowsRemaining\"]}')\r\n starting_time = int(round(time.time(), 0))\r\n #salvage result from minimax\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n 
None, None, None, depth, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, True, self.turn_no)\r\n\r\n #clean it up a bit \r\n print(self.board_dict)\r\n #tidy it up\r\n result = result[0]\r\n print(f'pre: {result}')\r\n #in case we get a bad move redo but make it very shallow\r\n if len(result) == 1 or result == (-5, -5):\r\n #force it to return a usable move\r\n counter = 0\r\n while (len(result) == 1) or (result == (-5, -5)):\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, 1, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, False, self.turn_no)\r\n result = result[0]\r\n counter += 1\r\n \r\n #if its taking too long\r\n if counter > 2: \r\n #generate one random possible move to use \r\n allied_tokens = [token for token in self.player_tokens if self.player_tokens[token] == \"us\"]\r\n move_list = generate_moves(self.board_dict, self.player_tokens, self.co_existance_dict, allied_tokens,\r\n self.player_information, self.board_array, True, \"all\")\r\n \r\n \r\n #if there are no moves\r\n if len(move_list) == 0:\r\n if self.player_information['us']['nThrowsRemaining'] > 0:\r\n throws = generate_possible_throws(self.board_dict, self.player_tokens, self.co_existance_dict, self.player_information, \"us\",\r\n self.player_information[\"us\"][\"player_side\"], self.board_array, \"all\" )\r\n result = random.choice(throws)\r\n \r\n else:\r\n result = random.choice(move_list)\r\n print(f'random: {result}')\r\n break\r\n\r\n print(f' inside: {result}')\r\n\r\n print(result)\r\n #otherwise clean it up\r\n if result[0] == 'throw':\r\n final_result = (result[0].upper(), result[1], result[2])\r\n else:\r\n final_result = (result[0].upper(), result[2], result[3])\r\n # return final result \r\n return final_result", "def get_all_possible_moves():\r\n \"\"\"\r\n Creates the labels for the universal chess interface into an array and returns them\r\n \"\"\"\r\n labels_array = []\r\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\r\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\r\n promoted_to = ['q', 'r', 'b', 'n']\r\n\r\n for l1 in range(8):\r\n for n1 in range(8):\r\n destinations = [(t, n1) for t in range(8)] + \\\r\n [(l1, t) for t in range(8)] + \\\r\n [(l1 + t, n1 + t) for t in range(-7, 8)] + \\\r\n [(l1 + t, n1 - t) for t in range(-7, 8)] + \\\r\n [(l1 + a, n1 + b) for (a, b) in\r\n [(-2, -1), (-1, -2), (-2, 1), (1, -2), (2, -1), (-1, 2), (2, 1), (1, 2)]]\r\n for (l2, n2) in destinations:\r\n if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8):\r\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2]\r\n labels_array.append(move)\r\n for l1 in range(8):\r\n l = letters[l1]\r\n for p in promoted_to:\r\n labels_array.append(l + '2' + l + '1' + p)\r\n labels_array.append(l + '7' + l + '8' + p)\r\n if l1 > 0:\r\n l_l = letters[l1 - 1]\r\n labels_array.append(l + '2' + l_l + '1' + p)\r\n labels_array.append(l + '7' + l_l + '8' + p)\r\n if l1 < 7:\r\n l_r = letters[l1 + 1]\r\n labels_array.append(l + '2' + l_r + '1' + p)\r\n labels_array.append(l + '7' + l_r + '8' + p)\r\n return labels_array", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return []\n nr = len(board)\n nc = len(board[0])\n\n # begin dfs from boundaries with letter \"O\"\n for r in range(nr):\n for c in range(nc):\n if r == 0 or r == nr-1 or c == 0 or c == 
nc-1:\n if board[r][c] == \"O\":\n self.dfs(board, r, c)\n\n # change \"O\" to \"X\" and \"#\" to \"O\"\n for r in range(nr):\n for c in range(nc):\n if board[r][c] == \"O\":\n board[r][c] = \"X\"\n elif board[r][c] == \"#\":\n board[r][c] = \"O\"", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def solve(self, board: List[List[str]]) -> None:\n if(len(board)==0) : return \n for i in range(0, len(board[0])): \n if(board[0][i]==\"O\"):\n self.DFS(board, 0, i)\n \n if(board[len(board)-1][i]==\"O\"):\n self.DFS(board,len(board)-1,i)\n \n for i in range(0, len(board)):\n if(board[i][0]==\"O\"):\n self.DFS(board, i, 0)\n \n if(board[i][len(board[0])-1]==\"O\"):\n self.DFS(board,i, len(board[0])-1)\n \n \n for i in range(0,len(board)):\n for j in range(0, len(board[0])):\n if(board[i][j]==\"#\"):\n board[i][j]=\"O\"\n \n else:\n board[i][j]=\"X\"", "def find_best_move(state: GameState) -> None:", "def solve_row0_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row0_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n \n # Move zero tile from target position (0, target_col) to (1, target_col - 1) \n self.update_puzzle(\"ld\")\n \n move_str = \"\"\n\n # correct_tile's position is determined after moving zero tile \"ld\"\n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(0, target_col) \n \n # Moves to reposition correct_tile to target position, and\n # the zero tile to (1, target_col - 1).\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(0, target_col):\n move_str += str(self.position_tile(1, target_col - 1, correct_tile))\n move_str += str(\"urdlurrdluldrruld\")\n\n self.update_puzzle(move_str)\n\n assert self.row1_invariant(target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"ld\" + move_str\n return move_str" ]
[ "0.86358064", "0.84391785", "0.820359", "0.81158245", "0.8079695", "0.7876614", "0.7855013", "0.77959996", "0.7629816", "0.75706357", "0.7446708", "0.71919894", "0.70876247", "0.6987711", "0.6876523", "0.686447", "0.686447", "0.686447", "0.6857152", "0.6857152", "0.6825898", "0.682045", "0.680644", "0.6798767", "0.67694706", "0.6757338", "0.67503834", "0.6743881", "0.6727839", "0.66785896", "0.66642034", "0.665565", "0.6631162", "0.66305166", "0.6597714", "0.6597469", "0.65839773", "0.6572454", "0.6566848", "0.65513307", "0.6532281", "0.65215236", "0.65060896", "0.64793384", "0.6475916", "0.64727217", "0.64694023", "0.6466018", "0.6461253", "0.6459988", "0.64553595", "0.64140576", "0.63782644", "0.63752735", "0.63594425", "0.6354533", "0.63507825", "0.6346083", "0.6341721", "0.6335916", "0.6334586", "0.6311966", "0.6311564", "0.6310484", "0.6304807", "0.63016754", "0.6295452", "0.62954044", "0.6293516", "0.62665886", "0.6245019", "0.6223726", "0.6215188", "0.6211001", "0.62100565", "0.6206542", "0.61962146", "0.6182959", "0.6178695", "0.6177864", "0.61600417", "0.61391354", "0.61234325", "0.61214095", "0.60961217", "0.6078878", "0.60724854", "0.60497886", "0.6030426", "0.60152173", "0.60047674", "0.6000283", "0.59923935", "0.5980893", "0.59638155", "0.59557575", "0.5953203", "0.5951496", "0.5951158", "0.5948482" ]
0.8153259
3
This function will return data about the number of triangles on each vertex in a file inputs
def get_triangles_per_vertex(my_core, native_ranges):
    t_p_v_data = []
    tri_dimension = 2
    for vertex in native_ranges[types.MBVERTEX]:
        t_p_v_data.append(my_core.get_adjacencies(vertex, tri_dimension).size())
    return np.array(t_p_v_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_triangles(self, file):\n self.nibble(80)\n return struct.unpack(\"@i\", self.nibble(4))[0]", "def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1", "def count_cells(fpath):\n cells = []\n for i in range(40):\n fname = f\"{fpath}/Mesh2d_{i}.vtu\"\n if not os.path.exists(fname):\n print(f\"File {fname} does not exist.\")\n break\n mesh = meshio.read(fname)\n for cell_block in mesh.cells:\n if cell_block.type in (\"triangle\"):\n num_cells = len(cell_block)\n print(f\"{i:2d}: {num_cells:6d} elements, {len(mesh.points):6d} vertices\")\n cells.append(num_cells)\n continue\n return cells", "def test_triangle_count_05(self):\n body = {\"direction\": \"IN\", \"degree\": -1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_in': 13, 'vertices_in': 9, 'triangles': 2}\n else:\n assert 0", "def test_triangle_count_08(self):\n body = {\"direction\": \"IN\", \"degree\": 1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_in': 13, 'vertices_in': 9, 'triangles': 0}\n else:\n assert 0", "def count_aux(self, infile):\n n_aux = 0\n n_tokens = 0\n dgs_in = self._file_handler.file_to_dg_list(infile)\n for dg in dgs_in:\n n_tokens += len(dg)\n transform = VGtransformer(dg, dep_style=self._dep_style)\n transform.transform()\n n_aux += transform.tot_aux\n return n_aux, n_tokens, len(dgs_in)", "def test_triangle_count_06(self):\n body = {\"direction\": \"OUT\", \"degree\": -1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0", "def test_triangle_count_04(self):\n body = {\"direction\": \"OUT\"}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0", "def num_data_lines(filepath):\n\tif not file_exists(filepath):\n\t\treturn -1\n\tcount = 0\n\twith open(filepath, 'r') as f:\n\t\twhile read_floats(f):\n\t\t\tcount += 1\n\tf.close()\n\treturn count", "def get_vertices_count(self) -> int:\n # TODO: verify the following claim:\n raise NotImplementedError", "def test_triangle_count_07(self):\n body = {\"direction\": \"OUT\", \"degree\": 1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 0}\n else:\n assert 0", "def obtener_cantidad_vertices(self):\n return len(self.vertices.keys())", "def countSamples(filename):\n with open(filename, \"r\") as f:\n line = f.readline().split(\"\\t\")\n return len(line) - 2", "def num_vertices(self):\n return len(self)", "def ss_triangle_count(graph: ScipyGraph) -> int:\n props = ScipyGraph.Type.compute_abstract_properties(graph, {\"edge_type\"})\n if props[\"edge_type\"] == \"map\":\n # Drop weights before performing triangle count\n m = graph.value.copy()\n m.data = np.ones_like(m.data)\n elif props[\"edge_type\"] == \"set\":\n m = graph.value\n L = ss.tril(m, k=-1).tocsr()\n U = ss.triu(m, 
k=1).tocsc()\n return int((L @ U.T).multiply(L).sum())", "def findTriangles(p):\n triangleCount = 0\n for a in range(3, p//3 + 1):\n for b in range(a+1, p//2):\n c = p - (a+b)\n if (a**2 + b**2) == c**2:\n triangleCount += 1\n return triangleCount", "def num_vertices(self):\n return self.n * (1 + int(self.variant.is_bipartite()))", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def load_data():\n data = []\n with euler.Resource('triangle.txt') as datafile:\n for line in datafile.readlines():\n data.insert(0, map(int, line.strip().split()))\n return data", "def num_vertices(self):\n return len(self.vertices)", "def num_vertices(self):\n return len(self.vertices)", "def return_num_vertices(self):\n return self.__size", "def n_vertices(self):\n try: \n return self._n_vertices\n except AttributeError:\n self._n_vertices = 0\n for v in self.vertex_generator(): self._n_vertices += 1\n return self._n_vertices", "def get_edge_lengths(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n edges, _ = get_edges(triangles)\n return np.linalg.norm(np.diff(points[edges], axis=1), axis=2).squeeze()", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def _get_las_npoints(fpath):\n with laspy.file.File(fpath) as f:\n return f.header.count", "def std_triangles_count(graph):\n if nx.is_directed(graph):\n raise Exception(\"Graph is not undirected\")\n\n return sum(nx.triangles(graph).values()) // 3", "def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number", "def vertexCount(self):\n return self._nVertices", "def numverts(self):\n return self._numvertstotal", "def getNumVertices(self):\n return len(self.V)", "def num_vertices(self, p):\n ret_val = self._num_vertices(p)\n return ret_val", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def get_dataset_length(file_path, had_header=True):\n with open(file_path, 'r') as f:\n length = 0\n for _ in f:\n length += 1\n length = length - had_header\n return length", "def get_triangle_count(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_GetTriangleCount(objectid)\n remote.runCommand(cmd1)\n result_val = 
mmapi.any_result()\n cmd1.GetSceneCommandResult_GetTriangleCount(key1, result_val)\n return result_val.i", "def get_array_size():\n tg_file = 'NA_CAS_gauges.txt'\n lines = open(tg_file).readlines()\n tg_nbr = len(lines)\n return tg_nbr", "def test_number_of_vertex_elements_in_MESH_chunk(self):\n for O in self.mod.objts.itervalues():\n for M in O.meshes.itervalues():\n self.assertEqual(M.vsize, len(M.vert))", "def num_vertices(self):\r\n return len(self.__graph_dict.keys())", "def countgenes():\n directory = openfile('db_directory.txt')\n no_genes_file = directory+'GENES_IN_HPO.txt'\n GENES_IN_HPO = openfile(no_genes_file)\n #GENES_IN_HPO = openfile(numbergenes_file)\n return int(GENES_IN_HPO)", "def num_vertices(self):\n return self._top_exp.number_of_vertices()", "def n_vertices(self):\n return len(self.minimized_generators())", "def read_off_size(path):\n try:\n with open(path, 'r') as file:\n lines = file.readlines()\n if lines[0] != 'OFF\\n':\n print(path, 'Error: is not an .off file')\n num_vertices, num_faces = tuple(lines[1].split()[:2])\n return int(num_vertices), int(num_faces)\n except IOError:\n print('Error: Failed reading file:', path)", "def count_lines(filename):\n with open(filename, 'r', encoding='utf-8') as file:\n lines_count = int()\n for line in file:\n lines_count += 1\n info_tuple = (filename, lines_count)\n return info_tuple", "def get_vertex_data(\n mesh: object,\n g: BinaryReader,\n v1: int,\n v2: int,\n v3: int,\n v4: int,\n n: int,\n verbose=False,\n):\n for i in range(v1):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v1 v_offset\": v_offset,\n \"v1 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n mesh.skinWeightList.append([0, 0, 0, 1])\n\n for i in range(v2):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v2 v_offset\": v_offset,\n \"v2 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n w1 = g.f(1)[0]\n w2 = 1.0 - w1\n mesh.skinWeightList.append([0, 0, w2, w1])\n\n for i in range(v3):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v3 v_offset\": v_offset,\n \"v3 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n w1 = g.f(1)[0]\n w2 = g.f(1)[0]\n w3 = 1.0 - w1 - w2\n mesh.skinWeightList.append([0, w3, w2, w1])\n\n for i in range(v4):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v4 v_offset\": v_offset,\n \"v4 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n w1 = g.f(1)[0]\n w2 = g.f(1)[0]\n w3 = g.f(1)[0]\n w4 = 1.0 - w1 - w2 - w3\n mesh.skinWeightList.append([w4, w3, w2, w1])", "def vertex_count(self) -> int:\n return len(self._vertex_map)", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def read_triangle(f, layers=1):\n # Read nodes\n with open(f + '.node') as h:\n 
num_nodes = int(h.readline().split(' ')[0])\n node_values = np.zeros((num_nodes, 2), dtype=np.float64)\n for line in h:\n if line[0] == '#':\n continue\n node, x, y = line.split()[:3]\n node_values[int(node) - 1, :] = [float(x), float(y)]\n\n nodes = op2.Set(num_nodes, \"nodes\")\n coords = op2.Dat(nodes ** 2, node_values, name=\"coords\")\n\n # Read elements\n with open(f + '.ele') as h:\n num_tri, nodes_per_tri, num_attrs = [int(x) for x in h.readline().split()]\n map_values = np.zeros((num_tri, nodes_per_tri), dtype=np.int32)\n for line in h:\n if line[0] == '#':\n continue\n vals = [int(x) - 1 for x in line.split()]\n map_values[vals[0], :] = vals[1:nodes_per_tri + 1]\n\n elements = op2.Set(num_tri, \"elements\", layers=layers)\n elem_node = op2.Map(elements, nodes, nodes_per_tri, map_values, \"elem_node\")\n\n return nodes, coords, elements, elem_node", "def bpCount(file):\n amount_bp = len(file)\n return amount_bp", "def size(self):\n num_vert = 0\n num_edg = 0\n for vertex in self.vertices():\n num_vert += 1\n num_edg += len(self.neighbors(vertex))\n return (num_vert, num_edg)", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def number_of_trips(filename): \r\n \r\n with open(filename, 'r') as f_in:\r\n # set up csv reader object\r\n trip_reader = csv.DictReader(f_in)\r\n \r\n # initialize count variables\r\n n_subscribers = 0\r\n n_customers = 0\r\n \r\n # tally up ride types\r\n for row in trip_reader:\r\n if row['user_type'] == 'Subscriber':\r\n n_subscribers += 1\r\n else:\r\n n_customers += 1\r\n \r\n # compute total number of rides\r\n n_total = n_subscribers + n_customers\r\n \r\n # return tallies as a tuple\r\n return(n_subscribers, n_customers, n_total)", "def file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n Nrows = i + 1\n return Nrows", "def n_n(output_path):\n lef = {}\n rig = {}\n rellef = {}\n relrig = {}\n\n triple = open(Path(output_path, \"train2id.txt\"), \"r\")\n valid = open(Path(output_path, \"valid2id.txt\"), \"r\")\n test = open(Path(output_path, \"test2id.txt\"), \"r\")\n\n ls = triple.readlines()\n tot = len(ls) - 1\n\n # (int)(triple.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = triple.readline()\n h, t, r = content.strip().split()\n if not (h, r) in lef:\n lef[(h, r)] = []\n if not (r, t) in rig:\n rig[(r, t)] = []\n lef[(h, r)].append(t)\n rig[(r, t)].append(h)\n if not r in rellef:\n rellef[r] = {}\n if not r in relrig:\n relrig[r] = {}\n rellef[r][h] = 1\n relrig[r][t] = 1\n\n ls = valid.readlines()\n tot = len(ls) - 1\n # (int)(valid.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = valid.readline()\n h, t, r = content.strip().split()\n if not (h, r) in lef:\n lef[(h, r)] = []\n if not (r, t) in rig:\n rig[(r, t)] = []\n lef[(h, r)].append(t)\n rig[(r, t)].append(h)\n if not r in rellef:\n rellef[r] = {}\n if not r in relrig:\n relrig[r] = {}\n rellef[r][h] = 1\n relrig[r][t] = 1\n\n ls = test.readlines()\n tot = len(ls) - 1\n # (int)(test.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = test.readline()\n h, t, r = content.strip().split()\n if not (h, r) in lef:\n lef[(h, r)] = []\n if not (r, t) in rig:\n rig[(r, t)] = []\n lef[(h, r)].append(t)\n rig[(r, t)].append(h)\n if not r in rellef:\n rellef[r] = {}\n if not r in relrig:\n relrig[r] = {}\n rellef[r][h] = 1\n relrig[r][t] = 1\n\n test.close()\n valid.close()\n triple.close()\n\n f = open(Path(output_path, 
\"type_constrain.txt\"), \"w\")\n f.write(\"%d\\n\" % (len(rellef)))\n for i in rellef:\n f.write(\"%s\\t%d\" % (i, len(rellef[i])))\n for j in rellef[i]:\n f.write(\"\\t%s\" % (j))\n f.write(\"\\n\")\n f.write(\"%s\\t%d\" % (i, len(relrig[i])))\n for j in relrig[i]:\n f.write(\"\\t%s\" % (j))\n f.write(\"\\n\")\n f.close()\n\n rellef = {}\n totlef = {}\n relrig = {}\n totrig = {}\n\n for i in lef:\n if not i[1] in rellef:\n rellef[i[1]] = 0\n totlef[i[1]] = 0\n rellef[i[1]] += len(lef[i])\n totlef[i[1]] += 1.0\n\n for i in rig:\n if not i[0] in relrig:\n relrig[i[0]] = 0\n totrig[i[0]] = 0\n relrig[i[0]] += len(rig[i])\n totrig[i[0]] += 1.0\n\n s11 = 0\n s1n = 0\n sn1 = 0\n snn = 0\n f = open(Path(output_path, \"test2id.txt\"), \"r\")\n ls = f.readlines()\n tot = len(ls) - 1\n # tot = (int)(f.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = f.readline()\n h, t, r = content.strip().split()\n rign = rellef[r] / totlef[r]\n lefn = relrig[r] / totrig[r]\n if rign <= 1.5 and lefn <= 1.5:\n s11 += 1\n if rign > 1.5 and lefn <= 1.5:\n s1n += 1\n if rign <= 1.5 and lefn > 1.5:\n sn1 += 1\n if rign > 1.5 and lefn > 1.5:\n snn += 1\n f.close()\n\n f = open(Path(output_path, \"test2id.txt\"), \"r\")\n f11 = open(Path(output_path, \"1-1.txt\"), \"w\")\n f1n = open(Path(output_path, \"1-n.txt\"), \"w\")\n fn1 = open(Path(output_path, \"n-1.txt\"), \"w\")\n fnn = open(Path(output_path, \"n-n.txt\"), \"w\")\n fall = open(Path(output_path,\"test2id_all.txt\"), \"w\")\n fall = open(Path(output_path,\"test2id_all.txt\"), \"w\")\n\n ls = f.readlines()\n tot = len(ls) - 1\n\n # tot = (int)(f.readline())\n fall.write(\"%d\\n\" % (tot))\n f11.write(\"%d\\n\" % (s11))\n f1n.write(\"%d\\n\" % (s1n))\n fn1.write(\"%d\\n\" % (sn1))\n fnn.write(\"%d\\n\" % (snn))\n for i in range(tot):\n content = ls[i + 1]\n # content = f.readline()\n h, t, r = content.strip().split()\n rign = rellef[r] / totlef[r]\n lefn = relrig[r] / totrig[r]\n if rign <= 1.5 and lefn <= 1.5:\n f11.write(content)\n fall.write(\"0\" + \"\\t\" + content)\n if rign > 1.5 and lefn <= 1.5:\n f1n.write(content)\n fall.write(\"1\" + \"\\t\" + content)\n if rign <= 1.5 and lefn > 1.5:\n fn1.write(content)\n fall.write(\"2\" + \"\\t\" + content)\n if rign > 1.5 and lefn > 1.5:\n fnn.write(content)\n fall.write(\"3\" + \"\\t\" + content)\n fall.close()\n f.close()\n f11.close()\n f1n.close()\n fn1.close()\n fnn.close()", "def count_positions(fname):\r\n with open(fname) as f:\r\n for i, l in enumerate(f):\r\n pass\r\n return i + 1", "def data_info(data):\n filename = data[\"filename\"]\n X_var = data[\"X_var\"]\n Y_var = data[\"Y_var\"]\n X,Y = read_file(filename,X_var,Y_var)\n input_dim = len(X_var)\n output_dim = len(Y_var)\n return X,Y,input_dim,output_dim", "def read_input(filename):\n with open(filename, 'r') as f:\n (N) = map(int, next(f).split())\n def parse_line(line):\n l = line.split()\n h = 0 if l[0] == 'H' else 1\n n = int(l[1])\n return [h, l[2:]]\n\n photos = transform_strings([parse_line(line) for line in f])\n return (len(photos), photos)", "def getNumVertexes(self):\n return _osgAnimation.RigTransformHardware_getNumVertexes(self)", "def __len__(self):\n return len(self._vertices)", "def getFileCount(self) -> int:\n ...", "def parse_triangle_files(self):\n nodes = {}\n boundary_nodes = []\n\n # parse node file into nodes\n with open(self.files['node']) as node_file:\n header = True\n for line in node_file:\n if header:\n header = False\n continue\n content = list(filter(lambda a: bool(a), line.split(' '))) # pylint: 
disable=W0108\n if not '#' in content[0]:\n is_boundary = content[3] == '1\\n'\n nodes[int(content[0])] = {\n 'id': int(content[0]),\n 'coords': [int(content[1]), int(content[2])],\n 'distance': 0 if is_boundary else None,\n 'relations': [],\n 'level_cycles': [], # ids of any level cycles this node is a part of\n 'level_paths': [], # ids of any level paths this node is a part of\n 'is_root_element': False,\n 'betweener_paths': []\n }\n if is_boundary:\n boundary_nodes.append(int(content[0]))\n node_file.close()\n\n # parse edge files into node relations\n with open(self.files['edge']) as edge_file:\n header = True\n for line in edge_file:\n if header:\n header = False\n continue\n content = list(filter(bool, line.split(' ')))\n if not '#' in content[0]:\n nodes[int(content[1])]['relations'].append(int(content[2]))\n nodes[int(content[2])]['relations'].append(int(content[1]))\n edge_file.close()\n\n # with open(self.files['ele']) as ele_file:\n # header = True\n # for line in edge_file:\n # if header:\n # header = False\n # continue\n # content = list(filter(bool, line.split(' ')))\n # if not '#' in content[0]:\n # nodes[int(content[1])]['relations'].append(int(content[2]))\n # nodes[int(content[2])]['relations'].append(int(content[1]))\n # edge_file.close()\n\n # sorts relations clockwise\n for node_id, node in nodes.items():\n nodes[node_id]['relations'] = sorted(node['relations'], key=(\n lambda related_node_id: (\n self.calculate_clockwise_angle_and_distance(node, nodes.get(related_node_id)) # pylint: disable=W0640\n )\n ))\n\n levels = self.get_levels(nodes, boundary_nodes)\n\n for level in levels:\n for node_id in level['node_ids']:\n self.identify_special_nodes(nodes, node_id)\n\n return nodes, boundary_nodes, levels", "def line_count(file):\n with open(file, \"r\") as f:\n return sum(1 for line in f)", "def get_num_examples(path_in):\n i = 0\n with open(path_in, 'r', encoding='utf8') as f:\n for _ in f:\n i += 1\n return i", "def read_test_tuples():\n lines = read_input(25, True)\n point_sets = list(parse_points(lines))\n expected_counts = [4, 3, 8]\n\n return zip(point_sets, expected_counts)", "def fileCount(self):\n pass", "def loadVTKPolydataFile(infile):\n\n lines = None\n\n with open(infile, 'rt') as f:\n lines = f.readlines()\n\n lines = [l.strip() for l in lines]\n\n if lines[3] != 'DATASET POLYDATA':\n raise ValueError('Only the POLYDATA data type is supported')\n\n nVertices = int(lines[4].split()[1])\n nPolygons = int(lines[5 + nVertices].split()[1])\n nIndices = int(lines[5 + nVertices].split()[2]) - nPolygons\n\n vertices = np.zeros((nVertices, 3), dtype=np.float32)\n polygonLengths = np.zeros( nPolygons, dtype=np.uint32)\n indices = np.zeros( nIndices, dtype=np.uint32)\n\n for i in range(nVertices):\n vertLine = lines[i + 5]\n vertices[i, :] = [float(w) for w in vertLine.split()]\n\n indexOffset = 0\n for i in range(nPolygons):\n\n polyLine = lines[6 + nVertices + i].split()\n polygonLengths[i] = int(polyLine[0])\n\n start = indexOffset\n end = indexOffset + polygonLengths[i]\n indices[start:end] = [int(w) for w in polyLine[1:]]\n\n indexOffset += polygonLengths[i]\n\n return vertices, polygonLengths, indices", "def readGR3File(inputFilename):\n print 'Reading ' + inputFilename + ' ...'\n infile = open(inputFilename, 'r')\n description = infile.readline().strip() # remove leading/trailing whitespace\n tmpStr = infile.readline()\n nTriangles, nNodes = (int(s) for s in tmpStr.split())\n print ' nTriangles={0:d} nNodes={1:d}'.format(nTriangles, nNodes)\n\n # nodes\n 
nodeArray = readNodeBlock(infile, nNodes)\n nodenum = np.array(nodeArray[:, 0].flatten(), dtype=int)\n nodexyz = np.zeros((nNodes, 3))\n nodexyz[:, :2] = nodeArray[:, 1:3]\n nodalValues = nodeArray[:, 3]\n\n print ' Nodal values min={0:g} max={1:g}'.format(min(nodalValues), max(nodalValues))\n\n # triangular elements\n triArray = readElemBlock(infile, nTriangles)\n\n trinum = triArray[:, 0].flatten()\n tritype = triArray[0, 1]\n trinodes = triArray[:, -3:] - 1 # three last columns, 0-based indexing\n #triangles = meshElements(trinodes,trinum,tritype)\n\n x = nodexyz[:, 0]\n y = nodexyz[:, 1]\n\n tmpStr = infile.readline()\n boundaries = []\n if len(tmpStr) > 0:\n # boundary information, if not end of file\n nOpenBndSegments = int(tmpStr.split()[0])\n nOpenBndNodesTot = int(infile.readline().split()[0])\n print ' nOpenBndSegments={0:d} nOpenBndNodesTot={1:d}'.format(nOpenBndSegments, nOpenBndNodesTot)\n for iBnd in range(nOpenBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n tag = bndHeader[-1]\n if tag.isdigit():\n tag = 'open' + tag\n print ' open bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary('open', tag, nodes))\n nLandBndSegments = int(infile.readline().split()[0])\n nLandBndNodesTot = int(infile.readline().split()[0])\n landBndTags = range(\n nOpenBndSegments + 1,\n nOpenBndSegments + nLandBndSegments + 1)\n print ' nLandBndSegments={0:d} nLandBndNodesTot={1:d}'.format(nLandBndSegments, nLandBndNodesTot)\n for iBnd in range(nLandBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n try:\n landType = int(bndHeader[1])\n except:\n print \"\"\"Land boundary type missing in gr3 file. 
Add 0/1 (land/island) after number of nodes in each land boudary, e.g.\n 1002 = Total number of closed boundary nodes\n 501 0 = Number of nodes in closed boundary 1\"\"\"\n raise Exception(\n 'Could not parse land boundary type (0/1 - land/island)\\n')\n landType = 'island' if landType == 1 else 'land'\n tag = landType + bndHeader[-1]\n print ' land bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n #tmpList = fromfile(infile,dtype=int,count=nBndNodes,sep=' ')\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary(landType, tag, nodes))\n\n infile.close()\n\n # for better interpolation, round coordinates to 1e-4\n nDig = 4\n x = np.round(x, nDig)\n y = np.round(y, nDig)\n\n return x, y, nodalValues, trinodes, boundaries, description", "def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes", "def linesCountingAux(file_name, nProcesses):\r\n\r\n linesPerProcessesList = []\r\n\r\n with open(file_name, \"r\") as file:\r\n lineCounting = 0\r\n\r\n for line in file:\r\n lineCounting += 1 #discover the lines in the text file\r\n\r\n linesPerProcesses = lineCounting // nProcesses\r\n\r\n for number in range(nProcesses):\r\n linesPerProcessesList.append(linesPerProcesses)\r\n if sum(linesPerProcessesList) < lineCounting:\r\n for number in range (lineCounting - sum(linesPerProcessesList)):\r\n linesPerProcessesList[number] += 1\r\n\r\n return linesPerProcessesList", "def data_edge_count(self) -> int:\n return int(self.graph_tuple_stats.data_edge_count or 0)", "def create_1d_coil_geometry(division, filename, directory):\n os.chdir(directory)\n npoints = division + 1\n length_array = np.zeros((npoints, 2))\n current_length = 0\n array = np.loadtxt(filename)\n for i in range(1, npoints):\n current_length += ((array[i, 1] - array[i - 1, 1]) ** 2 + (array[i, 2] - array[i - 1, 2]) ** 2 +\n (array[i, 3] - array[i - 1, 3]) ** 2) ** 0.5\n length_array[i - 1, 0] = i\n length_array[i, 1] = current_length\n length_array[npoints - 1, 0] = npoints\n return length_array", "def get_number_lines(running_reward_file, running_loss_file, action_count_file):\n if Path(running_reward_file).exists():\n data = np.loadtxt(running_reward_file).reshape(-1,2)\n return data.shape[0]\n if Path(running_loss_file).exists():\n data = np.loadtxt(running_loss_file).reshape(-1,2)\n return data.shape[0]\n if Path(action_count_file).exists():\n data = np.loadtxt(action_count_file).reshape(-1,2)\n return data.shape[0]\n raise NameError(\"No files to count lines\")", "def calc_side_lengths(triangles):\n first_vec = [2, 0, 1]\n second_vec = [1, 2, 0]\n sides = triangles[:, first_vec] - triangles[:, second_vec]\n lengths = np.sqrt(np.sum(sides**2, axis=2))\n return lengths", "def countLength():\n counter = 0\n\n with open('bc.processed3.csv', 'r') as openfile:\n for line in openfile:\n counter += 1\n if counter == 1:\n print line\n\n print('Length: ', counter)", "def pre_process_file(filename):\n\n num_lines = 0\n vm_ids = set()\n with open(filename) as trace:\n for item in csv.reader(trace, delimiter=','):\n num_lines += 1\n disk_id = int(item[2])\n vm_ids.add(disk_id) # can make it more efficient\n no_of_vms = len(vm_ids)\n return (num_lines, no_of_vms, vm_ids)", "def load_triangles(triangle_bytes, header):\n triangles = list()\n for i in range(header.num_tris):\n triangle = triangle_t(list(struct.unpack(\"<hhh\", triangle_bytes[12*i:12*i+6])), 
list(struct.unpack(\"<hhh\", triangle_bytes[12*i+6:12*i+12])))\n # print(triangle)\n triangles.append(triangle)\n return triangles", "def xFileInfo(filename):\n delim = getDelimiter(filename)\n f = open(filename, 'r')\n reader = csv.reader(f, delimiter=delim)\n num_rows = 0\n for (row_i, row) in enumerate(reader):\n if row_i == 0: #ignore empty strings (e.g. at end of row)\n num_cols = len([val for val in row if val])\n num_rows += 1\n f.close()\n return (num_rows, num_cols)", "def load_counts(filename, lengths=None, base=None):\n n = None\n if lengths is not None:\n n = lengths.sum()\n shape = (n, n)\n else:\n shape = None\n # This is the interaction count files\n dataframe = pd.read_csv(filename, sep=\"\\t\", comment=\"#\", header=None)\n row, col, data = dataframe.as_matrix().T\n\n # If there are NAs remove them\n mask = np.isnan(data)\n if np.any(mask):\n warnings.warn(\n \"NAs detected in %s. \"\n \"Removing NAs and replacing with 0.\" % filename)\n row = row[np.invert(mask)]\n col = col[np.invert(mask)]\n data = data[np.invert(mask)]\n\n # XXX We need to deal with the fact that we should not duplicate entries\n # for the diagonal.\n # XXX what if n doesn't exist?\n if base is not None:\n if base not in [0, 1]:\n raise ValueError(\"indices should start either at 0 or 1\")\n col -= base\n row -= base\n else:\n warnings.warn(\n \"Attempting to guess whether counts are 0 or 1 based\")\n\n if (col.min() >= 1 and row.min() >= 1) and \\\n ((n is None) or (col.max() == n)):\n # This is a hack to deal with the fact that sometimes, the files\n # are indexed at 1 and not 0\n\n col -= 1\n row -= 1\n\n if shape is None:\n n = max(col.max(), row.max()) + 1\n shape = (n, n)\n\n data = data.astype(float)\n counts = sparse.coo_matrix((data, (row, col)), shape=shape)\n return counts", "def get_filesize(inputfile) -> int:\n with open(inputfile, \"rb\") as f:\n lines = 0\n buf_size = 1024 * 1024\n read_f = f.raw.read\n\n buf = read_f(buf_size)\n while buf:\n lines += buf.count(b\"\\n\")\n buf = read_f(buf_size)\n\n return lines", "def get_dimensions ( file_in, separator ) :\n try :\n logger.info ( \"Extract dimensions from xyz file \" + str(file_in) ) \n d = {}\n first_row = True\n d[NOPS] = 0\n file = open(file_in, 'r')\n for line in file :\n d[NOPS] = d[NOPS] + 1\n l = line.rstrip().split(separator)\n x = float(l[0])\n y = float(l[1])\n z = float(l[2])\n if first_row :\n d[MINX] = x\n d[MAXX] = x\n d[MINY] = y\n d[MAXY] = y\n d[MINZ] = z\n d[MAXZ] = z\n first_row = False\n else :\n if x < d[MINX] :\n d[MINX] = x\n if x > d[MAXX] :\n d[MAXX] = x \n if y < d[MINY] :\n d[MINY] = y\n if y > d[MAXY] :\n d[MAXY] = y \n if z < d[MINZ] :\n d[MINZ] = z\n if z > d[MAXZ] :\n d[MAXZ] = z \n file.close() \n logger.info ('Now return')\n return d\n except Exception, err:\n logger.critical(\"Extract dimensions from xyz file failed: ERROR: %s\\n\" % str(err))\n raise", "def count_cop(self, infile):\n n_cop = 0\n dgs_in = self._file_handler.file_to_dg_list(infile)\n for dg in dgs_in:\n if dg.has_cop_deprel():\n n_cop += 1\n return n_cop, len(dgs_in)", "def listRows(file):\n\twith open(file) as f:\n\t\tcount = 0\n\t\tfor line in f.readlines():\n\t\t\tcount += 1\n\t\treturn count -1", "def getVertexNumbers(self):\n return self.vertexIndex.keys()", "def reader(filename,only_length=False):\n print(\"Counting lines in file %s\"%filename)\n total_lines=0\n for n,line in enumerate(open(filename,\"r\")):\n total_lines+=1\n \n if only_length:\n return total_lines\n \n X,Y,Z,W,J=[np.zeros(total_lines) for _ in range(5)]\n \n for 
n, line in enumerate(open(filename, 'r')):\n if n%1000000==0:\n print(\"Reading line %d of %d from file %s\" %(n,total_lines,filename))\n split_line=np.array(line.split(\" \"), dtype=float) \n X[n]=split_line[0];\n Y[n]=split_line[1];\n Z[n]=split_line[2];\n W[n]=split_line[3];\n J[n]=int(split_line[4]);\n return X,Y,Z,W,J", "def pfd_read (r) :\n s = r.readline()\n if s == \"\" :\n return False\n l = s.split()\n numVert = int(l[0])\n numRule = int(l[1])\n assert numVert > 0 and numVert <= 100\n assert numRule > 0 and numRule <= 100\n v = [[0,[]]] #build the Vertex array\n for i in range(1, numVert):\n temp = [0,[]]\n v.append(temp)\n s = r.readline()\n for i in range(0, numRule):\n if s == \"\":\n return False\n l = s.split()\n v[int(l[0])-1][0] = int(l[1]) #verts[l[0]].numPred = l[1]\n #verts[l[0]].preds = [0] * (len(l)-2) #I don't know whether this line is necessary\n lenl = len(l)\n for j in range(2,lenl):\n v[int(l[j])-1][1].append(int(l[0]))\n #verts[l[j]-1].succ.append(l[0]) \n s = r.readline()\n return v", "def part1(filename: str) -> int:\n data = first_line(filename)\n data = json.loads(data)\n return total_nums(data)", "def get_num_vertices(self):\n\n return self._graph_state.get_num_vertices()", "def get_dataset_size(file_path):\n size = 1\n file_list = tf.io.gfile.glob(file_path)\n for file in file_list:\n for record in tf.compat.v1.io.tf_record_iterator(file, options=tf.io.TFRecordOptions(\n compression_type='GZIP')):\n size += 1\n return size", "def from_polyfile(name):\n\n from anuga.utilities.numerical_tools import anglediff\n from math import pi\n import os.path\n root, ext = os.path.splitext(name)\n\n if ext == 'poly':\n filename = name\n else:\n filename = name + '.poly'\n\n\n fid = open(filename)\n\n points = [] #x, y\n values = [] #z\n ##vertex_values = [] #Repeated z\n triangles = [] #v0, v1, v2\n\n lines = fid.readlines()\n\n keyword = lines[0].strip()\n msg = 'First line in .poly file must contain the keyword: POINTS'\n assert keyword == 'POINTS', msg\n\n offending = 0\n i = 1\n while keyword == 'POINTS':\n line = lines[i].strip()\n i += 1\n\n if line == 'POLYS':\n keyword = line\n break\n\n fields = line.split(':')\n assert int(fields[0]) == i-1, 'Point indices not consecutive'\n\n #Split the three floats\n xyz = fields[1].split()\n\n x = float(xyz[0])\n y = float(xyz[1])\n z = float(xyz[2])\n\n points.append([x, y])\n values.append(z)\n\n\n k = i\n while keyword == 'POLYS':\n line = lines[i].strip()\n i += 1\n\n if line == 'END':\n keyword = line\n break\n\n\n fields = line.split(':')\n assert int(fields[0]) == i-k, 'Poly indices not consecutive'\n\n #Split the three indices\n vvv = fields[1].split()\n\n i0 = int(vvv[0])-1\n i1 = int(vvv[1])-1\n i2 = int(vvv[2])-1\n\n #Check for and exclude degenerate areas\n x0 = points[i0][0]\n y0 = points[i0][1]\n x1 = points[i1][0]\n y1 = points[i1][1]\n x2 = points[i2][0]\n y2 = points[i2][1]\n\n area = abs((x1*y0-x0*y1)+(x2*y1-x1*y2)+(x0*y2-x2*y0))/2\n if area > 0:\n\n #Ensure that points are arranged in counter clock-wise order\n v0 = [x1-x0, y1-y0]\n v1 = [x2-x1, y2-y1]\n v2 = [x0-x2, y0-y2]\n\n a0 = anglediff(v1, v0)\n a1 = anglediff(v2, v1)\n a2 = anglediff(v0, v2)\n\n\n if a0 < pi and a1 < pi and a2 < pi:\n #all is well\n j0 = i0\n j1 = i1\n j2 = i2\n else:\n #Swap two vertices\n j0 = i1\n j1 = i0\n j2 = i2\n\n triangles.append([j0, j1, j2])\n ##vertex_values.append([values[j0], values[j1], values[j2]])\n else:\n offending +=1\n\n log.critical('Removed %d offending triangles out of %d'\n % (offending, len(lines)))\n 
return points, triangles, values", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def get_triangles( self, N ):\n\n # store N as an instance variable\n self.N = N\n\n # initialize array to store locations of points for all triangles in the\n # tessellation sequence\n self.triangles = np.zeros( ( self.N, 3, 2 ) )\n\n # define points of the first triangle in the tessellation sequence\n point_c = np.array( [ 0, 0 ] )\n point_b = self.a * np.array( [ np.cos( self.C ), np.sin( self.C ) ] )\n point_a = np.array( [ self.b, 0 ] )\n\n # stack the points into a single array of shape (3, 2 )\n triangle = np.vstack( [ point_c, point_b, point_a ] )\n\n # loop over the number of triangles in the sequence\n for i in range( self.N ):\n\n # store the points of the i-th triangle in the array\n self.triangles[ i ] = triangle\n\n # compute the next triangle in the tessellation sequence\n triangle = self.next_triangle( triangle = triangle )\n\n # shift the next triangle in the tessellation sequence such that its\n # point C is in the same location as point B of the previous triangle\n triangle += ( self.triangles[ i - 1, 1 ] - self.triangles[ 0, 0 ] )", "def so_data_statistics(data_file):\n with open(data_file, \"r\") as f:\n data = json.load(f)\n\n answer_to_num_questions = collections.Counter()\n comment_to_num_questions = collections.Counter()\n num_comments = 0\n num_answers = 0\n num_questions = len(data)\n\n for q in data:\n q = json.loads(q)\n q_comments = 0\n q_comments += len(q[\"comments\"])\n q_answers = len(q[\"answers\"])\n for a in q[\"answers\"]:\n q_comments += len(a[\"comments\"])\n\n answer_to_num_questions[q_answers] += 1\n comment_to_num_questions[q_comments] += 1\n\n num_comments += q_comments\n num_answers += q_answers\n\n print \"Num comments: {0}, Num answers: {1}, Num_questions: {2}\".format(\n num_comments, num_answers, num_questions)\n print \"-\" * 10\n print \"Answers map: \", answer_to_num_questions\n print \"Comments map: \", comment_to_num_questions\n\n return num_comments, num_answers, num_questions, answer_to_num_questions, \\\n comment_to_num_questions", "def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n", "def get_vertices_num(self):\n return self.coords.shape[0]", "def strang_mesh(filename):\n\n from math import pi\n from anuga.utilities.numerical_tools import anglediff\n\n\n fid = open(filename)\n points = [] # List of x, y coordinates\n triangles = [] # List of vertex ids as listed in the file\n\n for line in fid.readlines():\n fields = line.split()\n if len(fields) == 2:\n # we are reading vertex coordinates\n points.append([float(fields[0]), float(fields[1])])\n elif len(fields) == 3:\n # we are reading triangle point id's (format ae+b)\n triangles.append([int(float(fields[0]))-1,\n int(float(fields[1]))-1,\n int(float(fields[2]))-1])\n else:\n raise Excetion('wrong format in %s' % filename)\n\n elements = [] #Final list of elements\n\n for t in triangles:\n #Get vertex coordinates\n v0 = t[0]\n v1 = t[1]\n v2 = t[2]\n\n x0 = points[v0][0]\n y0 = points[v0][1]\n x1 = points[v1][0]\n y1 = points[v1][1]\n x2 = points[v2][0]\n y2 = points[v2][1]\n\n #Check that points are arranged in counter clock-wise order\n vec0 = [x1-x0, y1-y0]\n vec1 = [x2-x1, y2-y1]\n vec2 = [x0-x2, y0-y2]\n\n a0 = anglediff(vec1, vec0)\n a1 = 
anglediff(vec2, vec1)\n a2 = anglediff(vec0, vec2)\n\n if a0 < pi and a1 < pi and a2 < pi:\n elements.append([v0, v1, v2])\n else:\n elements.append([v0, v2, v1])\n\n return points, elements", "def vertex_count(self):\n return len(self._outgoing)", "def n_file_elements(cls):\n \n return randint(1, (3 * Root.size))", "def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])", "def load_lengths(filename, return_base=False):\n data = pd.read_csv(filename, sep=\"\\t\", comment=\"#\", header=None)\n data = data.as_matrix()\n _, idx, lengths = np.unique(data[:, 0], return_counts=True,\n return_index=True)\n if return_base:\n return lengths[idx.argsort()], data[0, 3]\n else:\n return lengths[idx.argsort()]", "def __init__(self, file_path):\n\n # Comments\n # mtllib mtl_name\n # o object_name\n # v x y z\n # vt u v\n # vn x y z\n # f v0/t0/n0 v1/t1/n1 v2/t2/n2\n\n print('loading mesh \"%s\"' % file_path)\n mesh_file = open(file_path, 'r')\n\n verts = []\n texs = []\n normals = []\n faces = []\n\n # For each line of the input file\n for line in mesh_file:\n line = line.rstrip(' \\r\\n')\n\n # Skip comments\n if line.startswith('#') or line == '':\n continue\n\n tokens = line.split(' ')\n tokens = map(lambda t: t.strip(' '), tokens)\n tokens = list(filter(lambda t: t != '', tokens))\n\n prefix = tokens[0]\n tokens = tokens[1:]\n\n if prefix == 'v':\n vert = list(map(lambda v: float(v), tokens))\n verts.append(vert)\n\n if prefix == 'vt':\n tc = list(map(lambda v: float(v), tokens))\n texs.append(tc)\n\n if prefix == 'vn':\n normal = list(map(lambda v: float(v), tokens))\n normals.append(normal)\n\n if prefix == 'f':\n assert len(tokens) == 3, \"only triangle faces are supported\"\n\n face = []\n for token in tokens:\n indices = list(map(lambda idx: int(idx), token.split('/')))\n face.append(indices)\n\n faces.append(face)\n\n mesh_file.close()\n\n self.num_faces = len(faces)\n\n print('num verts=%d' % len(verts))\n print('num_faces=%d' % self.num_faces)\n\n # Create numpy arrays to store the vertex data\n list_verts = np.zeros(shape=(3 * self.num_faces, 3), dtype=np.float32)\n list_texcs = np.zeros(shape=3 * 2 * self.num_faces, dtype=np.float32)\n list_norms = np.zeros(shape=3 * 3 * self.num_faces, dtype=np.float32)\n\n cur_vert_idx = 0\n\n # For each triangle\n for face in faces:\n # For each triplet of indices\n for triplet in face:\n v_idx, t_idx, n_idx = triplet\n\n # Note: OBJ uses 1-based indexing\n vert = verts[v_idx-1]\n texc = texs[t_idx-1]\n normal = normals[n_idx-1]\n\n list_verts[cur_vert_idx, :] = vert\n list_texcs[2*cur_vert_idx:2*(cur_vert_idx+1)] = texc\n list_norms[3*cur_vert_idx:3*cur_vert_idx+3] = normal\n\n cur_vert_idx += 1\n\n # Re-center the object so that y=0 is at the base,\n # and the object is centered in x and z\n x_coords = list_verts[:, 0]\n z_coords = list_verts[:, 2]\n min_y = list_verts[:, 1].min()\n mean_x = (x_coords.min() + x_coords.max()) / 2\n mean_z = (z_coords.min() + z_coords.max()) / 2\n list_verts[:, 1] -= min_y\n list_verts[:, 0] -= mean_x\n list_verts[:, 2] -= mean_z\n\n # Compute the object extents after centering\n x_coords = list_verts[:, 0]\n y_coords = list_verts[:, 1]\n z_coords = list_verts[:, 2]\n self.y_max = y_coords.max()\n\n # Create a vertex list to be used for rendering\n self.vlist = pyglet.graphics.vertex_list(\n 3 * self.num_faces,\n ('v3f', list_verts.reshape(-1)),\n ('t2f', list_texcs),\n ('n3f', list_norms)\n )\n\n # Load the texture associated with this mesh\n file_name = 
os.path.split(file_path)[-1]\n tex_name = file_name.split('.')[0]\n tex_path = get_file_path('textures', tex_name, 'png')\n self.texture = load_texture(tex_path)", "def count_elements(path):\n count = 0\n with open(path, 'r') as f:\n groups = f.read().split('\\n\\n')\n for idx in range(len(groups)):\n word = groups[idx].split('\\n')\n no_of_ele = len(word)\n for i in range(no_of_ele-1):\n word[0] = word[0]+word[i+1]\n count += len(''.join(set(word[0])))\n return count" ]
[ "0.7772032", "0.68836695", "0.6398507", "0.6369426", "0.6362666", "0.6353974", "0.6323438", "0.6275137", "0.6170703", "0.6165812", "0.6144632", "0.613628", "0.61066425", "0.6011404", "0.59843576", "0.59520715", "0.5947087", "0.594126", "0.5935394", "0.59270984", "0.59270984", "0.5926359", "0.58514494", "0.58492893", "0.5836909", "0.5831675", "0.5831274", "0.5829534", "0.5826601", "0.5811575", "0.5778336", "0.57675093", "0.57538956", "0.5736303", "0.57076925", "0.56999564", "0.5698479", "0.5696968", "0.5686956", "0.56687146", "0.56588656", "0.5653763", "0.56502515", "0.56390417", "0.56024665", "0.55870193", "0.55844635", "0.5567423", "0.55649316", "0.55533016", "0.55512464", "0.5543442", "0.5538092", "0.5535526", "0.5528358", "0.55240124", "0.55178404", "0.5515604", "0.5512616", "0.55055326", "0.5478094", "0.5473247", "0.5468789", "0.54656523", "0.5454213", "0.5453736", "0.5441365", "0.5434383", "0.54289347", "0.5426822", "0.54233533", "0.5419709", "0.541386", "0.5409464", "0.54024774", "0.54002786", "0.53972715", "0.5392659", "0.5391981", "0.53919524", "0.53839743", "0.53838426", "0.538381", "0.5370671", "0.5369772", "0.5365828", "0.5359394", "0.53585804", "0.5356658", "0.5348002", "0.5342111", "0.53373665", "0.53328633", "0.5328268", "0.53233385", "0.5323326", "0.53231263", "0.5319525", "0.5303401", "0.5298637" ]
0.64086694
2
This function will return data about the number of triangles on each surface in a file inputs
def get_triangles_per_surface(my_core, entity_ranges):
    t_p_s = {}
    for surface in entity_ranges['Surfaces']:
        t_p_s[surface] = my_core.get_entities_by_type(
            surface, types.MBTRI).size()
    return t_p_s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_triangles(self, file):\n self.nibble(80)\n return struct.unpack(\"@i\", self.nibble(4))[0]", "def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1", "def count_cells(fpath):\n cells = []\n for i in range(40):\n fname = f\"{fpath}/Mesh2d_{i}.vtu\"\n if not os.path.exists(fname):\n print(f\"File {fname} does not exist.\")\n break\n mesh = meshio.read(fname)\n for cell_block in mesh.cells:\n if cell_block.type in (\"triangle\"):\n num_cells = len(cell_block)\n print(f\"{i:2d}: {num_cells:6d} elements, {len(mesh.points):6d} vertices\")\n cells.append(num_cells)\n continue\n return cells", "def num_data_lines(filepath):\n\tif not file_exists(filepath):\n\t\treturn -1\n\tcount = 0\n\twith open(filepath, 'r') as f:\n\t\twhile read_floats(f):\n\t\t\tcount += 1\n\tf.close()\n\treturn count", "def countSamples(filename):\n with open(filename, \"r\") as f:\n line = f.readline().split(\"\\t\")\n return len(line) - 2", "def _get_las_npoints(fpath):\n with laspy.file.File(fpath) as f:\n return f.header.count", "def get_triangles_per_vertex(my_core, native_ranges):\n\n t_p_v_data = []\n tri_dimension = 2\n for vertex in native_ranges[types.MBVERTEX]:\n t_p_v_data.append(my_core.get_adjacencies(vertex, tri_dimension).size())\n return np.array(t_p_v_data)", "def test_triangle_count_05(self):\n body = {\"direction\": \"IN\", \"degree\": -1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_in': 13, 'vertices_in': 9, 'triangles': 2}\n else:\n assert 0", "def test_triangle_count_08(self):\n body = {\"direction\": \"IN\", \"degree\": 1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_in': 13, 'vertices_in': 9, 'triangles': 0}\n else:\n assert 0", "def test_triangle_count_04(self):\n body = {\"direction\": \"OUT\"}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0", "def test_triangle_count_06(self):\n body = {\"direction\": \"OUT\", \"degree\": -1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def data_info(data):\n filename = data[\"filename\"]\n X_var = 
data[\"X_var\"]\n Y_var = data[\"Y_var\"]\n X,Y = read_file(filename,X_var,Y_var)\n input_dim = len(X_var)\n output_dim = len(Y_var)\n return X,Y,input_dim,output_dim", "def count_aux(self, infile):\n n_aux = 0\n n_tokens = 0\n dgs_in = self._file_handler.file_to_dg_list(infile)\n for dg in dgs_in:\n n_tokens += len(dg)\n transform = VGtransformer(dg, dep_style=self._dep_style)\n transform.transform()\n n_aux += transform.tot_aux\n return n_aux, n_tokens, len(dgs_in)", "def read_datafile(shower_name):\n\twith open(datafile_dir+'datafile_'+shower_name) as file:\n\t\tamplitudes = [float(line) for line in file]\n\tcount = [x+1 for x in range(len(amplitudes))[::-1]]\n\treturn amplitudes, count", "def get_array_size():\n tg_file = 'NA_CAS_gauges.txt'\n lines = open(tg_file).readlines()\n tg_nbr = len(lines)\n return tg_nbr", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def create_1d_coil_geometry(division, filename, directory):\n os.chdir(directory)\n npoints = division + 1\n length_array = np.zeros((npoints, 2))\n current_length = 0\n array = np.loadtxt(filename)\n for i in range(1, npoints):\n current_length += ((array[i, 1] - array[i - 1, 1]) ** 2 + (array[i, 2] - array[i - 1, 2]) ** 2 +\n (array[i, 3] - array[i - 1, 3]) ** 2) ** 0.5\n length_array[i - 1, 0] = i\n length_array[i, 1] = current_length\n length_array[npoints - 1, 0] = npoints\n return length_array", "def load_data():\n data = []\n with euler.Resource('triangle.txt') as datafile:\n for line in datafile.readlines():\n data.insert(0, map(int, line.strip().split()))\n return data", "def test_triangle_count_07(self):\n body = {\"direction\": \"OUT\", \"degree\": 1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 0}\n else:\n assert 0", "def read_input(filename):\n with open(filename, 'r') as f:\n (N) = map(int, next(f).split())\n def parse_line(line):\n l = line.split()\n h = 0 if l[0] == 'H' else 1\n n = int(l[1])\n return [h, l[2:]]\n\n photos = transform_strings([parse_line(line) for line in f])\n return (len(photos), photos)", "def get_edge_lengths(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n edges, _ = get_edges(triangles)\n return np.linalg.norm(np.diff(points[edges], axis=1), axis=2).squeeze()", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def count_timepoints(sc, session, files):\n tuples = zip(range(len(files)), files)\n files_sc = sc.parallelize(tuples)\n\n def count_planes(kv):\n index, path2 = kv\n try:\n from ScanImageTiffReader import ScanImageTiffReader\n img = ScanImageTiffReader(path2).data()\n except Exception:\n import tifffile\n img = tifffile.imread(path2)\n return img.shape[0]\n\n data2 = files_sc.map(count_planes).collect()\n frame_numbers = np.array(data2)\n vol_numbers = frame_numbers / len(session.fieldMask)\n return vol_numbers.astype(int)", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def read_off_size(path):\n try:\n with open(path, 'r') as file:\n lines = file.readlines()\n if lines[0] != 'OFF\\n':\n print(path, 'Error: is not an .off file')\n num_vertices, num_faces = tuple(lines[1].split()[:2])\n return 
int(num_vertices), int(num_faces)\n except IOError:\n print('Error: Failed reading file:', path)", "def num_quadrature_points(self) -> int:", "def findTriangles(p):\n triangleCount = 0\n for a in range(3, p//3 + 1):\n for b in range(a+1, p//2):\n c = p - (a+b)\n if (a**2 + b**2) == c**2:\n triangleCount += 1\n return triangleCount", "def get_dimensions ( file_in, separator ) :\n try :\n logger.info ( \"Extract dimensions from xyz file \" + str(file_in) ) \n d = {}\n first_row = True\n d[NOPS] = 0\n file = open(file_in, 'r')\n for line in file :\n d[NOPS] = d[NOPS] + 1\n l = line.rstrip().split(separator)\n x = float(l[0])\n y = float(l[1])\n z = float(l[2])\n if first_row :\n d[MINX] = x\n d[MAXX] = x\n d[MINY] = y\n d[MAXY] = y\n d[MINZ] = z\n d[MAXZ] = z\n first_row = False\n else :\n if x < d[MINX] :\n d[MINX] = x\n if x > d[MAXX] :\n d[MAXX] = x \n if y < d[MINY] :\n d[MINY] = y\n if y > d[MAXY] :\n d[MAXY] = y \n if z < d[MINZ] :\n d[MINZ] = z\n if z > d[MAXZ] :\n d[MAXZ] = z \n file.close() \n logger.info ('Now return')\n return d\n except Exception, err:\n logger.critical(\"Extract dimensions from xyz file failed: ERROR: %s\\n\" % str(err))\n raise", "def reader(filename,only_length=False):\n print(\"Counting lines in file %s\"%filename)\n total_lines=0\n for n,line in enumerate(open(filename,\"r\")):\n total_lines+=1\n \n if only_length:\n return total_lines\n \n X,Y,Z,W,J=[np.zeros(total_lines) for _ in range(5)]\n \n for n, line in enumerate(open(filename, 'r')):\n if n%1000000==0:\n print(\"Reading line %d of %d from file %s\" %(n,total_lines,filename))\n split_line=np.array(line.split(\" \"), dtype=float) \n X[n]=split_line[0];\n Y[n]=split_line[1];\n Z[n]=split_line[2];\n W[n]=split_line[3];\n J[n]=int(split_line[4]);\n return X,Y,Z,W,J", "def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number", "def calc_side_lengths(triangles):\n first_vec = [2, 0, 1]\n second_vec = [1, 2, 0]\n sides = triangles[:, first_vec] - triangles[:, second_vec]\n lengths = np.sqrt(np.sum(sides**2, axis=2))\n return lengths", "def numverts(self):\n return self._numvertstotal", "def readGR3File(inputFilename):\n print 'Reading ' + inputFilename + ' ...'\n infile = open(inputFilename, 'r')\n description = infile.readline().strip() # remove leading/trailing whitespace\n tmpStr = infile.readline()\n nTriangles, nNodes = (int(s) for s in tmpStr.split())\n print ' nTriangles={0:d} nNodes={1:d}'.format(nTriangles, nNodes)\n\n # nodes\n nodeArray = readNodeBlock(infile, nNodes)\n nodenum = np.array(nodeArray[:, 0].flatten(), dtype=int)\n nodexyz = np.zeros((nNodes, 3))\n nodexyz[:, :2] = nodeArray[:, 1:3]\n nodalValues = nodeArray[:, 3]\n\n print ' Nodal values min={0:g} max={1:g}'.format(min(nodalValues), max(nodalValues))\n\n # triangular elements\n triArray = readElemBlock(infile, nTriangles)\n\n trinum = triArray[:, 0].flatten()\n tritype = triArray[0, 1]\n trinodes = triArray[:, -3:] - 1 # three last columns, 0-based indexing\n #triangles = meshElements(trinodes,trinum,tritype)\n\n x = nodexyz[:, 0]\n y = nodexyz[:, 1]\n\n tmpStr = infile.readline()\n boundaries = []\n if len(tmpStr) > 0:\n # boundary information, if not end of file\n nOpenBndSegments = int(tmpStr.split()[0])\n nOpenBndNodesTot = int(infile.readline().split()[0])\n print ' nOpenBndSegments={0:d} nOpenBndNodesTot={1:d}'.format(nOpenBndSegments, nOpenBndNodesTot)\n for iBnd in range(nOpenBndSegments):\n bndHeader = 
infile.readline().split()\n nBndNodes = int(bndHeader[0])\n tag = bndHeader[-1]\n if tag.isdigit():\n tag = 'open' + tag\n print ' open bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary('open', tag, nodes))\n nLandBndSegments = int(infile.readline().split()[0])\n nLandBndNodesTot = int(infile.readline().split()[0])\n landBndTags = range(\n nOpenBndSegments + 1,\n nOpenBndSegments + nLandBndSegments + 1)\n print ' nLandBndSegments={0:d} nLandBndNodesTot={1:d}'.format(nLandBndSegments, nLandBndNodesTot)\n for iBnd in range(nLandBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n try:\n landType = int(bndHeader[1])\n except:\n print \"\"\"Land boundary type missing in gr3 file. Add 0/1 (land/island) after number of nodes in each land boudary, e.g.\n 1002 = Total number of closed boundary nodes\n 501 0 = Number of nodes in closed boundary 1\"\"\"\n raise Exception(\n 'Could not parse land boundary type (0/1 - land/island)\\n')\n landType = 'island' if landType == 1 else 'land'\n tag = landType + bndHeader[-1]\n print ' land bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n #tmpList = fromfile(infile,dtype=int,count=nBndNodes,sep=' ')\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary(landType, tag, nodes))\n\n infile.close()\n\n # for better interpolation, round coordinates to 1e-4\n nDig = 4\n x = np.round(x, nDig)\n y = np.round(y, nDig)\n\n return x, y, nodalValues, trinodes, boundaries, description", "def file_read(file_name):\n \n #open specified file in read mode\n in_file = open(file_name, \"r\")\n \n #create data lists\n sp_length_v3 = []\n sp_period_v3 = [] \n\n #save header to string and split into list\n header_string = in_file.readline()\n header_v3 = header_string.split()\n \n #save revelent data to respective lists\n for line in in_file:\n values = line.split()\n sp_length_v3.append(float(values[1]))\n sp_period_v3.append(float(values[2]))\n \n #close the file\n in_file.close()\n \n #return 3D lists of lists containing data\n ans = [sp_length_v3, sp_period_v3, header_v3]\n \n return ans", "def from_polyfile(name):\n\n from anuga.utilities.numerical_tools import anglediff\n from math import pi\n import os.path\n root, ext = os.path.splitext(name)\n\n if ext == 'poly':\n filename = name\n else:\n filename = name + '.poly'\n\n\n fid = open(filename)\n\n points = [] #x, y\n values = [] #z\n ##vertex_values = [] #Repeated z\n triangles = [] #v0, v1, v2\n\n lines = fid.readlines()\n\n keyword = lines[0].strip()\n msg = 'First line in .poly file must contain the keyword: POINTS'\n assert keyword == 'POINTS', msg\n\n offending = 0\n i = 1\n while keyword == 'POINTS':\n line = lines[i].strip()\n i += 1\n\n if line == 'POLYS':\n keyword = line\n break\n\n fields = line.split(':')\n assert int(fields[0]) == i-1, 'Point indices not consecutive'\n\n #Split the three floats\n xyz = fields[1].split()\n\n x = float(xyz[0])\n y = float(xyz[1])\n z = float(xyz[2])\n\n points.append([x, y])\n values.append(z)\n\n\n k = i\n while keyword == 'POLYS':\n line = lines[i].strip()\n i += 1\n\n if line == 'END':\n keyword = line\n break\n\n\n fields = line.split(':')\n assert int(fields[0]) == i-k, 'Poly indices not consecutive'\n\n #Split the 
three indices\n vvv = fields[1].split()\n\n i0 = int(vvv[0])-1\n i1 = int(vvv[1])-1\n i2 = int(vvv[2])-1\n\n #Check for and exclude degenerate areas\n x0 = points[i0][0]\n y0 = points[i0][1]\n x1 = points[i1][0]\n y1 = points[i1][1]\n x2 = points[i2][0]\n y2 = points[i2][1]\n\n area = abs((x1*y0-x0*y1)+(x2*y1-x1*y2)+(x0*y2-x2*y0))/2\n if area > 0:\n\n #Ensure that points are arranged in counter clock-wise order\n v0 = [x1-x0, y1-y0]\n v1 = [x2-x1, y2-y1]\n v2 = [x0-x2, y0-y2]\n\n a0 = anglediff(v1, v0)\n a1 = anglediff(v2, v1)\n a2 = anglediff(v0, v2)\n\n\n if a0 < pi and a1 < pi and a2 < pi:\n #all is well\n j0 = i0\n j1 = i1\n j2 = i2\n else:\n #Swap two vertices\n j0 = i1\n j1 = i0\n j2 = i2\n\n triangles.append([j0, j1, j2])\n ##vertex_values.append([values[j0], values[j1], values[j2]])\n else:\n offending +=1\n\n log.critical('Removed %d offending triangles out of %d'\n % (offending, len(lines)))\n return points, triangles, values", "def n_file_elements(cls):\n \n return randint(1, (3 * Root.size))", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def get_triangle_count(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_GetTriangleCount(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_GetTriangleCount(key1, result_val)\n return result_val.i", "def get_dimensions(self):\r\n x = []\r\n y = []\r\n z = []\r\n for i in self.verts:\r\n x.append(i[0])\r\n y.append(i[1])\r\n z.append(i[2])\r\n\r\n x.append(abs(min(x)))\r\n y.append(abs(min(y)))\r\n z.append(abs(min(z)))\r\n\r\n return max(x), max(y), max(z)", "def get_triangles( self, N ):\n\n # store N as an instance variable\n self.N = N\n\n # initialize array to store locations of points for all triangles in the\n # tessellation sequence\n self.triangles = np.zeros( ( self.N, 3, 2 ) )\n\n # define points of the first triangle in the tessellation sequence\n point_c = np.array( [ 0, 0 ] )\n point_b = self.a * np.array( [ np.cos( self.C ), np.sin( self.C ) ] )\n point_a = np.array( [ self.b, 0 ] )\n\n # stack the points into a single array of shape (3, 2 )\n triangle = np.vstack( [ point_c, point_b, point_a ] )\n\n # loop over the number of triangles in the sequence\n for i in range( self.N ):\n\n # store the points of the i-th triangle in the array\n self.triangles[ i ] = triangle\n\n # compute the next triangle in the tessellation sequence\n triangle = self.next_triangle( triangle = triangle )\n\n # shift the next triangle in the tessellation sequence such that its\n # point C is in the same location as point B of the previous triangle\n triangle += ( self.triangles[ i - 1, 1 ] - self.triangles[ 0, 0 ] )", "def std_triangles_count(graph):\n if nx.is_directed(graph):\n raise Exception(\"Graph is not 
undirected\")\n\n return sum(nx.triangles(graph).values()) // 3", "def getNbins(self,axis,includeTotalBin = True):\n\n\t\tif axis == \"f\":\n\t\t\tnCells = 1 if self.nCells == 0 else self.nCells\n\t\t\treturn nCells\n\n\t\tif axis == \"i\":\n\t\t\treturn self.meshInfo[1]\n\n\t\tif axis == \"j\":\n\t\t\treturn self.meshInfo[2]\n\n\t\tif axis == \"k\":\n\t\t\treturn self.meshInfo[3]\n\n\t\tif axis == \"d\":\n\t\t\tnDir = 1 if self.nDir == 0 else self.nDir\n\t\t\treturn nDir\n\n\t\tif axis == \"u\":\n\t\t\tnUsr = 1 if self.nUsr == 0 else self.nUsr\n\t\t\tnUsr = nUsr - 1 if self.usrTC == \"t\" and not includeTotalBin else nUsr\n\t\t\treturn nUsr\n\n\t\tif axis == \"s\":\n\t\t\tnSeg = 1 if self.nSeg == 0 else self.nSeg\n\t\t\tnSeg = nSeg - 1 if self.segTC == \"t\" and not includeTotalBin else nSeg\n\t\t\treturn nSeg\n\n\t\tif axis == \"m\":\n\t\t\tnMul = 1 if self.nMul == 0 else self.nMul\n\t\t\tnMul = nMul - 1 if self.mulTC == \"t\" and not includeTotalBin else nMul\n\t\t\treturn nMul\n\n\t\tif axis == \"c\":\n\t\t\tnCos = 1 if self.nCos == 0 else self.nCos\n\t\t\tnCos = nCos - 1 if self.cosTC == \"t\" and not includeTotalBin else nCos\n\t\t\treturn nCos\n\n\t\tif axis == \"e\":\n\t\t\tnErg = 1 if self.nErg == 0 else self.nErg\n\t\t\tnErg = nErg - 1 if self.ergTC == \"t\" and not includeTotalBin else nErg\n\t\t\treturn nErg\n\n\t\tif axis == \"t\":\n\t\t\tnTim = 1 if self.nTim == 0 else self.nTim\n\t\t\tnTim = nTim - 1 if self.timTC == \"t\" and not includeTotalBin else nTim\n\t\t\treturn nTim", "def read_txt(if_name):\n n = 0\n paper_shape = []\n present_shape = []\n input_file = open(if_name,'r')\n i = 0\n\n for line in input_file:\n\n if i > 1:\n\n i += 1\n line = line.strip().split(' ')\n if len(line) < 2:\n break\n present_shape.append([int(e) for e in line])\n\n if i == 1:\n i += 1\n line = line.strip()\n n = int(line)\n\n if i == 0:\n i += 1\n line = line.strip().split(' ')\n paper_shape = [int(e) for e in line]\n\n input_file.close()\n return n, paper_shape, present_shape", "def bpCount(file):\n amount_bp = len(file)\n return amount_bp", "def get_all_object_triangles( filename, scale , translation=(0,0,0)):\n import warnings\n warnings.warn(\"@PendingDeprecationWarning\", PendingDeprecationWarning)\n vertexObjs = read_vertices_objects( filename )\n faceObjs = read_faces_objects( filename )\n\n r = []\n j = 0\n \n # Validation:\n vertices, faces = [],[]\n for obj in range(len(vertexObjs)):\n vertices += vertexObjs[obj]\n faces += faceObjs[obj]\n max_vertex_index = max([max(x) for x in faces])\n if len(vertices) != max_vertex_index:\n logging.warning( \"ParseWarning: A face's vertex index number is does not match the quantity of read vertices.\" )\n logging.warning( \"Qty of Vertices: \"+str(len(vertices))+\", Largest Face Index: \"+str(max_vertex_index) )\n\n # Parse as Tris:\n for obj in range(len(vertexObjs)):\n vertices = vertexObjs[obj]\n faces = faceObjs[obj]\n r.append([])\n c = 0\n for f in faces: # for every face\n for i in f: # for each index point in face\n c+=1\n try:\n # Get the face[i] vertex\n v = vertices[i-1]\n except IndexError as indErr:\n logging.warning(\"IndexError: Attempted to access index: \"+str(i-1)+\" in list of length: \"+str(len(vertices)))\n raise IndexError\n # Scale the face[i] vertex\n scV = [v[0]*scale, v[1]*scale, v[2]*scale]\n # Translate the scaled face vertex:\n t = translation\n tmpv = [scV[0]+t[0],scV[1]+t[1], scV[2]+t[2]]\n # Retain this vertex\n r[j].append(tmpv)\n # ---------------------\n if c % 3 == 0:\n j+=1\n r.append([])\n r = r[:len(r)-1] 
# remove the final empty list.\n\n checkShapeValidity( r )\n return r", "def load_triangles(triangle_bytes, header):\n triangles = list()\n for i in range(header.num_tris):\n triangle = triangle_t(list(struct.unpack(\"<hhh\", triangle_bytes[12*i:12*i+6])), list(struct.unpack(\"<hhh\", triangle_bytes[12*i+6:12*i+12])))\n # print(triangle)\n triangles.append(triangle)\n return triangles", "def test_number_of_surface_objects(self):\n for O in self.mod.objts.itervalues():\n no_of_surfaces = 0\n for C in O.conts.itervalues():\n if C.surf != 0:\n no_of_surfaces += 1\n self.assertEqual(O.surfsize, no_of_surfaces)", "def dimensions():", "def test_number_of_vertex_elements_in_MESH_chunk(self):\n for O in self.mod.objts.itervalues():\n for M in O.meshes.itervalues():\n self.assertEqual(M.vsize, len(M.vert))", "def read_triangle(f, layers=1):\n # Read nodes\n with open(f + '.node') as h:\n num_nodes = int(h.readline().split(' ')[0])\n node_values = np.zeros((num_nodes, 2), dtype=np.float64)\n for line in h:\n if line[0] == '#':\n continue\n node, x, y = line.split()[:3]\n node_values[int(node) - 1, :] = [float(x), float(y)]\n\n nodes = op2.Set(num_nodes, \"nodes\")\n coords = op2.Dat(nodes ** 2, node_values, name=\"coords\")\n\n # Read elements\n with open(f + '.ele') as h:\n num_tri, nodes_per_tri, num_attrs = [int(x) for x in h.readline().split()]\n map_values = np.zeros((num_tri, nodes_per_tri), dtype=np.int32)\n for line in h:\n if line[0] == '#':\n continue\n vals = [int(x) - 1 for x in line.split()]\n map_values[vals[0], :] = vals[1:nodes_per_tri + 1]\n\n elements = op2.Set(num_tri, \"elements\", layers=layers)\n elem_node = op2.Map(elements, nodes, nodes_per_tri, map_values, \"elem_node\")\n\n return nodes, coords, elements, elem_node", "def get_number_lines(running_reward_file, running_loss_file, action_count_file):\n if Path(running_reward_file).exists():\n data = np.loadtxt(running_reward_file).reshape(-1,2)\n return data.shape[0]\n if Path(running_loss_file).exists():\n data = np.loadtxt(running_loss_file).reshape(-1,2)\n return data.shape[0]\n if Path(action_count_file).exists():\n data = np.loadtxt(action_count_file).reshape(-1,2)\n return data.shape[0]\n raise NameError(\"No files to count lines\")", "def get_dataset_length(file_path, had_header=True):\n with open(file_path, 'r') as f:\n length = 0\n for _ in f:\n length += 1\n length = length - had_header\n return length", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def get_library_sizes(args):\n with open(args.counts, \"r\") as counts:\n sizes = []\n head = True\n for line in counts:\n line = line.strip()\n if head:\n head = False\n samples = line.split(\"\\t\")[3:]\n total_counts = [0] * len(samples)\n else:\n counts = line.split(\"\\t\")\n if counts[1] == \"NA\":\n break\n else:\n counts = counts[3:]\n for i in range(len(counts)):\n total_counts[i] += int(counts[i])\n\n for i in range(len(samples)):\n sizes.append([samples[i], total_counts[i]])\n\n return sizes", "def data_len(self):\n Nrows_data = 0\n with self._compression_safe_file_opener(self.input_fname, \"r\") as f:\n for i, l in enumerate(f):\n if (l[0 : len(self.header_char)] != self.header_char) and (l != \"\\n\"):\n Nrows_data += 1\n return Nrows_data", "def getFileCount(self) -> int:\n ...", "def test_number_of_MESH_chunks(self):\n for O in self.mod.objts.itervalues():\n 
self.assertEqual(O.meshsize, len(O.meshes))", "def _tvb_surface_to_tri(self, surface_file_name):\n surface_file_path = os.path.join(OM_STORAGE_DIR, surface_file_name)\n\n #TODO: check file doesn't already exist\n LOG.info(\"Writing TVB surface to .tri file: %s\" % surface_file_path)\n file_handle = file(surface_file_path, \"a\")\n\n file_handle.write(\"- %d \\n\" % self.sources.number_of_vertices)\n verts_norms = numpy.hstack((self.sources.vertices, \n self.sources.vertex_normals))\n numpy.savetxt(file_handle, verts_norms)\n\n tri_str = \"- \" + (3 * (str(self.sources.number_of_triangles) + \" \")) + \"\\n\"\n file_handle.write(tri_str)\n numpy.savetxt(file_handle, self.sources.triangles, fmt=\"%d\")\n\n file_handle.close()\n LOG.info(\"%s written successfully.\" % surface_file_name)\n\n return surface_file_path", "def dataDimensions(data):\n logging.info('Number of rows of data: %s' % len(data))\n logging.info('Number of columns of data: %s' % len(data[1]))", "def _extract_track_lengths(track_key,conn):\n print track_key\n \n (fname,iden_key,track_key) = conn.execute(\"select fout,iden_key,comp_key from tracking where comp_key = ?\",\n track_key).fetchone()\n \n F = h5py.File(fname,'r')\n len_vec = F[fd('tracking',track_key)]['length'][:]\n \n temp = 0\n dtime = 0\n fr_count = 0\n for g in F.keys():\n if g[0:5] == 'frame':\n temp += F[g].attrs['temperature']\n dtime += F[g].attrs['dtime']\n fr_count += 1\n\n \n F.close()\n del F\n return len_vec, temp/fr_count, dtime/fr_count", "def ss_triangle_count(graph: ScipyGraph) -> int:\n props = ScipyGraph.Type.compute_abstract_properties(graph, {\"edge_type\"})\n if props[\"edge_type\"] == \"map\":\n # Drop weights before performing triangle count\n m = graph.value.copy()\n m.data = np.ones_like(m.data)\n elif props[\"edge_type\"] == \"set\":\n m = graph.value\n L = ss.tril(m, k=-1).tocsr()\n U = ss.triu(m, k=1).tocsc()\n return int((L @ U.T).multiply(L).sum())", "def analyze_pressure_dump(filename, Lx=200., Ly=200, Lz=900., N=10, bin_divide_flag=False, Natoms=113579):\n myfile = open(filename+'.txt')\n trajectory = []\n traj_pd = []\n frames = []\n\n for _ in range(3):\n next(myfile)\n count = 0\n while EOF(myfile):\n count += 1\n s = next(myfile) # info with the time step\n\n x = np.zeros(N, dtype=[('Chunk',np.float32), ('Coord1',np.float32), ('Ncount',np.float32), ('density',np.float32), ('temp',np.float32), ('vx',np.float32), ('fx',np.float32),('c_pciKE[1]',np.float32), ('c_pciKE[2]',np.float32), ('c_pciKE[3]',np.float32), ('c_pciVIR[1]',np.float32), ('c_pciVIR[2]',np.float32), ('c_pciVIR[3]',np.float32), ('c_pgelELAS[1]',np.float32), ('c_pgelELAS[2]',np.float32), ('c_pgelELAS[3]',np.float32), ('c_pgelVIR[1]', np.float32), ('c_pgelVIR[2]', np.float32), ('c_pgelVIR[3]', np.float32), ('c_pgelPAIR[1]', np.float32), ('c_pgelPAIR[2]', np.float32), ('c_pgelPAIR[3]', np.float32)])\n\n# Chunk Coord1 Ncount density/number temp vx fx c_pciKE[1] c_pciKE[2] c_pciKE[3] c_pciVIR[1] c_pciVIR[2] c_pciVIR[3] c_pgelELAS[1] c_pgelELAS[2] c_pgelELAS[3] c_pgelVIR[1] c_pgelVIR[2] c_pgelVIR[3] c_pgelPAIR[1] c_pgelPAIR[2] c_pgelPAIR[3]\n\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n frame, _, _ = list_line\n frames.append(int(frame))\n # print( \"reading lines\")\n\n for i in xrange(N):\n count += 1\n s = next(myfile)\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n # print( \"reading line\", i, list_line)\n for il, l in enumerate(list_line):\n x[i][il] = float(l)\n\n trajectory.append(x)\n\n # names = 
x.dtype.fields.keys()\n # data = x.dtype.fields.values()\n\n df = pd.DataFrame.from_records(x)\n traj_pd.append(df)\n\n myfile.close()\n\n\n\n # # volume = 218.*44.*44.\n volume = Lx*Ly*Lz\n # N_atoms = 113579\n # if bin_divide_flag:\n # bin_volume = volume / float(N)\n # else:\n # bin_volume = 1.\n\n bin_volume = volume / float(N)\n # bin_volume = volume\n # bin_volume /= float(Natoms)\n\n Combine_PD = pd.concat(traj_pd)\n FINAL_PD = pd.DataFrame()\n\n FINAL_PD['Coord1'] = Combine_PD['Coord1']\n FINAL_PD['p_ciKE'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciKE[1]'] + Combine_PD['c_pciKE[2]'] + Combine_PD['c_pciKE[3]'])/(3.*bin_volume)\n FINAL_PD['p_ciVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciVIR[1]'] + Combine_PD['c_pciVIR[2]'] + Combine_PD['c_pciVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelELAS'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelELAS[1]'] + Combine_PD['c_pgelELAS[2]'] + Combine_PD['c_pgelELAS[3]'])/(3.*bin_volume)\n\n FINAL_PD['p_gelVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelVIR[1]'] + Combine_PD['c_pgelVIR[2]'] + Combine_PD['c_pgelVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelPAIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelPAIR[1]'] + Combine_PD['c_pgelPAIR[2]'] + Combine_PD['c_pgelPAIR[3]'])/(3.*bin_volume)\n\n # So now I have to\n # P_bin = (sigma_per_atom_xx + ... + sigma_per_atom_zz)/(bin_volume*3)\n # *N_atoms_per_bin\n # N_atoms_per_bin = number_density*N_atoms\n\n\n df_concat = FINAL_PD\n\n by_row_index = df_concat.groupby(df_concat.index)\n df_means = by_row_index.mean()\n by_row_index_2 = df_concat.groupby(df_concat.index)\n df_stds = by_row_index_2.std()\n\n # print( df_means.head())\n # print( df_stds.head())\n return df_means, df_stds", "def fileCount(self):\n pass", "def count_lines(filename):\n with open(filename, 'r', encoding='utf-8') as file:\n lines_count = int()\n for line in file:\n lines_count += 1\n info_tuple = (filename, lines_count)\n return info_tuple", "def _ReadExtent(self):\n # Read the mesh file as line strings, remove lines with comment = !\n v = np.array(np.__version__.split('.')[0:2], dtype=int)\n FileName = self.GetMeshFileName()\n try:\n if v[0] >= 1 and v[1] >= 10:\n # max_rows in numpy versions >= 1.10\n msh = np.genfromtxt(FileName, delimiter='\\n', dtype=np.str,comments='!', max_rows=1)\n else:\n # This reads whole file :(\n msh = np.genfromtxt(FileName, delimiter='\\n', dtype=np.str, comments='!')[0]\n except (IOError, OSError) as fe:\n raise _helpers.PVGeoError(str(fe))\n # Fist line is the size of the model\n self.__sizeM = np.array(msh.ravel()[0].split(), dtype=int)\n # Check if the mesh is a UBC 2D mesh\n if self.__sizeM.shape[0] == 1:\n # Read in data from file\n xpts, xdisc, zpts, zdisc = ubcMeshReaderBase._ubcMesh2D_part(FileName)\n nx = np.sum(np.array(xdisc,dtype=int))+1\n nz = np.sum(np.array(zdisc,dtype=int))+1\n return (0,nx, 0,1, 0,nz)\n # Check if the mesh is a UBC 3D mesh or OcTree\n elif self.__sizeM.shape[0] >= 3:\n # Get mesh dimensions\n dim = self.__sizeM[0:3]\n ne,nn,nz = dim[0], dim[1], dim[2]\n return (0,ne, 0,nn, 0,nz)\n else:\n raise _helpers.PVGeoError('File format not recognized')", "def get_num_chunks(self) -> int:", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def calc_face_dimensions(face):\n vertical = filter_vertical_edges(face.edges, face.normal).pop()\n horizontal = filter_horizontal_edges(face.edges, 
face.normal).pop()\n return horizontal.calc_length(), vertical.calc_length()", "def _count_data(path):\n matcher = re.compile(r'[0-9]+\\.dec')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data", "def file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n Nrows = i + 1\n return Nrows", "def ReadFenics(self, filename, element_type):\n\n if element_type == \"tet\":\n etype = \"tetrahedron\"\n elif element_type == \"hex\":\n etype = \"hexahedron\"\n elif element_type == \"tri\":\n etype = \"triangle\"\n elif element_type == \"quad\":\n etype = \"quadrilateral\"\n\n import xml.etree.cElementTree as ET\n root = ET.parse(filename).getroot()\n X = []\n T = []\n for child in root:\n if child.attrib['celltype'] != etype:\n raise ValueError(\"xml file does not contain {} elements\".format(element_type))\n\n for child in root:\n for cchild in child:\n if cchild.tag == \"vertices\":\n if element_type == \"tet\" or element_type == \"hex\":\n for child3 in cchild:\n x = float(child3.attrib['x'])\n y = float(child3.attrib['y'])\n z = float(child3.attrib['z'])\n X.append([x,y,z])\n elif element_type == \"tri\" or element_type == \"quad\":\n for child3 in cchild:\n x = float(child3.attrib['x'])\n y = float(child3.attrib['y'])\n X.append([x,y])\n\n elif cchild.tag == \"cells\":\n if element_type == \"tet\":\n for child3 in cchild:\n v0 = int(child3.attrib['v0'])\n v1 = int(child3.attrib['v1'])\n v2 = int(child3.attrib['v2'])\n v3 = int(child3.attrib['v3'])\n T.append([v0,v1,v2,v3])\n elif element_type == \"tri\":\n for child3 in cchild:\n v0 = int(child3.attrib['v0'])\n v1 = int(child3.attrib['v1'])\n v2 = int(child3.attrib['v2'])\n T.append([v0,v1,v2])\n\n\n X = np.array(X)\n T = np.array(T,dtype=np.int64)\n\n self.elements = T\n self.points = X\n self.element_type = element_type\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n\n if self.points.shape[1] == 3:\n if np.allclose(self.points[:,2],0.):\n self.points = np.ascontiguousarray(self.points[:,:2])\n\n ndim = self.InferSpatialDimension()\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetEdges()\n self.GetBoundaryEdges()\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetFaces()\n self.GetBoundaryFaces()\n self.GetBoundaryEdges()", "def read_geometry(filepath, read_metadata=False, read_stamp=False):\n volume_info = OrderedDict()\n\n TRIANGLE_MAGIC = 16777214\n QUAD_MAGIC = 16777215\n NEW_QUAD_MAGIC = 16777213\n with open(filepath, \"rb\") as fobj:\n magic = _fread3(fobj)\n if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file\n nvert = _fread3(fobj)\n nquad = _fread3(fobj)\n (fmt, div) = (\">i2\", 100.) 
if magic == QUAD_MAGIC else (\">f4\", 1.)\n coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float) / div\n coords = coords.reshape(-1, 3)\n quads = _fread3_many(fobj, nquad * 4)\n quads = quads.reshape(nquad, 4)\n #\n # Face splitting follows\n #\n faces = np.zeros((2 * nquad, 3), dtype=np.int)\n nface = 0\n for quad in quads:\n if (quad[0] % 2) == 0:\n faces[nface] = quad[0], quad[1], quad[3]\n nface += 1\n faces[nface] = quad[2], quad[3], quad[1]\n nface += 1\n else:\n faces[nface] = quad[0], quad[1], quad[2]\n nface += 1\n faces[nface] = quad[0], quad[2], quad[3]\n nface += 1\n\n elif magic == TRIANGLE_MAGIC: # Triangle file\n create_stamp = fobj.readline().rstrip(b'\\n').decode('utf-8')\n fobj.readline()\n vnum = np.fromfile(fobj, \">i4\", 1)[0]\n fnum = np.fromfile(fobj, \">i4\", 1)[0]\n coords = np.fromfile(fobj, \">f4\", vnum * 3).reshape(vnum, 3)\n faces = np.fromfile(fobj, \">i4\", fnum * 3).reshape(fnum, 3)\n\n if read_metadata:\n volume_info = _read_volume_info(fobj)\n else:\n raise ValueError(\"File does not appear to be a Freesurfer surface\")\n\n coords = coords.astype(np.float) # XXX: due to mayavi bug on mac 32bits\n\n ret = (coords, faces)\n if read_metadata:\n if len(volume_info) == 0:\n warnings.warn('No volume information contained in the file')\n ret += (volume_info,)\n if read_stamp:\n ret += (create_stamp,)\n\n return ret", "def readCubeFile(self, filename):\n\n inputfile = open(filename, \"r\")\n header = \"\".join([inputfile.readline(), inputfile.readline()])\n\n temp = inputfile.readline().strip().split()\n self.numAtoms = int(temp[0])\n self.origin = list(map(float, temp[1:]))\n\n self.numPoints = [0] * 3\n self.spacing = [0] * 3\n for i in range(3):\n line = inputfile.readline().strip().split()\n self.numPoints[i] = int(line[0])\n temp = list(map(float, line[1:]))\n self.spacing[i] = temp[i]\n assert sum(temp[:i] + temp[i + 1:]) == 0\n\n # Read in the lines with atom data\n for i in range(self.numAtoms):\n line = inputfile.readline()\n\n self.data = np.zeros((self.numPoints[1], self.numPoints[0], self.numPoints[2]), \"float\")\n i = j = k = 0\n while i < self.numPoints[1]:\n line = next(inputfile)\n temp = list(map(float, line.strip().split()))\n for x in range(0, len(temp)):\n self.data[j, i, x + k] = temp[x]\n\n k += len(temp)\n if k == self.numPoints[2]:\n j += 1\n k = 0\n if j == self.numPoints[1]:\n i += 1\n j = 0\n\n inputfile.close()", "def parseInputFile(fn):\n\twith open(fn) as f:\n\t\tpoly = [float(x) for x in f.readline().strip().split()]\n\t\titers = int(f.readline().strip())\n\t\treturn (poly, iters)", "def load_faces(file_data, headers, indices):\n\n\n def swap_winding(indices):\n return (indices[0], indices[2], indices[1])\n \n\n def indices_from_face(face_data):\n base_vertex = face_data[3]\n base_index = face_data[5]\n index_count = face_data[6]\n\n faces_indices = [base_vertex + indices[base_index + current_index] \n for current_index in range(index_count)]\n\n #Split into lists of 3 - ie triangles\n faces = []\n for current_face_idx in range(0, len(faces_indices), 3):\n faces.append(faces_indices[current_face_idx:current_face_idx+3])\n\n return faces\n\n\n def face_from_pack(face_data):\n \"\"\" \n Extract just the data we want from the full chunk\n \"\"\"\n triangle_list = indices_from_face(face_data)\n return [(face_data[0], triangles,) for triangles in triangle_list]\n\n face_offset, face_length = headers[13]\n face_chunk = Struct(\"iiiiiiii2i2i3f3f3f3f2i\") \n face_size = face_chunk.size\n face_count = int(face_length / face_size)\n\n 
faces = []\n\n for current_face_idx in range(face_count):\n face_file_position = face_offset + current_face_idx * face_size\n current_face = face_chunk.unpack(file_data[face_file_position : face_file_position+face_size])\n\n #Check we are a valid face (Could use a filter later)\n if current_face[2] != 1: continue #Only support meshes at the moment\n\n new_faces = face_from_pack(current_face)\n faces.extend(new_faces)\n\n return faces", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def get_filesize(inputfile) -> int:\n with open(inputfile, \"rb\") as f:\n lines = 0\n buf_size = 1024 * 1024\n read_f = f.raw.read\n\n buf = read_f(buf_size)\n while buf:\n lines += buf.count(b\"\\n\")\n buf = read_f(buf_size)\n\n return lines", "def extract_triangles(mesh, materials_list):\n tri_list = []\n do_uv = bool(mesh.tessface_uv_textures)\n\n for mat in materials_list:\n for i, face in enumerate(mesh.tessfaces):\n f_v = face.vertices\n if mesh.materials[face.material_index].name != mat: continue\n\n uf = mesh.tessface_uv_textures.active.data[i] if do_uv else None\n\n fmt = 0\n if(do_uv): fmt = face.material_index\n\n if do_uv:\n f_uv = uf.uv\n\n if len(f_v) == 3:\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n else: new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n tri_list.append(new_tri)\n\n else: # it's a quad\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), fmt)\n\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n new_tri_2.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])\n else:\n new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n new_tri_2.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n\n tri_list.append(new_tri)\n tri_list.append(new_tri_2)\n\n return tri_list", "def N_POINTS(self) -> int:\n try:\n with self.fs.open(\n self.get_url().replace(\".\" + self.erddap.response, \".ncHeader\")\n ) as of:\n ncHeader = of.read().decode(\"utf-8\")\n lines = [line for line in ncHeader.splitlines() if \"row = \" in line][0]\n return int(lines.split(\"=\")[1].split(\";\")[0])\n except Exception:\n pass", "def get_n(self):\n return np.append([self.n_init],[s.n for s in self.surfaces])", "def inithr(_filename):\n # Open file provided\n _file = open(_filename)\n # Create empty array to hold data\n _data = np.zeros((1, 3), dtype=float)\n\n # Iterate through the file line by line\n for _line in _file:\n # Split each line into constituent values\n _x = _line.split()\n # Append data array with each value, converted to float, convert parallax angle to distance\n _data = np.append(_data, np.array([float(_x[1]), float(_x[2]), (1 / float(_x[3]))], ndmin=2), axis=0)\n\n # Iterate through data array\n for _row in _data:\n np.seterr(divide='ignore')\n # Convert magnitude to luminosity\n _row[0] = _row[0] - 5 * (np.log10(_row[2]) - 1)\n # Convert B-V colour to temperature\n _row[1] = 4600 * ((1 / (0.92 * _row[1] + 1.7)) + 1 / (0.92 * _row[1] + 0.62))\n\n # Delete first empty row\n _data = np.delete(_data, 0, axis=0)\n\n # Return parsed data\n return 
_data", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def bufcount(filename):\n\timport gzip\n\tif filename.split('.')[-1] in ['gz','gzip']: f = gzip.open(filename)\n\telse: f = open(filename)\n\tlines = 0\n\tbuf_size = 1024 * 1024\n\tread_f = f.read # loop optimization\n\t\n\tbuf = read_f(buf_size)\n\twhile buf:\n\t\tlines += buf.count('\\n')\n\t\tbuf = read_f(buf_size)\n\t\tf.close\n\treturn lines", "def part2(filename: str) -> int:\n data = first_line(filename)\n data = json.loads(data)\n return total_nums_no_red(data)", "def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())", "def part_1() -> int:\n initial_input = _load_input()\n rows = len(initial_input)\n cols = len(initial_input[0])\n\n input = initial_input.copy()\n total_glow_count = 0\n\n for _ in range(100):\n flashed = list()\n for row in range(rows):\n for col in range(cols):\n coords = [[col, row]]\n new_input, glow_count = _get_glow_counts(coords, input, flashed)\n input = new_input\n total_glow_count += glow_count\n\n return total_glow_count", "def get_data(data_file_path):\n data_file = open(data_file_path, 'r').readlines()\n data = []\n n = -1\n dim = -1\n for i in range(len(data_file)):\n line_elems = [float(x) for x in data_file[i].split()]\n if i == 0:\n n = int(line_elems[0])\n dim = int(line_elems[1])\n else:\n data.append(np.array(line_elems))\n return data, n, dim", "def get_faces_nr(self):\r\n\r\n logger.debug('Getting number of faces in each frame')\r\n\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n\r\n if os.path.exists(self.track_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n with open(self.track_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! 
No tracking results found!'\r\n logger.warning('No tracking results found!')\r\n\r\n return\r\n\r\n self.faces_nr = {}\r\n\r\n for segment_dict in self.tracked_faces:\r\n\r\n frame_list = segment_dict[c.FRAMES_KEY]\r\n\r\n for frame_dict in frame_list:\r\n\r\n frame_name = frame_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n if frame_name in self.faces_nr:\r\n\r\n self.faces_nr[frame_name] += 1\r\n\r\n else:\r\n\r\n self.faces_nr[frame_name] = 1\r\n\r\n # Save YAML file\r\n\r\n utils.save_YAML_file(self.faces_nr_path, self.faces_nr)", "def readprimitive(f): \n \n ## read in lines from input file and ignore blank lines and comment lines\n lines = [line.rstrip() for line in f if line.rstrip() if line[0] != '#']\n\n # a1,a2,a3\n A = np.array([[float(lines[0].split()[0]),float(lines[0].split()[1]),float(lines[0].split()[2])],\n [float(lines[1].split()[0]),float(lines[1].split()[1]),float(lines[1].split()[2])],\n [float(lines[2].split()[0]),float(lines[2].split()[1]),float(lines[2].split()[2])]]).T\n \n # number of basis atoms\n num_basis = int(lines[3].split()[0]) \n\n # basis atom positions in unit cell\n unitcell_pos = []\n for i in range(num_basis): \n unitcell_pos.append([float(lines[4+i].split()[0]),float(lines[4+i].split()[1]),float(lines[4+i].split()[2])]) \n \n return (A,unitcell_pos)", "def part1(filename: str) -> int:\n data = first_line(filename)\n data = json.loads(data)\n return total_nums(data)", "def obtener_cantidad_vertices(self):\n return len(self.vertices.keys())", "def get_counts(filename, alphabet, kmin, kmax):\n # get the list of kmers to count with length between kmin and kmax\n kmers_list = get_all_possible_kmers(alphabet, kmin, kmax)\n # initialyze the counter with all possible kmer with length\n # between kmin and kmax with zero counts\n counter = Counter(dict([(km, 0) for km in kmers_list]))\n # open and read in the kmers/string in the file\n with gzip.open(filename, 'rt') as fh:\n # iterates through the strings\n for line in fh:\n # make the adjustments int the strings\n kmer = line.replace('\\n', '')\n # check if kmer/string is in the counter\n if kmer in counter:\n # if kmer is in add 1 other wise keep the zero count\n counter[kmer] += 1\n return counter", "def load_verts(file_data, headers, scale_factor):\n\n\n def vert_from_pack(vert_data):\n return (\n (vert_data[0] * scale_factor, vert_data[1] * scale_factor, vert_data[2] * scale_factor,), #XYZ\n (vert_data[3], vert_data[4],), #UV1\n (vert_data[5], vert_data[6],), #UV2\n (vert_data[7], vert_data[8], vert_data[9],), #Normal\n (vert_data[10], vert_data[11], vert_data[12], vert_data[13],), #RGBA\n )\n\n vert_offset, vert_length = headers[10]\n vert_chunk = Struct(\"3f2f2f3f4B\") \n vert_size = vert_chunk.size\n vert_count = int(vert_length / vert_size)\n\n print (\"Found {} vertices\".format(vert_count))\n\n vertices = []\n\n for current_vert_idx in range(vert_count):\n vert_file_position = vert_offset + current_vert_idx * vert_size\n current_vert = vert_chunk.unpack(file_data[vert_file_position : vert_file_position+vert_size])\n vertices.append(vert_from_pack(current_vert))\n\n return vertices", "def get_number_of_letters(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n \"\"\"Count number of lettes without digits, non letter characters, without xml tags\"\"\"\n data = file.read()\n data = re.sub('<.*?binary.*?>*<.*?binary.*?>',' ', data)\n data = re.sub('\\\\s\\\\s*', '', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', data))))\n let_count = len(data)\n 
sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_letters', let_count)\n print(datetime.now(), '-', 'number_of_letters for', self.filename, 'calculated =', let_count)\n return None" ]
[ "0.78216827", "0.65307426", "0.64875203", "0.6224915", "0.6100354", "0.60187995", "0.59512895", "0.5924314", "0.59203315", "0.5906817", "0.59059787", "0.5882943", "0.5867339", "0.585497", "0.58549577", "0.5812365", "0.57997227", "0.57795846", "0.57746047", "0.57551175", "0.5748937", "0.5748146", "0.5725021", "0.5702728", "0.5698386", "0.5691206", "0.56838804", "0.56726533", "0.5660846", "0.5656267", "0.5645008", "0.56413954", "0.56186074", "0.56131804", "0.5612977", "0.5591385", "0.55882674", "0.5583263", "0.55814636", "0.55635816", "0.55606204", "0.55409354", "0.5535321", "0.55073744", "0.55027145", "0.5488749", "0.5487895", "0.54793304", "0.54753", "0.5460062", "0.54573315", "0.54482156", "0.54470176", "0.54455364", "0.5443752", "0.5437656", "0.5428198", "0.5427783", "0.541538", "0.54097253", "0.5398149", "0.53920686", "0.5391353", "0.5389164", "0.5385579", "0.53842545", "0.5364776", "0.5356398", "0.5355718", "0.53523314", "0.5346526", "0.53442633", "0.53398955", "0.5333428", "0.5324331", "0.5324294", "0.53221965", "0.5320378", "0.5317432", "0.5317432", "0.5317432", "0.5317432", "0.5309065", "0.5297589", "0.529438", "0.52812845", "0.5279945", "0.5278592", "0.5276384", "0.52760696", "0.527073", "0.52688897", "0.526617", "0.5262776", "0.52577895", "0.52572966", "0.52495706", "0.5245097", "0.52390534", "0.5237486" ]
0.6084134
5
Open a PCAP, seek to a packet offset, then get all packets belonging to the same connection
def packets_for_stream(fobj, offset):
    pcap = dpkt.pcap.Reader(fobj)
    pcapiter = iter(pcap)
    ts, raw = pcapiter.next()
    fobj.seek(offset)
    for p in next_connection_packets(pcapiter, linktype=pcap.datalink()):
        yield p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_packets(pcap):\n\n # For each packet in the pcap process the contents\n for timestamp, buf, hdr_len in pcap:\n \n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n\n # Make sure the Ethernet data contains an IP packet\n if not isinstance(eth.data, dpkt.ip.IP):\n # print('Non IP Packet type not supported %s\\n' % eth.data.__class__.__name__)\n continue\n\n # Now unpack the data within the Ethernet frame (the IP packet)\n # Pulling out src, dst, length, fragment info, TTL, and Protocol\n ip = eth.data\n\n # Pull out fragment information (flags and offset all packed into off field, so use bitmasks)\n # do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)\n # more_fragments = bool(ip.off & dpkt.ip.IP_MF)\n # fragment_offset = ip.off & dpkt.ip.IP_OFFMASK\n\n # Print out the info\n # print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\\n' % \\\n # (inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset)) \n\n pkt = Packet(timestamp, buf, hdr_len)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP or ip.p == dpkt.ip.IP_PROTO_UDP: \n # all flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in all_flows:\n all_flows[flow] = [pkt]\n else:\n x = len(all_flows[flow]) - 1\n if x < 0:\n all_flows[flow].append(pkt)\n else:\n if time_diff(all_flows[flow][x].timestamp, timestamp) <= 5400: #90mins\n all_flows[flow].append(pkt)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP: \n # TCP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in tcp_flows:\n tcp_flows[flow] = [pkt]\n else:\n x = len(tcp_flows[flow]) - 1\n if x < 0:\n tcp_flows[flow].append(pkt)\n else:\n if time_diff(tcp_flows[flow][x].timestamp, timestamp) <= 5400:\n tcp_flows[flow].append(pkt)\n all_host_pairs(pkt, ip)\n elif ip.p == dpkt.ip.IP_PROTO_UDP:\n # UDP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in udp_flows:\n udp_flows[flow] = [pkt]\n else:\n x = len(udp_flows[flow]) - 1\n if x < 0:\n udp_flows[flow].append(pkt)\n else:\n if time_diff(udp_flows[flow][x].timestamp, timestamp) <= 5400:\n udp_flows[flow].append(pkt)\n else:\n continue\n\n print(\"Number of All flows: %d | Number of TCP flows: %d | Number of UDP flows: %d\" % (len(all_flows), len(tcp_flows), len(udp_flows)))\n\n # -- Flow Duration\n for f in all_flows:\n size = len(all_flows[f])\n if size >= 2:\n all_flow_dur.append(time_diff(all_flows[f][0].timestamp, all_flows[f][size-1].timestamp))\n \n for f in tcp_flows:\n size = len(tcp_flows[f])\n if size >= 2:\n tcp_flow_dur.append(time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp))\n \n for f in udp_flows:\n size = len(udp_flows[f])\n if size >= 2:\n udp_flow_dur.append(time_diff(udp_flows[f][0].timestamp, udp_flows[f][size-1].timestamp))\n\n print \"lens: \", len(all_flow_dur), len(tcp_flow_dur), len(udp_flow_dur)\n\n # -- Flow Size\n for f in all_flows:\n f_bytes = 0\n size = len(all_flows[f])\n all_flow_size_pkt.append(size)\n for p in all_flows[f]:\n f_bytes += p.length\n all_flow_size_byte.append(f_bytes)\n \n for f in tcp_flows:\n f_bytes = 0\n f_overhead = 0\n size = len(tcp_flows[f])\n tcp_flow_size_pkt.append(size)\n for p in tcp_flows[f]:\n f_bytes += p.length\n f_overhead += 18 + 20 #+ tcp_hdr\n tcp_flow_size_byte.append(f_bytes)\n if f_bytes == 0:\n f_bytes = 9999\n tcp_flow_size_overhead.append(f_overhead/float(f_bytes))\n \n for f in 
udp_flows:\n f_bytes = 0\n size = len(udp_flows[f])\n udp_flow_size_pkt.append(size)\n for p in udp_flows[f]:\n f_bytes += p.length\n udp_flow_size_byte.append(f_bytes)\n\n # -- Inter-packet Arrival time\n for f in all_flows:\n for i in range(len(all_flows[f])-1):\n all_flow_time.append(time_diff(all_flows[f][i].timestamp, all_flows[f][i+1].timestamp))\n\n for f in tcp_flows:\n for i in range(len(tcp_flows[f])-1):\n tcp_flow_time.append(time_diff(tcp_flows[f][i].timestamp, tcp_flows[f][i+1].timestamp))\n\n for f in udp_flows:\n for i in range(len(udp_flows[f])-1):\n udp_flow_time.append(time_diff(udp_flows[f][i].timestamp, udp_flows[f][i+1].timestamp))\n\n # -- TCP State\n for f in tcp_flows:\n size = len(tcp_flows[f])\n last_pkt = tcp_flows[f][size-1]\n tcp = dpkt.ethernet.Ethernet(last_pkt.buf).data.data\n \n if (tcp.flags & dpkt.tcp.TH_SYN) != 0:\n f.state = 'Request'\n elif (tcp.flags & dpkt.tcp.TH_RST) != 0:\n f.state = 'Reset'\n elif (tcp.flags & dpkt.tcp.TH_FIN) != 0 and (tcp.flags & dpkt.tcp.TH_ACK) != 0:\n f.state = 'Finished'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) <= 300:\n f.state = 'Ongoing'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) > 300 \\\n and (tcp.flags & dpkt.tcp.TH_RST) == 0 and (tcp.flags & dpkt.tcp.TH_FIN) == 0:\n f.state = 'Failed'\n\n show_cdf_graphs()", "def pcap(self, fname):\n\t\tcap = pcapy.open_offline(fname)\n\n\t\tself.map = []\n\t\tself.p = PacketDecoder()\n\t\tcap.loop(0, self.process)\n\n\t\treturn self.map", "def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n 
pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def test():\n with open('univ1_pt8.pcap', 'rb') as f: #univ1_trace/univ1_pt8\n pcap = Reader(f)\n print_packets(pcap)\n # top_flows()\n host_pairs()", "def find_dac():\n\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ts.bind((\"0.0.0.0\", 7654))\n\n\twhile True:\n\t\tdata, addr = s.recvfrom(1024)\n\t\tbp = BroadcastPacket(data)\n\t\t\n\t\tprint \"Packet from %s: \" % (addr, )\n\t\tbp.dump()", "def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info", "def pull(self):\n\n # For each packet in the pcap process the contents\n for item in self.input_stream:\n\n # Print out the timestamp in UTC\n print('%s -' % item['timestamp'], end='')\n\n # Transport info\n if item['transport']:\n print(item['transport']['type'], end='')\n\n # Print out the Packet info\n packet_type = item['packet']['type']\n print(packet_type, end='')\n packet = item['packet']\n if packet_type in ['IP', 'IP6']:\n # Is there domain info?\n if 'src_domain' in packet:\n print('%s(%s) --> %s(%s)' % (net_utils.inet_to_str(packet['src']), packet['src_domain'],\n net_utils.inet_to_str(packet['dst']), packet['dst_domain']), end='')\n else:\n print('%s --> %s' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst'])), end='')\n else:\n print(str(packet))\n\n # Only include application if we have it\n if item['application']:\n print('Application: %s' % item['application']['type'], end='')\n print(str(item['application']), end='')\n\n # Just for newline\n print()", "def _parse(self):\n \n global _tcp_buf\n _tcp_buf = {}\n 
number = 1\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n self.pcap_packets.append(pcap_packet)\n pcap_packet.pcap_num = number\n number += 1\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n \n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip.packet[pcap_packet.ip.header_len: ])\n \n #skip the packets that is not http packet\n if (pcap_packet.tcp.src_port != 80 and pcap_packet.tcp.dst_port != 80):\n continue\n \n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, pcap_packet.pcap_num)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def recv_raw(self) -> Dict[str, Any]:\n while True:\n try:\n packet = self.__recv_frame()\n except UnknownPacketException:\n continue\n\n # Hack for sniffing on localhost\n if packet['address']['interface'] == 'lo' and packet['address']['type'] != 4:\n continue\n\n if self.address and self.port:\n if (\n packet['ip_header']['source_address'] == self.address and\n packet['tcp_header']['source_port'] == self.port\n ):\n return packet\n if (\n packet['ip_header']['destination_address'] == self.address and\n packet['tcp_header']['destination_port'] == self.port\n ):\n return packet\n elif self.address:\n if (\n packet['ip_header']['source_address'] == self.address or\n packet['ip_header']['destination_address'] == self.address\n ):\n return packet\n elif self.port:\n if (\n packet['tcp_header']['source_port'] == self.port or\n packet['tcp_header']['destination_port'] == self.port\n ):\n return packet\n else:\n return packet", "def extract_tstat_data(pcap_filepath):\n connections = {}\n conn_id = 0\n print('We are here')\n with co.cd(os.path.basename(pcap_filepath[:-5])):\n with co.cd(os.listdir('.')[0]):\n print(connections)\n # Complete TCP connections\n connections, conn_id = extract_tstat_data_tcp_complete('log_tcp_complete', connections, conn_id)\n # Non complete TCP connections (less info, but still interesting data)\n connections, conn_id = extract_tstat_data_tcp_nocomplete('log_tcp_nocomplete', connections, conn_id)\n\n return connections", "def next_packet(filename, memorymap=True):\n with open(filename, 'rb') as f:\n \n #memory map the file if necessary (prob requires 64 bit systems)\n _file = f\n if memorymap:\n _file = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)\n \n while True:\n packet = _file.read(TS.PACKET_SIZE)\n if packet:\n # first byte SHOULD be the sync byte\n # but if it isn't find one.\n if packet[0] != TS.SYNC_BYTE:\n start_byte = 0\n print packet[0]\n for i in range(start_byte, TS.PACKET_SIZE):\n if packet[i] == TS.SYNC_BYTE:\n start_byte = i\n break\n # didn't find a new start? 
FAIL\n if start_byte == 0:\n raise Exception(\"failure to find sync byte in ts packet size.\")\n continue\n remainder = _file.read(TS.PACKET_SIZE - start_byte)\n packet = packet[start_byte:] + remainder\n yield packet\n else:\n break", "def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. 
and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next 
measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. 
Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + 
str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n        dp = dpset.get(tunnel.sApn.dpid)\n        parser = dp.ofproto_parser\n        ofp = dp.ofproto\n        match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n        actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n        inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n        req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n        dp.send_msg(req)\n        LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n        LOG.debug('ROUTING: Rules on access edge forwarders installed')\n        LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n        routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n        parser = dp.ofproto_parser\n\n        for dpid in LAN_TYPE_FORWARDERS:\n            ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n            ## Here I delete the rule, it is added on FWD when it connects to controoller\n            LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n            dp = dpset.get(dpid)\n            priority = 2\n            match = parser.OFPMatch(eth_type=0x0800)\n            actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n            self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n                          table_id=0, actions=actions,\n                          match=match, priority=priority)\n\n            LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n            match = parser.OFPMatch(eth_type=0x0800)\n            actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n            self.add_flow(dp, 2, match, actions)", "def process(self):\n        fp = open(self.pcap, \"rb\")\n        pkts = dpkt.pcap.Reader(fp)\n        if self.sip and self.dip and self.sp and self.dp:\n            self.process_pkts(pkts)", "def filter(self):\n        # outfile = open(self.newpcap, 'wb')\n        # writer = dpkt.pcap.Writer(outfile)\n        f = open(self.pcapfile, 'rb')\n        packets = dpkt.pcap.Reader(f)\n\n        for timestamp, buf in packets:\n            eth = dpkt.ethernet.Ethernet(buf)\n            if not isinstance(eth.data, dpkt.ip.IP): # make sure the Ethernet frame contains an IP packet, Non IP Packet type not supported\n                continue # filter out empty IP packets\n            ip = eth.data # get the Ethernet frame (IP packet)\n            if not isinstance(ip.data, dpkt.tcp.TCP): # check for TCP at the transport layer\n                continue\n            tcp = ip.data # get the tcp data\n            # print('-->TCP Data: ', repr(tcp))\n\n            \"\"\" Filter the first packet after the three-way handshake\"\"\"\n            seq = self.seq_pattern.findall(repr(tcp))\n            ack = self.ack_pattern.findall(repr(tcp))\n            if not (seq or ack): # at least one of seq and ack must be present\n                continue\n            if ack:\n                ack = ack[0]\n            if seq:\n                seq = seq[0]\n\n            if not ack and seq: # first handshake (SYN) request\n                self.hash_table[seq] = {}\n                self.stream_table[seq] = [buf]\n            if ack and seq: # second/third handshake or data exchange packets\n                if str(int(ack) - 1) in self.hash_table.keys(): # a first-handshake record exists\n                    number = str(int(ack) - 1)\n                    if 'second' not in self.hash_table[number].keys(): # add the second handshake\n                        self.hash_table[number]['second'] = {'seq': seq, 'ack': ack}\n                        self.stream_table[number].append(buf) # append the second handshake to buf\n                        self.resp_relation[seq] = ack # add to the relation table\n\n                    # a second-handshake record exists; check the hash table for a third-handshake record and save the stream if present\n                    # these are basically the traffic response packets\n                    elif 'three' in self.hash_table[number].keys():\n                        if number not in self.stream_table.keys():\n                            self.stream_table[number] = []\n
                            self.stream_table[number].append(buf)\n                    else:\n                        self.stream_table[number].append(buf)\n\n                # ack-1 has no matching hash table entry; may be a third handshake or a traffic request packet\n                elif str(int(seq) - 1) in self.hash_table.keys():\n                    number = str(int(seq) - 1)\n                    if 'second' not in self.hash_table[number]:\n                        pass\n                    elif 'three' not in self.hash_table[number]: # third-handshake packet\n                        self.hash_table[number]['three'] = {'seq': seq, 'ack': ack}\n                        self.stream_table[number].append(buf)\n                    # otherwise it is a traffic packet\n                    else:\n                        if number not in self.stream_table.keys():\n                            self.stream_table[number] = []\n                            self.stream_table[number].append(buf)\n                        else:\n                            self.stream_table[number].append(buf)\n                # traffic response packet\n                elif str(int(seq) - 1) in self.resp_relation.keys():\n                    number = str(int(seq) - 1)\n                    second_ack = self.resp_relation[number]\n                    number = str(int(second_ack) - 1)\n                    if number not in self.stream_table.keys():\n                        self.stream_table[number] = []\n                        self.stream_table[number].append(buf)\n                    else:\n                        self.stream_table[number].append(buf)\n                else:\n                    continue # seq does not exist\n\n        # outfile.close()\n        f.close()", "def get_connections(capture):\n    ip_dict = dict()\n    for pkt in capture:\n\n        if not hasattr(pkt, \"ip\") and not hasattr(pkt, \"ipv6\"):\n            continue\n\n        protocol = pkt.highest_layer\n\n        tcp_dst_port = None\n        tcp_src_port = None\n        if hasattr(pkt, \"tcp\"):\n            tcp_src_port = pkt.tcp.srcport\n            tcp_dst_port = pkt.tcp.dstport\n\n        if hasattr(pkt, \"ip\"):\n            if pkt.ip.src.startswith(\"192.168.178\"):\n                ip, dst = pkt.ip.src, pkt.ip.dst\n            else:\n                ip, dst = pkt.ip.dst, pkt.ip.src\n                tcp_dst_port = tcp_src_port\n        else:\n            # TODO: how to discern src and dst in IPv6?\n            ip, dst = pkt.ipv6.src, pkt.ipv6.dst\n\n        ip = \"%s\" % ip\n        dkey = (\n            \"%s\" % protocol,\n            int(tcp_dst_port) if tcp_dst_port else None,\n            \"%s\" % dst\n        )\n        if ip not in ip_dict:\n            ip_dict[ip] = {dkey: 1}\n        else:\n            ip_dict[ip][dkey] = ip_dict[ip].get(dkey, 0) + 1\n    return ip_dict", "def process_pcap(self):\n        fp = open(self.pcap, \"rb\")\n        pkts = dpkt.pcap.Reader(fp)\n        self.process_pkts(pkts)", "def handle_tcp(pkt, packets, i, start_point):\r\n    src_port = int(pkt[start_point:start_point+4], 16)\r\n    start_point += 4\r\n    dest_port = int(pkt[start_point:start_point+4], 16)\r\n    start_point += 4\r\n    sequence_num = int(pkt[start_point:start_point+8], 16)\r\n    start_point += 8\r\n    acknowledgment = int(pkt[start_point:start_point+8], 16)\r\n    start_point += 8\r\n    data_offset = int(pkt[start_point], 16) * 4\r\n    start_point += 2\r\n    flags = pkt[start_point:start_point+2]\r\n    flags_str = \"\"\r\n    for f in flags:\r\n        flags_str += str(format(int(f), '04b'))\r\n    start_point += 2\r\n    window_size = int(pkt[start_point:start_point+4], 16)\r\n    start_point += 4\r\n    checksum_value = pkt[start_point:start_point+4]\r\n    start_point += 4\r\n    urgent_pointer = int(pkt[start_point:start_point+4], 16)\r\n    start_point += 4\r\n    options = int((2 * packets[i][0][0] - start_point)/2)\r\n\r\n    packets[i][2].append(src_port)\r\n    packets[i][2].append(dest_port)\r\n    packets[i][2].append(sequence_num)\r\n    packets[i][2].append(acknowledgment)\r\n    packets[i][2].append(data_offset)\r\n    packets[i][2].append(flags_str)\r\n    packets[i][2].append(window_size)\r\n    packets[i][2].append(checksum_value)\r\n    packets[i][2].append(urgent_pointer)\r\n    packets[i][2].append(options)\r\n    return packets", "def reader(self):\n        while self.alive:\n            try:\n                data = self.serial.read_until(b'~')[:-1]\n                packet = ethernet.Ethernet(data)\n                if packet[icmp.ICMP]:\n                    packet[ethernet.Ethernet].dst_s = \"dc:a6:32:00:a7:8b\"\n                    packet[ip.IP].dst_s = \"192.168.1.35\"\n                    packet[icmp.ICMP].sum = b'0x1783'\n                    
print(\"\\n\\n__________________RESPONSE FROM VISIBLE PI__________________\")\n print(packet)\n if data:\n self.write(packet.bin())\n except socket.error as msg:\n break\n self.alive = False", "def udp_iterator(pc):\n\tfor time,pkt in pc:\n\t\teth = dpkt.ethernet.Ethernet(pkt)\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t ip = eth.data\n\t\t # if the IP protocol is UDP, process it further\n\t\t if ip.p == dpkt.ip.IP_PROTO_UDP :\n\t\t\tudp = ip.data\n\t\t\tyield( ip.src, udp.sport, ip.dst, udp.dport, udp.data )", "def read_and_store_pcap(file_name):\r\n file = open(file_name, \"rb\")\r\n global_header = file.read(24).hex()\r\n byte = file.read(16)\r\n packets = []\r\n bytes = []\r\n sizes = []\r\n while byte:\r\n packet_header = byte.hex()\r\n # parse the size for each packet\r\n size = struct.unpack(\"<L\", codecs.decode(str(packet_header[16:24]), \"hex\"))[0]\r\n sizes.append(size)\r\n # read the whole packet by its size from the bytes\r\n byte = file.read(size).hex()\r\n bytes.append(byte)\r\n byte = file.read(16)\r\n for size in sizes:\r\n packets.append(([size], [], []))\r\n i = 0\r\n\r\n for pkt in bytes:\r\n packets = handle_pkt_header(pkt, packets, i)\r\n packets, start_point = handle_ip_header(pkt, packets, i)\r\n protocol = packets[i][1][7]\r\n if protocol == 1:\r\n packets = handle_icmp(pkt, packets, i, start_point)\r\n elif protocol == 6:\r\n packets = handle_tcp(pkt, packets, i, start_point)\r\n elif protocol == 17:\r\n packets = handle_udp(pkt, packets, i, start_point)\r\n i += 1\r\n # print(packets)\r\n return packets", "def testParse(self):\n parser = pcap.PcapParser()\n storage_writer = self._ParseFile(['test.pcap'], parser)\n\n # PCAP information:\n # Number of streams: 96 (TCP: 47, UDP: 39, ICMP: 0, Other: 10)\n #\n # For each stream 2 events are generated one for the start\n # and one for the end time.\n\n self.assertEqual(storage_writer.number_of_events, 192)\n\n events = list(storage_writer.GetEvents())\n\n # Test stream 3 (event 6).\n # Protocol: TCP\n # Source IP: 192.168.195.130\n # Dest IP: 63.245.217.43\n # Source Port: 1038\n # Dest Port: 443\n # Stream Type: SSL\n # Starting Packet: 4\n # Ending Packet: 6\n\n event = events[6]\n self.assertEqual(event.packet_count, 3)\n self.assertEqual(event.protocol, 'TCP')\n self.assertEqual(event.source_ip, '192.168.195.130')\n self.assertEqual(event.dest_ip, '63.245.217.43')\n self.assertEqual(event.dest_port, 443)\n self.assertEqual(event.source_port, 1038)\n self.assertEqual(event.stream_type, 'SSL')\n self.assertEqual(event.first_packet_id, 4)\n self.assertEqual(event.last_packet_id, 6)\n\n # Test stream 6 (event 12).\n # Protocol: UDP\n # Source IP: 192.168.195.130\n # Dest IP: 192.168.195.2\n # Source Port: 55679\n # Dest Port: 53\n # Stream Type: DNS\n # Starting Packet: 4\n # Ending Packet: 6\n # Protocol Data: DNS Query for wpad.localdomain\n\n event = events[12]\n self.assertEqual(event.packet_count, 5)\n self.assertEqual(event.protocol, 'UDP')\n self.assertEqual(event.source_ip, '192.168.195.130')\n self.assertEqual(event.dest_ip, '192.168.195.2')\n self.assertEqual(event.dest_port, 53)\n self.assertEqual(event.source_port, 55679)\n self.assertEqual(event.stream_type, 'DNS')\n self.assertEqual(event.first_packet_id, 11)\n self.assertEqual(event.last_packet_id, 1307)\n self.assertEqual(\n event.protocol_data, 'DNS Query for wpad.localdomain')\n\n expected_message = (\n 'Source IP: 192.168.195.130 '\n 'Destination IP: 192.168.195.2 '\n 'Source Port: 55679 '\n 'Destination Port: 53 '\n 'Protocol: UDP '\n 
'Type: DNS '\n 'Size: 380 '\n 'Protocol Data: DNS Query for wpad.localdomain '\n 'Stream Data: \\'\\\\xb8\\\\x9c\\\\x01\\\\x00\\\\x00\\\\x01\\\\x00\\\\x00\\\\x00\\\\x00'\n '\\\\x00\\\\x00\\\\x04wpad\\\\x0blocaldomain\\\\x00\\\\x00\\\\x01\\\\x00\\\\x01\\\\xb8'\n '\\\\x9c\\\\x01\\\\x00\\\\x00\\\\x01\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x04wpa\\' '\n 'First Packet ID: 11 '\n 'Last Packet ID: 1307 '\n 'Packet Count: 5')\n expected_short_message = (\n 'Type: DNS '\n 'First Packet ID: 11')\n\n self._TestGetMessageStrings(event, expected_message, expected_short_message)", "def convert_pcap_to_dataframe(input_file):\r\n if not os.path.exists(input_file):\r\n raise IOError(\"File \" + input_file + \" does not exist\")\r\n\r\n tshark_fields = \"-e frame.time_epoch \" \\\r\n \"-e _ws.col.Source \" \\\r\n \"-e _ws.col.Destination \" \\\r\n \"-e _ws.col.Protocol \" \\\r\n \"-e frame.len \" \\\r\n \"-e ip.ttl \" \\\r\n \"-e ip.flags.mf \" \\\r\n \"-e ip.frag_offset \" \\\r\n \"-e icmp.type \" \\\r\n \"-e tcp.srcport \" \\\r\n \"-e tcp.dstport \" \\\r\n \"-e udp.srcport \" \\\r\n \"-e udp.dstport \" \\\r\n \"-e dns.qry.name \" \\\r\n \"-e dns.qry.type \" \\\r\n \"-e http.request \" \\\r\n \"-e http.response \" \\\r\n \"-e http.user_agent \" \\\r\n \"-e tcp.flags.str \" \\\r\n \"-e ntp.priv.reqcode \"\r\n\r\n temporary_file = tempfile.TemporaryFile(\"r+b\")\r\n\r\n # print(shutil.which(command))\r\n\r\n p = subprocess.Popen([settings.TSHARK + \" -n -r \\\"\" + input_file + \"\\\" -E separator='\\x03' -E header=y -T fields \" + tshark_fields],\r\n shell=True, stdout=temporary_file) #\\x03 is ETX\r\n p.communicate()\r\n p.wait()\r\n\r\n # Reset file pointer to start of file\r\n temporary_file.seek(0)\r\n\r\n df = pd.read_csv(temporary_file, sep=\"\\x03\", low_memory=False, error_bad_lines=False)\r\n\r\n temporary_file.close()\r\n\r\n if ('tcp.srcport' in df.columns) and ('udp.srcport' in df.columns) and ('tcp.dstport' in df.columns) and \\\r\n ('udp.dstport' in df.columns):\r\n # Combine source and destination ports from tcp and udp\r\n df['srcport'] = df['tcp.srcport'].fillna(df['udp.srcport'])\r\n df['dstport'] = df['tcp.dstport'].fillna(df['udp.dstport'])\r\n\r\n df['srcport'] = df['srcport'].apply(lambda x: int(x) if str(x).replace('.', '', 1).isdigit() else 0)\r\n df['dstport'] = df['dstport'].apply(lambda x: int(x) if str(x).replace('.', '', 1).isdigit() else 0)\r\n\r\n # Remove columns: 'tcp.srcport', 'udp.srcport','tcp.dstport', 'udp.dstport'\r\n df.drop(['tcp.srcport', 'udp.srcport', 'tcp.dstport', 'udp.dstport'], axis=1, inplace=True)\r\n\r\n # Drop all empty columns (for making the analysis more efficient! 
less memory.)\r\n df.dropna(axis=1, how='all', inplace=True)\r\n df = df.fillna(0)\r\n\r\n if 'icmp.type' in df.columns:\r\n df['icmp.type'] = df['icmp.type'].astype(str)\r\n\r\n if 'ip.frag_offset' in df.columns:\r\n df['ip.frag_offset'] = df['ip.frag_offset'].astype(str)\r\n\r\n if 'ip.flags.mf' in df.columns:\r\n df['ip.flags.mf'] = df['ip.flags.mf'].astype(str)\r\n\r\n if ('ip.flags.mf' in df.columns) and ('ip.frag_offset' in df.columns):\r\n # Analyse fragmented packets\r\n df['fragmentation'] = (df['ip.flags.mf'] == '1') | (df['ip.frag_offset'] != '0')\r\n df.drop(['ip.flags.mf', 'ip.frag_offset'], axis=1, inplace=True)\r\n\r\n if 'tcp.flags.str' in df.columns:\r\n df['tcp.flags.str'] = df['tcp.flags.str'].str.encode(\"utf-8\") \r\n\r\n return df", "def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for", "def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for", "def _read_packets(self, reader: Par2FileReader):\n start_count = len(self)\n pointers = reader.get_pointers()\n # Create RecoverySets if needed\n for set_id, pointer_set in packets.by_set_id(pointers).items():\n print(set_id.hex(), pointer_set)\n if set_id not in self.recovery_sets.keys():\n # Create a RecoverySet if needed\n self.recovery_sets[set_id] = RecoverySet(set_id)\n for pointer in pointer_set:\n self.recovery_sets[set_id].packets.add(pointer)\n logger.info(\"Added {} new packets\".format(len(self) - start_count))", "def locator(pcap_obj,kml_file):\r\n ip_list = []\r\n for ts, buf in pcap_obj:\r\n eth = dpkt.ethernet.Ethernet(buf)\r\n ip = eth.data\r\n try: # extract all unique IPs\r\n src_ip = str(socket.inet_ntoa(ip.src))\r\n dst_ip = str(socket.inet_ntoa(ip.dst))\r\n if src_ip in ip_list:\r\n pass\r\n else:\r\n ip_list.append(src_ip)\r\n if dst_ip in ip_list:\r\n pass\r\n else:\r\n ip_list.append(dst_ip)\r\n except AttributeError:\r\n pass\r\n\r\n try:\r\n reader = geoip2.database.Reader('GeoLite2-City_20190129.mmdb') # reading from db(can be redacted)\r\n except FileNotFoundError:\r\n print(f'[!]DB file not in current directory or with a different file name')\r\n sys.exit(1)\r\n area = []\r\n longitude = []\r\n latitude = []\r\n ips = []\r\n for ip_addr in ip_list:\r\n try:\r\n rec = reader.city(ip_addr) # reading IP\r\n country = rec.country.iso_code # assigning country and city\r\n city = rec.city.name\r\n if city is None and country is None:\r\n area.append('Unknown')\r\n elif city is None:\r\n area.append(f'Unknown city:{country}') # looking for unknown country\r\n elif 
country is None:\r\n area.append(f'Unknown country:{city}') # looking for unknown city\r\n else:\r\n area.append(f'{city} {country}')\r\n\r\n longitude.append(rec.location.longitude)\r\n latitude.append(rec.location.latitude)\r\n ips.append(ip_addr)\r\n except geoip2.errors.AddressNotFoundError:\r\n pass\r\n\r\n try:\r\n kml = simplekml.Kml()\r\n final_path = str(os.getcwd() + os.sep + kml_file) # defining full canonical path\r\n for i in range(0, len(ips)):\r\n kml.newpoint(name=(area[i]),\r\n coords=[(longitude[i], latitude[i])],\r\n description=f'[+] Location = {area[i]}\\n IP: {ips[i]}')\r\n kml.save(final_path)\r\n print(f\"[+] Writing IP locations to {kml_file}\") # writing data to a KML file\r\n print(f\"[+] Opening Google Earth with:{kml_file}\\n\") # reading file with google earth\r\n try:\r\n os.startfile(final_path)\r\n except OSError:\r\n print(f'[!] Warning: Google Earth must be installed to open the kml')\r\n except FileNotFoundError:\r\n pass", "def capture_packets(self, interface, count=1, timeout=None):\n if interface not in self.packet_captures:\n raise ObjectNotFoundException(\n 'No packet capture is running or was run on host/interface' +\n self.name + '/' + interface)\n tcpd = self.packet_captures[interface]\n return tcpd.wait_for_packets(count, timeout)", "def process_pkts(self, pkts: list):\n pkt_count = 0\n for ts, buf in pkts:\n eth = dpkt.ethernet.Ethernet(buf)\n if not isinstance(eth.data, dpkt.ip.IP):\n continue\n ip = eth.data\n if ((inet_to_str(ip.src) == self.sip and inet_to_str(ip.dst) == self.dip) or\n (inet_to_str(ip.src) == self.dip and inet_to_str(ip.dst) == self.sip)):\n if isinstance(ip.data, dpkt.tcp.TCP):\n tcp = ip.data\n if ((tcp.sport == self.sp and tcp.dport == self.dp) or\n (tcp.dport == self.sp and tcp.sport == self.dp)):\n pkt_count += 1\n self._process(buf, ts, pkt_count)\n if self._c_state == self._s_state and self._c_state == TCPState.CLOSED:\n logger.info(\"Session finished.\")\n logger.info(\"Number of packets in the session id: {} is {}\".format(\n self.session_count, len(self.sessions[self.session_count])))\n self.__reset_state__()", "def extract_from_pcap(device=None, pcap=None, flags=\"-v -r\", path_to_chaosreader=\"/tmp/\"):\n if device is None or pcap is None:\n raise Exception(\"device and pcap are mandatory arguments\")\n device.shell(command=\"cd /tmp\")\n\n cmd = path_to_chaosreader + \"chaosreader0.94 \" + flags + \" \" + pcap\n output = device.shell(command=cmd)\n\n if not re.match(\".*Creating files.*\", output.response(), re.DOTALL):\n device.log(level=\"ERROR\", message=\"Chaosreader ran into an error\")\n raise Exception(\"Chaosreader ran into an error\")\n\n return True", "def handle_icmp(pkt, packets, i, start_point):\r\n icmp_type = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_code = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_checksum = pkt[start_point:start_point+4]\r\n packets[i][2].append(icmp_type)\r\n packets[i][2].append(icmp_code)\r\n packets[i][2].append(icmp_checksum)\r\n return packets", "def get_total_and_retrans_frames(pcap_filepath, connections):\n # First init values to avoid strange errors if connection is empty\n for conn_id, conn in connections.iteritems():\n for direction in co.DIRECTIONS:\n connections[conn_id].flow.attr[direction][co.FRAMES_TOTAL] = 0\n connections[conn_id].flow.attr[direction][co.BYTES_FRAMES_TOTAL] = 0\n connections[conn_id].flow.attr[direction][co.FRAMES_RETRANS] = 0\n 
connections[conn_id].flow.attr[direction][co.BYTES_FRAMES_RETRANS] = 0\n\n stats_filename = os.path.basename(pcap_filepath)[:-5] + \"_tshark_total\"\n stats_file = open(stats_filename, 'w')\n co.tshark_stats(None, pcap_filepath, print_out=stats_file)\n stats_file.close()\n\n stats_file = open(stats_filename)\n data = stats_file.readlines()\n stats_file.close()\n for line in data:\n split_line = \" \".join(line.split()).split(\" \")\n if len(split_line) == 11:\n # Manage case with ipv6\n ip_src, port_src = get_ip_port_tshark(split_line[0])\n ip_dst, port_dst = get_ip_port_tshark(split_line[2])\n for conn_id, conn in connections.iteritems():\n if conn.flow.attr[co.SADDR] == ip_src and conn.flow.attr[co.SPORT] == port_src and \\\n conn.flow.attr[co.DADDR] == ip_dst and conn.flow.attr[co.DPORT]:\n connections[conn_id].flow.attr[co.S2C][co.FRAMES_TOTAL] = int(split_line[3])\n connections[conn_id].flow.attr[co.S2C][co.BYTES_FRAMES_TOTAL] = int(split_line[4])\n connections[conn_id].flow.attr[co.C2S][co.FRAMES_TOTAL] = int(split_line[5])\n connections[conn_id].flow.attr[co.C2S][co.BYTES_FRAMES_TOTAL] = int(split_line[6])\n break\n\n stats_file.close()\n os.remove(stats_filename)\n\n stats_filename = os.path.basename(pcap_filepath)[:-5] + \"_tshark_retrans\"\n stats_file = open(stats_filename, 'w')\n co.tshark_stats('tcp.analysis.retransmission', pcap_filepath, print_out=stats_file)\n stats_file.close()\n\n stats_file = open(stats_filename)\n data = stats_file.readlines()\n stats_file.close()\n for line in data:\n split_line = \" \".join(line.split()).split(\" \")\n if len(split_line) == 11:\n ip_src, port_src = get_ip_port_tshark(split_line[0])\n ip_dst, port_dst = get_ip_port_tshark(split_line[2])\n for conn_id, conn in connections.iteritems():\n if conn.flow.attr[co.SADDR] == ip_src and conn.flow.attr[co.SPORT] == port_src and \\\n conn.flow.attr[co.DADDR] == ip_dst and conn.flow.attr[co.DPORT]:\n connections[conn_id].flow.attr[co.S2C][co.FRAMES_RETRANS] = int(split_line[3])\n connections[conn_id].flow.attr[co.S2C][co.BYTES_FRAMES_RETRANS] = int(split_line[4])\n connections[conn_id].flow.attr[co.C2S][co.FRAMES_RETRANS] = int(split_line[5])\n connections[conn_id].flow.attr[co.C2S][co.BYTES_FRAMES_RETRANS] = int(split_line[6])\n break\n\n stats_file.close()\n os.remove(stats_filename)", "def next_connection_packets(piter, linktype=1):\n first_ft = None\n\n for ts, raw in piter:\n ft = flowtuple_from_raw(raw, linktype)\n if not first_ft: first_ft = ft\n\n sip, dip, sport, dport, proto = ft\n if not (first_ft == ft or first_ft == (dip, sip, dport, sport, proto)):\n break\n\n yield {\n \"src\": sip, \"dst\": dip, \"sport\": sport, \"dport\": dport, \"proto\": proto,\n \"raw\": payload_from_raw(raw, linktype).encode(\"base64\"), \"direction\": first_ft == ft,\n }", "def get_pcap_traffic_series(self):\n parsed_pcap_data = {}\n\n if (self.mac_address_binary is not None):\n parsed_pcap_data[self.mac_address_binary] = []\n\n with open(self.pcap_file_path, 'rb') as pcap_file:\n try:\n pcap = dpkt.pcap.Reader(pcap_file)\n for ts, buf in pcap:\n # Skip non ethernet frames\n try:\n eth = dpkt.ethernet.Ethernet(buf)\n except:\n continue\n\n # Skip non-IP packets\n if eth.type != 2048:\n continue\n \n # Apply eth filter\n if (self.mac_address_binary is not None):\n self.append_data(parsed_pcap_data, self.mac_address_binary, eth, ts)\n else:\n if (eth.src not in parsed_pcap_data):\n parsed_pcap_data[eth.src] = []\n if (eth.dst not in parsed_pcap_data):\n parsed_pcap_data[eth.dst] = []\n\n 
self.append_data(parsed_pcap_data, eth.src, eth, ts)\n self.append_data(parsed_pcap_data, eth.dst, eth, ts)\n except:\n print \"Error parsing file: %s\" % pcap_file\n \n # Remove mac addresses that didn't send data\n receivers_only = []\n for mac_addr in parsed_pcap_data:\n data_sent = False\n for data in parsed_pcap_data[mac_addr]:\n if (data[1] > 0):\n data_sent = True\n break\n if (not data_sent):\n receivers_only.append(mac_addr)\n\n for mac_addr in receivers_only:\n parsed_pcap_data.pop(mac_addr, None)\n\n # Sort the data \n for mac_addr in parsed_pcap_data:\n series = sorted(parsed_pcap_data[mac_addr], key=operator.itemgetter(0))\n parsed_pcap_data[mac_addr] = series\n\n return parsed_pcap_data", "def process_pcap(pcap):\n\n print \"Processing\", pcap\n pcap_path, _ = os.path.splitext(pcap)\n # strip_payload_from_pcap(pcap)\n os.system(\"tshark -nn -T fields -E separator=/t -e frame.time_epoch\"\n \" -e ip.src -e ip.dst -e tcp.srcport -e tcp.dstport\"\n \" -e ip.proto -e ip.len -e ip.hdr_len -e tcp.hdr_len -e data.len\"\n \" -e tcp.flags -e tcp.options.timestamp.tsval\"\n \" -e tcp.options.timestamp.tsecr -e tcp.seq -e tcp.ack\"\n \" -e tcp.window_size_value -e expert.message \"\n \" -r %s > %s.tshark\" % (pcap, pcap_path))\n # tcpdump command from Panchenko's raw-to-tcp script\n os.system(\"\"\"tcpdump -r {0} -n -l -tt -q -v | sed -e 's/^[ ]*//' |\n awk '/length ([0-9][0-9]*)/{{printf \"%s \",$0;next}}{{print}}' > {1}\"\"\".\\\n format(pcap, pcap_path + '.tcpdump'))", "def packetSniff():\n\n packets = psutil.net_io_counters(pernic=True)\n interfaces = {}\n x = 0\n for p in packets.items():\n values = {}\n values['name'] = p[0]\n values['bytes_sent'] = p[1][0]\n values['bytes_recv'] = p[1][1]\n values['pckt_sent'] = p[1][2]\n values['pckt_recv'] = p[1][3]\n values['errin'] = p[1][4]\n values['errout'] = p[1][5]\n values['dropin'] = p[1][6]\n values['dropout'] = p[1][7]\n\n if ((values['bytes_sent'] or values['bytes_recv'] or\n values['pckt_sent'] or values['pckt_recv']) != 0):\n\n interfaces[x] = values\n x += 1\n else:\n pass\n\n return interfaces", "def start_tcpdump(self):\n\t\tlog_file = os.path.join(self.cfg.file_log_dir,self.info[\"hash_md5\"]+\".pcap\")\n\t\tself.info[\"tcpdump_log_path\"] = log_file\n\t\tcmd = [\"/usr/sbin/tcpdump\", \"-iany\", \"-w\"+self.info[\"tcpdump_log_path\"], \"-c%d\"%(self.cfg.tcpdump_limit)]\n\t\tself.p_tcpdump = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tself.log.info(\"tcpdump starts, logfile:%s\",self.info[\"tcpdump_log_path\"] )", "def tcpdump(timeout, q, interface):\t\n\tlogging.debug('tcpdump -s 1024 -lqnAt tcp port 80 -i eth0')\n\t# tcpdump -s 1024 -lqnAt tcp port 80\n\t\t\n\tcommand = Command(['/usr/sbin/tcpdump', '-s 1024', '-lnAq', '-i', interface], timeout)\n\tcommand.run()\n\n\t# when it's executing here, the results have been available\n\t# print command.out\n\n\tif command.out is not None:\n\t\t# pattern = \"time=([0-9]+\\.[0-9]+) ms\"\n\t\tip_pattern = \"IP ([0-9]+.[0-9]+.[0-9]+.[0-9]+).[0-9]+ > [0-9]+.[0-9]+.[0-9]+.[0-9]+.[0-9]\"\n\t\tgoogle_pattern = \"domain=.google.com\"\n\t\tlines = command.out.split('\\n')\n\t\tlast_ip = None\n\n\t\t# first time scan for google's return ip\n\t\tfor line in lines:\n\t\t\tip_src = re.search(ip_pattern, line)\n\t\t\tif ip_src is not None:\n\t\t\t\tlast_ip = ip_src.group(1)\n\t\t\tif re.search(google_pattern, line):\n\t\t\t\tprint last_ip\n\t\t\t\tbreak\n\n\t\tgEntries = []\n\t\tif last_ip is not None:\n\t\t\t\n\t\t\t# second time scan parse tcpdump for query 
entries\n\t\t\tfor line in lines:\n\t\t\t\tlast_ip_pos = re.search(last_ip, line)\n\t\t\t\tif last_ip_pos is None:\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\tif line.index('>') > last_ip_pos.start():\n\t\t\t\t\t# from remote to this place\n\t\t\t\t\ttraffic_type = 1\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# out to remote\n\t\t\t\t\ttraffic_type = 0\n\t\t\t\n\t\t\t\ttime_pattern = \"([0-9]+:[0-9]+:[0-9]+.[0-9]+) IP\"\n\t\t\t\ttimestamp = re.search(time_pattern, line)\n\t\t\t\tif timestamp is not None:\n\t\t\t\t\ttime_str = timestamp.group(1)\n\t\t\t\t\th, m, s, ms = map(int, re.split(r'[.:]+', time_str))\n\t\t\t\t\ttimestamp_delta = timedelta(hours=h, minutes=m, seconds=s, microseconds=ms)\n\t\t\t\t\tgEntries.append( (timestamp_delta, traffic_type) )\n\t\t\t\telse:\n\t\t\t\t\tgEntries.append( (None, -1))\n\n\t\tq.put((command.returncode, last_ip, gEntries))\n\t\treturn", "def download_pcap(self, args=None):\r\n result = {\"Task\": \"DownloadPCAP\", \"Error\": \"NoError\", \"Status\": \"FileNotFound\", \"FileName\": args['filename'],\r\n \"FileSize\": 0, \"FileType\": \"UnKnown\", \"FileURL\": 'UnKnown', \"FileUser\": 'UnKnown'}\r\n with EndaceWebSession(app_url=self.applianceurl, username=self.username, password=self.password,\r\n cert_verify=self.cert_verify) as sess:\r\n api = EndaceVisionAPIAdapter(sess)\r\n path = \"files\"\r\n rf = api.get(path)\r\n if rf.status_code == 200:\r\n try:\r\n response = rf.json()\r\n except json.decoder.JSONDecodeError:\r\n raise Exception(f\"JsonDecodeError - path {path}\")\r\n else:\r\n meta = response.get(\"meta\", {})\r\n payload = response.get(\"payload\")\r\n if meta:\r\n meta_error = meta[\"error\"]\r\n if meta_error is not None:\r\n if meta_error is not False:\r\n result['Status'] = \"FileNotFound\"\r\n result['Error'] = str(meta_error)\r\n else:\r\n # Download PCAP File\r\n for file in payload:\r\n if result['FileName'] == file['name'] and len(file[\"id\"]):\r\n file_numerical_part = float(re.findall(r'[\\d\\.]+', file['usage'])[0])\r\n\r\n if 'KB' in file['usage']:\r\n filesize = file_numerical_part * 0.001\r\n elif 'GB' in file['usage']:\r\n filesize = file_numerical_part * 1000\r\n elif 'TB' in file['usage']:\r\n filesize = file_numerical_part * 1000000\r\n else:\r\n filesize = file_numerical_part * 1\r\n\r\n if filesize <= int(args['filesizelimit']):\r\n result['FileName'] = file['name'] + \".pcap\"\r\n if not file['status']['inUse']:\r\n # File available to download\r\n pcapfile_url_path = (\"files/%s/stream?format=pcap\" % file[\"id\"])\r\n d = api.get(pcapfile_url_path)\r\n if d.status_code == 200:\r\n demisto.results(fileResult(f'{result[\"FileName\"]}', d.content,\r\n file_type=entryTypes['entryInfoFile']))\r\n\r\n result['FileURL'] = f'[Endace PCAP URL]'\\\r\n f'({self.applianceurl}/vision2/data/'\\\r\n f'{pcapfile_url_path})'\r\n\r\n result['FileSize'] = file['usage']\r\n result['Status'] = \"DownloadFinished\"\r\n result['FileType'] = file['type']\r\n result['FileUser'] = file['user']\r\n else:\r\n result['Status'] = \"FileNotFound\"\r\n result['Error'] = f\"ServerError - HTTP {rf.status_code} to /{path}\"\r\n else:\r\n result['Status'] = \"FileInUse\"\r\n else:\r\n result['Status'] = \"FileExceedsSizeLimit\"\r\n else:\r\n result['Status'] = \"Failed\"\r\n result['Error'] = f\"ServerError - empty meta data from {path}\"\r\n else:\r\n result['Status'] = \"Failed\"\r\n result['Error'] = f\"ServerError - HTTP {rf.status_code} to /{path}\"\r\n\r\n if result['Status'] == 'Failed':\r\n self.handle_error_notifications(result['Error'])\r\n return result", 
"def retrieve(self, trace_filter={}, limit=0):\n\n if isinstance(limit, int) and limit > 0:\n max_r = limit\n else:\n max_r = 0\n\n try:\n packets = self.__db.find_packets(self._collection, trace_filter, max_r)\n except MemoryError:\n print(\"Warning: cannot allocate sufficient memory for packets, perhaps you are using Windows?\")\n return []\n except:\n return []\n\n # Attempt to decode base64 payloads.\n for packet in packets:\n if packet[\"tcp_info\"] is not None:\n if isinstance(packet[\"tcp_info\"][\"payload\"], bytes):\n try:\n packet[\"tcp_info\"][\"payload\"] = b64decode(packet[\"tcp_info\"][\"payload\"])\n except:\n continue\n\n if packet[\"tls_info\"] is not None:\n for i, data in enumerate(packet[\"tls_info\"][\"data\"]):\n if isinstance(data, bytes):\n try:\n packet[\"tls_info\"][\"data\"][i] = b64decode(data)\n except:\n continue\n\n return packets", "def loadpts(skip=40, filt=None, ref_frame=None):\n pts = []\n for i in range(42):\n print('loading file: ', i)\n if filt is not None:\n traj = md.load(DCD_PROT(i), top=PDB_PROT, atom_indices=filt, stride=skip)\n else:\n traj = md.load(DCD_PROT(i), top=PDB_PROT, stride=skip)\n traj.center_coordinates()\n if ref_frame is not None:\n traj.superpose(ref_frame)\n for i in traj.xyz:\n pts.append(i)\n return np.array(pts)", "def create_stream(cls, packet_count=test_packet_count):\n for i in range(0, packet_count):\n info = cls.create_packet_info(cls.src_dst_if, cls.src_dst_if)\n payload = cls.info_to_payload(info)\n p = (\n Ether(dst=cls.src_dst_if.local_mac, src=cls.src_dst_if.remote_mac)\n / IP(\n id=info.index,\n src=cls.src_dst_if.remote_ip4,\n dst=cls.src_dst_if.local_ip4,\n )\n / ICMP(type=\"echo-request\", id=1234)\n / Raw(payload)\n )\n cls.extend_packet(p, 1518, cls.padding)\n info.data = p", "def accumulate_packets():\n l = []\n packets = sniff(count=NUMBER_OF_SNIFFING_ROUNDS, lfilter=fltr, prn=printing)\n print(\"Processing packets!\")\n for packet in packets:\n l.append({\"ip\": get_ip(packet),\n \"country\": get_country(packet),\n \"entering\": is_entering(packet),\n \"port\": get_partner_port(packet),\n \"size\": packet[IP].len, #the len of the ip layer is the len of the entire packet\n \"program\": get_program(packet)})\n return l", "def sniff_online(args):\n print('viewer: listening on ' + args.interface)\n\n try:\n sniffer = pcapy.open_live(args.interface, 65536, 1, 1)\n sniffer.setfilter('icmp')\n except Exception as e:\n print(e)\n sys.exit(-1)\n\n if not args.count:\n count = True\n else:\n count = args.count\n\n while count:\n (header, packet) = sniffer.next()\n if header:\n tts = header.getts()\n ret = parse_ICMP_Echo(tts[0] + tts[1] / 1000000, packet)\n\n if ret and args.count:\n count -= 1", "def tcp_traceflow(packet, *, count=NotImplemented):\n if 'TCP' in packet:\n ip = packet['IP'] if 'IP' in packet else packet['IPv6']\n tcp = packet['TCP']\n data = dict(\n protocol=LINKTYPE.get(packet.name.upper()), # data link type from global header\n index=count, # frame number\n frame=packet2dict(packet), # extracted packet\n syn=bool(tcp.flags.S), # TCP synchronise (SYN) flag\n fin=bool(tcp.flags.F), # TCP finish (FIN) flag\n src=ipaddress.ip_address(ip.src), # source IP\n dst=ipaddress.ip_address(ip.dst), # destination IP\n srcport=tcp.sport, # TCP source port\n dstport=tcp.dport, # TCP destination port\n timestamp=time.time(), # timestamp\n )\n return True, data\n return False, None", "def process_pcap(self):\n # Create Core Controller\n controller = Controller(self.args.input, self.args.extraTests, 
self.args.non_verbose, self.args.output,\n self.args.debug)\n\n if not self.args.skip:\n # Load PCAP statistics\n recalculate_intervals = None\n if self.args.recalculate_delete:\n recalculate_intervals = True\n elif self.args.recalculate_yes:\n recalculate_intervals = True\n self.args.recalculate = True\n elif self.args.recalculate_no:\n recalculate_intervals = False\n self.args.recalculate = True\n controller.load_pcap_statistics(self.args.export, self.args.recalculate, self.args.statistics,\n self.args.statistics_interval, self.args.recalculate_delete,\n recalculate_intervals)\n\n if self.args.list_intervals:\n controller.list_interval_statistics()\n\n # Create statistics plots\n if self.args.plot is not None:\n do_entropy = False\n if self.args.extraTests:\n do_entropy = True\n controller.create_statistics_plot(self.args.plot, do_entropy)\n\n # Check rng seed\n if not isinstance(self.args.rngSeed, list):\n self.args.rngSeed = [self.args.rngSeed]\n\n # Process attack(s) with given attack params\n if self.args.attack is not None:\n # If attack is present, load attack with params\n controller.process_attacks(self.args.attack, self.args.rngSeed, self.args.time, self.args.inject_empty)\n\n # Parameter -q without arguments was given -> go into query loop\n if self.args.query == [None]:\n controller.enter_query_mode()\n # Parameter -q with arguments was given -> process query\n elif self.args.query is not None:\n controller.process_db_queries(self.args.query, True)", "def next_batch(self):\n\n while self.cap.isOpened():\n flag, frame = self.cap.read()\n yield frame", "def main():\n connection = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.htons(0x03))\n\n # Start the main loop.\n while True:\n # 65536 is the biggest buffer size that can be used.\n raw_data, addr = connection.recvfrom(65536)\n dest_mac, src_mac, eth_proto, data = ethernet_frame(raw_data)\n print('\\nEthernet Frame:')\n print('Destination: {}, Source: {}, Protocol: {}'.format(dest_mac, src_mac, eth_proto))", "def read_packets(serial_input):\n while 1:\n header = scan_to_headerword(serial_input)\n yield header.read_packet(serial_input)", "def sniffing():\n sniff(store=False, prn=lambda p: threading.Thread(target=next, args=(p,)).start(), iface=IFACE)", "def read_pkt_seq(self):\n pkt = self.read_pkt_line()\n while pkt:\n yield pkt\n pkt = self.read_pkt_line()", "def sniff_continuously(self, packet_count=None):\n \n self.lcapture_tshark = (self.lcapture_tshark or \n self.eventloop.run_until_complete(self._get_tshark_process()))\n\n self._running_processes.add(self.lcapture_tshark)\n\n # Retained for backwards compatibility and to add documentation.\n return self._packets_from_tshark_sync(packet_count=packet_count, \n tshark_process=self.lcapture_tshark)", "def preprocess_capture(data, ip_version=4, transp_layer=\"TCP\"):\n #SEE: https://www.winpcap.org/ntar/draft/PCAP-DumpFileFormat.html\n\n #TODO Implement ipv6, udp and ICMP\n if ip_version == 4:\n pass\n else:\n raise ValueError('IP version must be \"4\"')\n\n if transp_layer == \"TCP\":\n pass\n else:\n raise ValueError('transport layer must be TCP')\n\n try:\n capt = pyshark.FileCapture(data, keep_packets=False, display_filter='tcp')\n except:\n exit(\"Could not open pcap file\")\n\n ip_fields = ['src', 'dst', 'flags_df', 'flags_mf', 'hdr_len', 'len', 'ttl']\n tcp_fields = ['srcport', 'dstport', 'flags_ack', 'flags_fin', 'flags_push',\n 'flags_reset', 'flags_syn', 'flags_urg', 'hdr_len', 'len']\n\n #Temporary list to feed the final DataFrame (Performance)\n tmp = []\n 
counter = 0\n logging.info(\"Starting packet processing\")\n for pkt in capt:\n filtered = {}\n #First field is a empty string (ignoring)\n if hasattr(pkt, 'ip'):\n for field in ip_fields:\n #Changing field names for disambiguation in columns\n filtered[\"ip_\"+field] = pkt[\"ip\"].get_field(field)\n else:\n continue\n if hasattr(pkt, 'tcp'):\n for field in tcp_fields:\n #Changing field names for disambiguation in columns\n filtered[\"tcp_\"+field] = pkt[\"tcp\"].get_field(field)\n else:\n continue\n tmp.append(filtered)\n counter += 1\n if counter % 1000 == 0:\n logging.info(\"Processed %d packets\", counter)\n logging.info(\"Ended packet processing\")\n logging.info(\"Converting list to DataFrame\")\n X = pd.DataFrame(tmp)\n logging.info(\"Ended list conversion\")\n return X", "def process_trace(pcap_filepath, graph_dir_exp, stat_dir_exp, failed_conns_dir_exp, acksize_tcp_dir_exp, tcpcsm, mptcp_connections=None, print_out=sys.stdout, light=False, return_dict=False):\n cmd = ['tstat', '-s', os.path.basename(pcap_filepath[:-5]), pcap_filepath]\n\n keep_tstat_log = False if return_dict else True\n\n try:\n connections = process_tstat_cmd(cmd, pcap_filepath, keep_log=keep_tstat_log, graph_dir_exp=graph_dir_exp)\n except TstatError as e:\n print(str(e) + \": skip process\", file=sys.stderr)\n return\n\n # Directory containing all TCPConnections that tried to be MPTCP subflows, but failed to\n failed_conns = {}\n\n if tcpcsm:\n retransmissions_tcpcsm(pcap_filepath, connections)\n\n acksize_all = {co.C2S: {}, co.S2C: {}}\n\n if not light:\n inverse_conns = create_inverse_tcp_dictionary(connections)\n\n acksize_all = compute_tcp_acks_retrans(pcap_filepath, connections, inverse_conns)\n\n acksize_all_mptcp = {co.C2S: {}, co.S2C: {}}\n\n if mptcp_connections:\n fast_conns = get_preprocessed_connections(mptcp_connections)\n for flow_id in connections:\n # Copy info to mptcp connections\n copy_info_to_mptcp_connections(connections, mptcp_connections, failed_conns, acksize_all, acksize_all_mptcp, flow_id,\n fast_conns=fast_conns)\n\n if not light:\n for conn_id, conn in mptcp_connections.iteritems():\n for direction in co.DIRECTIONS:\n max_ack = timedelta(0)\n max_payload = timedelta(0)\n for flow_id, flow in conn.flows.iteritems():\n if co.TIME_LAST_ACK_TCP in flow.attr[direction] and (flow.attr[direction][co.TIME_LAST_ACK_TCP] - max_ack).total_seconds() > 0.0:\n max_ack = flow.attr[direction][co.TIME_LAST_ACK_TCP]\n\n if co.TIME_LAST_PAYLD_TCP in flow.attr[direction] and (flow.attr[direction][co.TIME_LAST_PAYLD_TCP] - max_payload).total_seconds() > 0.0:\n max_payload = flow.attr[direction][co.TIME_LAST_PAYLD_TCP]\n\n mptcp_connections[conn_id].attr[direction][co.TIME_LAST_ACK_TCP] = max_ack\n mptcp_connections[conn_id].attr[direction][co.TIME_LAST_PAYLD_TCP] = max_payload\n\n try:\n compute_mptcp_dss_retransmissions(pcap_filepath, mptcp_connections, fast_conns)\n except dpkt.NeedData as e:\n print(e, \": trying to continue...\", file=sys.stderr)\n\n if return_dict:\n if mptcp_connections:\n return connections, acksize_all_mptcp\n else:\n return connections, acksize_all\n else:\n # Save connections info\n if mptcp_connections:\n co.save_data(pcap_filepath, acksize_tcp_dir_exp, acksize_all_mptcp)\n # Also save TCP connections that failed to be MPTCP subflows\n co.save_data(pcap_filepath, failed_conns_dir_exp, failed_conns)\n else:\n co.save_data(pcap_filepath, acksize_tcp_dir_exp, acksize_all)\n co.save_data(pcap_filepath, stat_dir_exp, connections)", "def ingest_packet(self, pkt, 
pkt_receive_timestamp):\n #*** Packet length on the wire:\n self.packet_length = len(pkt)\n #*** Read into dpkt:\n eth = dpkt.ethernet.Ethernet(pkt)\n eth_src = _mac_addr(eth.src)\n eth_dst = _mac_addr(eth.dst)\n eth_type = eth.type\n #*** We only support IPv4 (TBD: add IPv6 support):\n if eth_type != 2048:\n self.logger.error(\"Non IPv4 packet, eth_type is %s\", eth_type)\n return 0\n ip = eth.data\n self.ip_src = socket.inet_ntop(socket.AF_INET, ip.src)\n self.ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst)\n #*** We only support TCP:\n if ip.p != 6:\n self.logger.error(\"Non TCP packet, ip_proto=%s\",\n ip.p)\n return 0\n proto = 'tcp'\n tcp = ip.data\n self.tcp_src = tcp.sport\n self.tcp_dst = tcp.dport\n self.tcp_seq = tcp.seq\n self.tcp_acq = tcp.ack\n self.tcp_flags = tcp.flags\n self.payload = tcp.data\n #*** Generate a hash unique to flow for packets in either direction\n self.fcip_hash = _hash_5tuple(self.ip_src, self.ip_dst, self.tcp_src,\n self.tcp_dst, proto)\n #*** Check to see if we already know this identity:\n db_data = {'hash': self.fcip_hash}\n self.fcip_doc = self.fcip.find_one(db_data)\n if not self.fcip_doc:\n #*** Get flow direction (which way is TCP initiated). Client is\n #*** the end that sends the initial TCP SYN:\n if _is_tcp_syn(tcp.flags):\n self.logger.debug(\"Matched TCP SYN first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 'verified-SYN'\n elif _is_tcp_synack(tcp.flags):\n self.logger.debug(\"Matched TCP SYN+ACK first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_dst\n self.server = self.ip_src\n self.packet_direction = 's2c'\n self.verified_direction = 'verified-SYNACK'\n else:\n self.logger.debug(\"Unmatch state first pkt, tcp_flags=%s\",\n tcp.flags)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 0\n #*** Neither direction found, so add to FCIP database:\n self.fcip_doc = {'hash': self.fcip_hash,\n 'ip_A': self.ip_src,\n 'ip_B': self.ip_dst,\n 'port_A': self.tcp_src,\n 'port_B': self.tcp_dst,\n 'proto': proto,\n 'finalised': 0,\n 'packet_count': 1,\n 'latest_timestamp' : pkt_receive_timestamp,\n 'packet_timestamps': [pkt_receive_timestamp,],\n 'tcp_flags': [tcp.flags,],\n 'packet_lengths': [self.packet_length,],\n 'client': self.client,\n 'server': self.server,\n 'packet_directions': [self.packet_direction,],\n 'verified_direction': self.verified_direction,\n 'suppressed': 0}\n self.logger.debug(\"FCIP: Adding record for %s to DB\",\n self.fcip_doc)\n db_result = self.fcip.insert_one(self.fcip_doc)\n self.packet_count = 1\n\n elif self.fcip_doc['finalised']:\n #*** The flow is already finalised just increment packet count:\n self.fcip_doc['packet_count'] += 1\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count']},})\n self.packet_count = self.fcip_doc['packet_count']\n\n else:\n #*** We've found the flow in the FCIP database, now update it:\n self.logger.debug(\"FCIP: found existing record %s\", self.fcip_doc)\n #*** Rate this packet as c2s or s2c direction:\n if self.client == self.ip_src:\n self.packet_direction = 'c2s'\n elif self.client == self.ip_dst:\n self.packet_direction = 's2c'\n else:\n self.packet_direction = 'unknown'\n #*** Increment packet count. 
Is it at max?:\n self.fcip_doc['packet_count'] += 1\n self.packet_count = self.fcip_doc['packet_count']\n if self.fcip_doc['packet_count'] >= self.max_packet_count:\n #*** TBD:\n self.fcip_doc['finalised'] = 1\n self.logger.debug(\"Finalising...\")\n #*** Read suppressed status to variable:\n self.suppressed = self.fcip_doc['suppressed']\n #*** Read verified_direction status to variable:\n self.verified_direction = self.fcip_doc['verified_direction']\n #*** Add packet timestamps, tcp flags etc:\n self.fcip_doc['latest_timestamp'] = pkt_receive_timestamp\n self.fcip_doc['packet_timestamps'].append(pkt_receive_timestamp)\n self.fcip_doc['tcp_flags'].append(tcp.flags)\n self.fcip_doc['packet_lengths'].append(self.packet_length)\n self.fcip_doc['packet_directions'].append(self.packet_direction)\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count'],\n 'finalised': self.fcip_doc['finalised'],\n 'packet_timestamps': self.fcip_doc['packet_timestamps'],\n 'tcp_flags': self.fcip_doc['tcp_flags'],\n 'packet_lengths': self.fcip_doc['packet_lengths'],\n 'packet_directions': self.fcip_doc['packet_directions']\n },})\n #*** Tests:\n self.logger.debug(\"max_packet_size is %s\", self.max_packet_size())\n self.logger.debug(\"max_interpacket_interval is %s\",\n self.max_interpacket_interval())\n self.logger.debug(\"min_interpacket_interval is %s\",\n self.min_interpacket_interval())", "def get_available_portoffset(target=\"localhost\"):\n target_ip = socket.gethostbyname(target)\n for portoffset in range(10000, 61000, 1000):\n i = portoffset + 873\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((target_ip, i))\n sock.close()\n if result != 0:\n logger.debug(\"port open {0}\".format(portoffset))\n return portoffset\n return None", "def iplayer_from_raw(raw, linktype=1):\n if linktype == 1: # ethernet\n pkt = dpkt.ethernet.Ethernet(raw)\n ip = pkt.data\n elif linktype == 101: # raw\n ip = dpkt.ip.IP(raw)\n else:\n raise Exception(\"unknown PCAP linktype\")\n return ip", "def mptcp_connections(self, pkts):\n\t\tcount = 0\n\t\t#MPTCP_Capable = 0x0\n\t\t#MPTCP_CapableACK ---> successful handshake\n\t\tprint \"======================================================================\"\n\t\tprint \"Successful Handshake --- Look for Ack packets with MPTCP option Header\"\n\t\tprint \"\"\"Token = connectionID = SHA1(key)[0-32] of Other party's key. (Capture from\n\t\t either step 2 or 3 in the first handshake)\"\"\"\n\t\tprint \"Total packets: %s\" % len(pkts)\n\t\tprint \"======================================================================\"\n\t\tprint \"Identifying MPTCP Connections....\"\n\t\tfor i in range(len(pkts)):\n\t\t\tif(MPTCP_CapableACK in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 0):\n\t\t\t\tcount +=1 #Count the number of distinct MPTCP connections\n\t\t\t\t\n\t\t\t\t#Compute the receiver's token\n\t\t\t\tself.key_rcv = pkts[i][TCPOption_MP].mptcp.rcv_key\n\t\t\t\tself.rcv_token, self.rcv_dsn = self.key2tokenAndDSN(self.key_rcv)\n\n\t\t\t\t#Compute the sender's token\n\t\t\t\tself.key_snd = pkts[i][TCPOption_MP].mptcp.snd_key\n\t\t\t\tself.snd_token, self.snd_dsn = self.key2tokenAndDSN(self.key_snd)\n\n\t\t\t\tprint (\"%i. 
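
# key2tokenAndDSN() is called just above but not included in this excerpt. A minimal sketch
# of what such a helper could look like, assuming the 64-bit MPTCP key arrives as an integer
# (the form scapy exposes in mptcp.snd_key / mptcp.rcv_key): per RFC 6824 the token is the
# most significant 32 bits of SHA-1(key) and the initial data sequence number (IDSN) is the
# least significant 64 bits. The function name and signature are assumptions.
import hashlib
import struct

def key_to_token_and_dsn(key_int):
    key_bytes = struct.pack(">Q", key_int)        # 64-bit key, network byte order
    digest = hashlib.sha1(key_bytes).digest()     # 20-byte SHA-1 digest
    token = struct.unpack(">I", digest[:4])[0]    # most significant 32 bits -> token
    idsn = struct.unpack(">Q", digest[-8:])[0]    # least significant 64 bits -> IDSN
    return token, idsn
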
New MPTCP Connection (Successful Handshake) src: %s; dest: %s; Sender's key: %s; Receiver's key: %s; Receivers Token (connectionID): %s; Sender's Token: %s\" % (count, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_key, pkts[i][TCPOption_MP].mptcp.rcv_key, self.rcv_token, self.snd_token))\n\t\tprint \"Total MPTCP Connections: %i\" % count", "def sniff_ip(time_to_sniff):\r\n ip_dict = dict()\r\n port_dict = dict()\r\n packets = sniff(timeout=time_to_sniff, filter=\"ip\")\r\n\r\n for i in packets:\r\n sport = 0\r\n src = i['IP'].src\r\n\r\n if \"TCP\" in i:\r\n sport = i['TCP'].sport\r\n\r\n elif \"UDP\" in i:\r\n sport = i['UDP'].sport\r\n\r\n if not src in ip_dict.keys():\r\n ip_dict[src] = 1\r\n\r\n else:\r\n ip_dict[src] += 1\r\n\r\n if sport:\r\n if not sport in port_dict.keys():\r\n port_dict[sport] = 1\r\n\r\n else:\r\n port_dict[sport] += 1\r\n\r\n return ip_dict, port_dict", "def handle_udp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n length = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(length)\r\n packets[i][2].append(checksum_value)\r\n\r\n return packets", "def MatchIpAddressInArpPackets(self):\n return self._get_attribute('matchIpAddressInArpPackets')", "def group_packets(self, packets):\n sessions = packets.sessions() # groups connections from X to Y as a Scapy PacketList in a dict\n # example: dict['TCP 172.217.17.102:443 > 10.7.2.60:38386'] = PacketList\n\n session_keys = list(sessions.keys()) # force copy so we can alter the dictionary at runtime\n for key in session_keys:\n reversed_key = self.reverse_dict_key(key)\n if(reversed_key != key and sessions.__contains__(reversed_key)):\n sessions[key] += sessions.pop(reversed_key)\n session_keys.remove(reversed_key)\n\n return self.sort_grouped_packets(list(sessions.values()))", "def get_connections(self, kind='inet'):\r\n # Note: in case of UNIX sockets we're only able to determine the\r\n # local bound path while the remote endpoint is not retrievable:\r\n # http://goo.gl/R3GHM\r\n inodes = {}\r\n # os.listdir() is gonna raise a lot of access denied\r\n # exceptions in case of unprivileged user; that's fine:\r\n # lsof does the same so it's unlikely that we can to better.\r\n for fd in os.listdir(\"/proc/%s/fd\" % self.pid):\r\n try:\r\n inode = os.readlink(\"/proc/%s/fd/%s\" % (self.pid, fd))\r\n except OSError:\r\n continue\r\n if inode.startswith('socket:['):\r\n # the process is using a socket\r\n inode = inode[8:][:-1]\r\n inodes[inode] = fd\r\n\r\n if not inodes:\r\n # no connections for this process\r\n return []\r\n\r\n def process(fin, family, type_):\r\n retlist = []\r\n try:\r\n f = open(fin, 'r')\r\n except IOError:\r\n # IPv6 not supported on this platform\r\n err = sys.exc_info()[1]\r\n if err.errno == errno.ENOENT and fin.endswith('6'):\r\n return []\r\n else:\r\n raise\r\n try:\r\n f.readline() # skip the first line\r\n for line in f:\r\n # IPv4 / IPv6\r\n if family in (socket.AF_INET, socket.AF_INET6):\r\n _, laddr, raddr, status, _, _, _, _, _, inode = \\\r\n line.split()[:10]\r\n if inode in inodes:\r\n laddr = self._decode_address(laddr, family)\r\n raddr = self._decode_address(raddr, family)\r\n if type_ == socket.SOCK_STREAM:\r\n status = _TCP_STATES_TABLE[status]\r\n else:\r\n 
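
# The /proc/net/tcp parser above relies on self._decode_address(), which is not shown here.
# A plausible sketch for the IPv4 case, assuming the kernel's "ADDR:PORT" hex formatting in
# which the 32-bit address is stored in host byte order (so "0100007F:0050" is 127.0.0.1:80
# on a little-endian machine). The helper name is mine; only the format is taken from the
# parsing code above.
import socket
import struct

def decode_ipv4_proc_address(addr_port):
    addr_hex, port_hex = addr_port.split(':')
    # htonl() undoes the host-byte-order storage before rendering the dotted quad.
    ip = socket.inet_ntoa(struct.pack('>I', socket.htonl(int(addr_hex, 16))))
    return ip, int(port_hex, 16)

# decode_ipv4_proc_address("0100007F:0050") -> ("127.0.0.1", 80)
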
status = \"\"\r\n fd = int(inodes[inode])\r\n conn = nt_connection(fd, family, type_, laddr,\r\n raddr, status)\r\n retlist.append(conn)\r\n elif family == socket.AF_UNIX:\r\n tokens = line.split()\r\n _, _, _, _, type_, _, inode = tokens[0:7]\r\n if inode in inodes:\r\n\r\n if len(tokens) == 8:\r\n path = tokens[-1]\r\n else:\r\n path = \"\"\r\n fd = int(inodes[inode])\r\n type_ = int(type_)\r\n conn = nt_connection(fd, family, type_, path,\r\n None, \"\")\r\n retlist.append(conn)\r\n else:\r\n raise ValueError(family)\r\n return retlist\r\n finally:\r\n f.close()\r\n\r\n tcp4 = (\"tcp\" , socket.AF_INET , socket.SOCK_STREAM)\r\n tcp6 = (\"tcp6\", socket.AF_INET6, socket.SOCK_STREAM)\r\n udp4 = (\"udp\" , socket.AF_INET , socket.SOCK_DGRAM)\r\n udp6 = (\"udp6\", socket.AF_INET6, socket.SOCK_DGRAM)\r\n unix = (\"unix\", socket.AF_UNIX, None)\r\n\r\n tmap = {\r\n \"all\" : (tcp4, tcp6, udp4, udp6, unix),\r\n \"tcp\" : (tcp4, tcp6),\r\n \"tcp4\" : (tcp4,),\r\n \"tcp6\" : (tcp6,),\r\n \"udp\" : (udp4, udp6),\r\n \"udp4\" : (udp4,),\r\n \"udp6\" : (udp6,),\r\n \"unix\" : (unix,),\r\n \"inet\" : (tcp4, tcp6, udp4, udp6),\r\n \"inet4\": (tcp4, udp4),\r\n \"inet6\": (tcp6, udp6),\r\n }\r\n if kind not in tmap:\r\n raise ValueError(\"invalid %r kind argument; choose between %s\"\r\n % (kind, ', '.join([repr(x) for x in tmap])))\r\n ret = []\r\n for f, family, type_ in tmap[kind]:\r\n ret += process(\"/proc/net/%s\" % f, family, type_)\r\n # raise NSP if the process disappeared on us\r\n os.stat('/proc/%s' % self.pid)\r\n return ret", "def __iter__(self) -> Iterator[packets.Packet]:\n for packet in self._packets:\n yield packet\n for pointer in self._packet_pointers:\n yield pointer.get()", "def sniff_packets(iface=None):\n if iface: # (http)\n sniff(filter=\"port 80\", prn=process_packet, iface=iface, store=False)\n # 'process_packet' is the callback\n else:\n sniff(filter=\"port 80\", prn=process_packet, store=False)\n # default interface", "def start_pcap(host, pcap_file_name, interface, pcap_args='',\n func_ip=None, tool_path=None):\n with LydianClient(_get_host_ip(host, func_ip)) as client:\n client.pcap.start_pcap(pcap_file_name, interface, pcap_args, tool_path)", "def sniff_traffic(hs, count, timeout, traffic_type, pkt_type, exp_dst, step):\n iface = hs.ports['eth1']\n step('Scapy capture started')\n if (traffic_type == \"encap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, timeout={}, \"\n \" filter='port 4789 and (!icmp or !ip6)', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)\n elif (traffic_type == \"decap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, \"\n \" timeout={}, filter='!icmp or !ip6', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)", "def offline_pcap_input_to_pipe(pcap=None, p=None, quick=False):\n\n tdm_cnt = 0\n\n print(\"Offline mode: Reading TDMs from PCAP/PCAPNG file and writing to pipe.\")\n pkt_list = rdpcap(pcap) # read the PCAP/PCAPNG file and return a list of packets\n\n # Parse the Packet List for TmNS Data Messages (TDMs), and write them to the binary TDM file\n print(\"Named Pipe '{0}' has been opened for writing. Waiting for Pipe Reader to connect.\".format(p))\n pipeout = open(p, 'wb')\n print(\"Connected to Named Pipe '{0}'. 
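
# The replay loop that follows paces packets against their capture timestamps by anchoring a
# constant offset between "now" and the first packet's time, then sleeping until each packet
# is due. The same idea in isolation (pkt_times is a list of capture timestamps and emit() is
# a stand-in for the pipe write; both are assumptions for illustration):
import time

def replay_paced(pkt_times, emit):
    offset = time.time() - pkt_times[0]
    for index, capture_time in enumerate(pkt_times):
        while capture_time + offset > time.time():
            time.sleep(0.0001)   # short poll, mirroring the original busy-ish wait
        emit(index)
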
Writing binary TDMs into pipe.\".format(p))\n\n delta_from_current_time = time.time() - pkt_list[0].time\n\n try:\n for pkt in pkt_list:\n if pkt[UDP].dport == TDM_PORT:\n if quick is False:\n while (pkt.time + delta_from_current_time) > time.time():\n sleep(0.0001)\n pipeout.write(bytes(pkt[UDP].payload))\n tdm_cnt += 1\n print(\"\\rTDM Count: {0}\".format(tdm_cnt), end=\" \")\n pipeout.close()\n except IOError as e:\n if e.errno == errno.EPIPE:\n print(\"\\nBroken Pipe: EPIPE\")\n print(\"\")\n\n if tdm_cnt == 0:\n print(\"ZERO TmNS Data Messages found in {0}. No data written to {1}.\".format(pcap, p))\n else:\n print(\"\\nThere were {0} TmNS Data Messages written to {1}.\".format(tdm_cnt, p))", "def iterate(cls, disc, track_number):\n\n assert track_number >= 0 and track_number < len(disc.tracks)\n\n track = disc.tracks[track_number]\n\n packet_frame_size = (\n disc.audio_format.rate / cls.PACKETS_PER_SECOND)\n\n # Mock up a packet that ends at the start of index 1, so the\n # first packet generated starts at that position\n p = cls(disc, track, track_number, track.pregap_offset, 0)\n\n while True:\n # Calculate offsets of next packet\n abs_pos = p.abs_pos + p.length\n\n if abs_pos < track.pregap_offset:\n length = min(track.pregap_offset - abs_pos, packet_frame_size)\n else:\n length = min(track.length - abs_pos, packet_frame_size)\n\n assert length >= 0\n\n if length == 0:\n # Reached end of track, switch to next. Simplify this\n # code by generating a dummy packet for the next\n # iteration to work on (but don't yield it!)\n\n track_number += 1\n\n try:\n track = disc.tracks[track_number]\n except IndexError:\n # That was the last track, no more packets\n return\n\n p = cls(disc, track, track_number, 0, 0)\n\n else:\n # Generate next packet\n flags = 0\n if (track.pause_after\n and abs_pos + length == track.length\n and track_number + 1 < len(disc.tracks)):\n flags |= p.PAUSE_AFTER\n\n p = cls(disc, track, track_number, abs_pos, length, flags)\n yield p", "def IperfTCP(target_src, target_dst, dst, length, window=None):\n iperf = IperfSet(target_src, target_dst, dst)\n iperf.Start(length, None, window)\n return iperf.Results()", "def _process(self, buf, ts=None, pkt_num=None):\n\n if not buf:\n return\n self.pkt_num = pkt_num\n eth = dpkt.ethernet.Ethernet(buf)\n ip = eth.data\n tcp = ip.data\n sip = inet_to_str(ip.src)\n dip = inet_to_str(ip.dst)\n fin_flag = tcp.flags & 0x001\n ack_flag = tcp.flags & 0x010\n syn_flag = tcp.flags & 0x002\n rst_flag = tcp.flags & 0x004\n syn_unacceptable_states = [TCPState.ESTABLISHED, TCPState.FIN_WAIT_1, TCPState.FIN_WAIT_2,\n TCPState.CLOSING, TCPState.LAST_ACK]\n data_acceptable_states = [TCPState.ESTABLISHED, TCPState.CLOSE_WAIT]\n tcp_opts = dpkt.tcp.parse_opts(tcp.opts) if tcp.opts else None\n tcp_opts = tcp_opts_tuple_list_to_dict(tcp_opts) if tcp_opts else None\n num_pkt_session_pkt = len(self.sessions[self.session_count]) if self.session_count else 0\n\n # Only Window size can change in ACKs (in other words - after SYNs), nothing else like - window-scaling, or\n # MSS, or Selective-SYN can't be changed. 
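
# The reassembly logic below leans on helpers such as inc_tcp_seq_number(), seq_numbers_diff()
# and tcp_seq_number_in_window(), whose definitions are not part of this excerpt. A minimal
# sketch of what they could look like -- 32-bit sequence arithmetic that survives wrap-around.
# The semantics are inferred from the call sites; the implementations are assumptions.
SEQ_MOD = 2 ** 32

def inc_tcp_seq_number(seq, amount):
    # Advance a sequence number, wrapping at 2**32.
    return (seq + amount) % SEQ_MOD

def seq_numbers_diff(start, end):
    # Distance from start to end in modulo-2**32 space.
    return (end - start) % SEQ_MOD

def tcp_seq_number_in_window(left_edge, seq, window_size):
    # True when seq falls in [left_edge, left_edge + window_size), inclusive of the left edge.
    return seq_numbers_diff(left_edge, seq) < window_size
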
If present in options after SYN, should be ignored in my opinion\n # https://superuser.com/questions/966212/does-the-sequence-number-of-tcp-packet-headers-wrap-around\n # TODO: seq number in coming packet is ahead of the expected one, then it should be held for processing\n\n def slide_window():\n\n if len(self.sessions[self.session_count]):\n if sip == self.sip:\n if self._s_mss != -1 and get_tcp_packet_payload_len_with_options(eth) > self._s_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_c_pkt()).data\n rcv_nxt = self._s_rcv_next\n win_left_end = self._s_win_left_edge\n early_pkts = self._s_early_pkts\n other_end_win_size = self._s_win_size\n current_state = self._c_state\n else:\n if self._c_mss != -1 and get_tcp_packet_payload_len_with_options(ip) > self._c_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_s_pkt()).data\n rcv_nxt = self._c_rcv_next\n win_left_end = self._c_win_left_edge\n early_pkts = self._c_early_pkts\n other_end_win_size = self._c_win_size\n current_state = self._s_state\n if self._print_debug_info:\n logger.debug(self.client_server_next_rcv(), tcp_pkt_debug_info(ip))\n prev_tcp = prev_ip.data\n prev_tcp_data_offset = prev_tcp.off * 4\n prev_ip_header_len = prev_ip.hl * 4\n prev_tcp_payload_len = prev_ip.len - (prev_tcp_data_offset + prev_ip_header_len)\n tcp_payload_len = get_tcp_packet_payload_len(ip)\n if (tcp_seq_number_in_window(win_left_end, tcp.seq, other_end_win_size) or\n tcp_seq_number_in_window(win_left_end,\n inc_tcp_seq_number(tcp.seq, tcp_payload_len), other_end_win_size)):\n if inc_tcp_seq_number(tcp.seq, tcp_payload_len) == rcv_nxt:\n \"\"\"\n \n Since there is no new payload sent, just store the tcp packet with empty payload.\n This is going to increase the packet count but not going to add duplicated data\n in session data, by session data here it means actual data sent (after discarding\n the retransmission) to application layer. To do that - we will empty out the payload,\n if packets has some, then add the packet to the session, else add the empty packet as it is\n to the session. This logic will easily handle the TCP connections supporting\n TCP Timestamp options describe in https://tools.ietf.org/html/rfc1323\n \n \"\"\"\n # one case is when seq number is < rcv_nxt but sender want to ack more data\n # which means it is sending the same data again but its acking more received content\n \"\"\"\n 1. packet has Data\n a. prev_packet has data\n A. header change (change cur packet and change previous packet) add to list\n B. no header change retransmission ( sum check)\n b. prev_packete has no data\n A. header change (change cur packet only) add to list\n B. no header change retransmission (change cur packet only)\n 2. packet has no data\n a. prev_packet has data\n A. header change (change previous packet only) add to list\n B. no header change (change previous packet only)\n b. prev_packet has no data\n A. header change (sum check) add to list\n B. 
no header change retransmission (sum check)\n \"\"\"\n if prev_tcp.sum == tcp.sum:\n cur_sum = tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack())\n prev_sum = tcp_shasum_calc(prev_ip.src, prev_ip.dst, prev_ip.p, prev_ip.data.pack())\n if cur_sum == prev_sum:\n # covers 1.a.B and 2.b.B\n return\n\n empty_prev_ip = copy.deepcopy(prev_ip)\n empty_prev_tcp = empty_prev_ip.data\n empty_prev_tcp.seq = rcv_nxt\n empty_prev_ip.len -= prev_tcp_payload_len\n empty_prev_tcp.data = b\"\"\n empty_prev_ip = tcp_fix_checksum(empty_prev_ip)\n new_part_ip = copy.deepcopy(ip)\n new_part_tcp = new_part_ip.data\n new_part_tcp.data = b\"\"\n new_part_tcp.seq = rcv_nxt\n new_part_ip.len -= tcp_payload_len\n new_part_ip.sum = 0\n new_part_tcp.sum = 0\n new_part_ip = tcp_fix_checksum(new_part_ip)\n eth.data = new_part_ip\n cur_pkt = eth.pack()\n new_pkt = dpkt.ethernet.Ethernet(cur_pkt)\n new_part_ip = new_pkt.data\n new_part_tcp = new_part_ip.data\n\n \"\"\"\n Checksum comparision logic is kept to discard the straight duplicates packets\n without Timestamp Options. These kind of packet will not serve any purposes.\n If removal of these checksum comparison code blocks felt necessary, it could\n be removed -- that will add few extra retransmitted packets -- but that would\n also requrie to update the testcases built around this code blocks.\n \"\"\"\n if new_part_tcp.sum == empty_prev_tcp.sum:\n # covers 1.b.B\n # covers case 2.a.B\n if tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack()) == tcp_shasum_calc(\n prev_ip.src, prev_ip.dst, prev_ip.p, empty_prev_ip.data.pack()):\n return\n \"\"\"\n needs to added to list under cases 2.a.A, 2.b.A, 1.a.A and 1.b.A\n cur_pkt is updated earlier\n \"\"\"\n if sip == self.sip:\n if inc_tcp_seq_number(self._c_rcv_next, 1) <= new_part_tcp.ack:\n self._c_rcv_next = new_part_tcp.ack\n else:\n if inc_tcp_seq_number(self._s_rcv_next, 1) <= new_part_tcp.ack:\n self._s_rcv_next = new_part_tcp.ack\n elif (current_state in data_acceptable_states and\n tcp_seq_number_in_window(tcp.seq, rcv_nxt, tcp_payload_len)):\n stale_data_len = seq_numbers_diff(tcp.seq, rcv_nxt)\n win_right_end = inc_tcp_seq_number(win_left_end, other_end_win_size)\n if tcp_seq_number_in_window(rcv_nxt, inc_tcp_seq_number(tcp.seq, tcp_payload_len),\n seq_numbers_diff(rcv_nxt, win_right_end)):\n tcp.data = tcp.data[stale_data_len:]\n else:\n allowed_payload_size = seq_numbers_diff(rcv_nxt, win_right_end)\n remaining_eth = dpkt.ethernet.Ethernet(eth.pack())\n #remaining_ip = eth.data\n #remaining_tcp = remaining_ip.data\n remaining_eth.data.data.seq = inc_tcp_seq_number(tcp.seq, stale_data_len + allowed_payload_size)\n remaining_eth.data.data.data = tcp.data[stale_data_len + allowed_payload_size:]\n remaining_eth.data.len -= stale_data_len + allowed_payload_size\n remaining_eth.data = tcp_fix_checksum(remaining_eth.data)\n #remaining_eth.data = remaining_ip\n tcp.data = tcp.data[stale_data_len: stale_data_len + allowed_payload_size]\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n tcp.sum = 0\n # ip.len -= stale_data_len\n tcp.seq = rcv_nxt\n ip.data = tcp\n ip.sum = 0\n eth.data = ip\n cur_pkt = eth.pack()\n if sip == self.sip:\n self._s_rcv_next = inc_tcp_seq_number(self._s_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n else:\n self._c_rcv_next = inc_tcp_seq_number(self._c_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n elif (current_state in data_acceptable_states and\n 
tcp_seq_number_in_window(rcv_nxt, tcp.seq, other_end_win_size)):\n # hold it for further processing\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), buf))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), buf))\n return\n else:\n return\n self.sessions[self.session_count].append(((ts, self.pkt_num), cur_pkt))\n # as this packet is accepted, might need to update the rwnd size and left end of rwnd\n if sip == self.sip:\n self._c_payload_size += len(eth.data.data.data)\n logger.debug(\"Client send data size: {}. Accepted data size is: {}.\"\n \" Total data sent from client is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._c_payload_size))\n self._c_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._s_rcv_next\n if (not tcp.ack == self._c_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._c_win_left_edge, 1),\n tcp.ack, self._c_win_size)):\n self._c_win_left_edge = tcp.ack\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n else:\n self._s_payload_size += len(eth.data.data.data)\n logger.debug(\"Server send data of size: {}. Accepted data size is: {}.\"\n \" Total data sent from server is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._s_payload_size))\n self._s_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._c_rcv_next\n # left edge is incremented by one becuase in_window function checks for inclusive seq number\n # starting at left edge but ACK tells what's the next expected seq number, which could be 1 next\n # to the end of window\n if (not tcp.ack == self._s_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._s_win_left_edge, 1),\n tcp.ack, self._s_win_size)):\n self._s_win_left_edge = tcp.ack\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n # check if packet at the head of queue is ready to be processed\n while True:\n if len(early_pkts) == 0:\n break\n (_ts, _pkt_num), _buf = early_pkts.popleft()\n early_eth = dpkt.ethernet.Ethernet(_buf)\n early_ip = early_eth.data\n early_tcp = early_ip.data\n if tcp_seq_number_in_window(early_tcp.seq, rcv_nxt, get_tcp_packet_payload_len(early_ip)):\n # if early_tcp.seq <= rcv_nxt:\n self._process(early_eth.pack(), _ts, _pkt_num)\n else:\n early_pkts.appendleft(((_ts, _pkt_num), early_eth.pack()))\n break\n\n \"\"\"\n TCP flags:0x000 (12 bits)\n [11 10 9 8 7 6 5 4 3 2 1 0]\n - Bit 11 10 9: reserved\n - Bit 8: nonce\n - Bit 7: CWR (Congestion window reduced)\n - Bit 6: ECN-Echo (Explicit Congestion Notification)\n - Bit 5: Urgent\n - Bit 4: ACK\n - Bit 3: Push\n - Bit 2: Reset\n - Bit 1: SYN\n - Bit 0: FIN\n \"\"\"\n\n \"\"\"TCP flags for SYN [000000010111]\"\"\"\n\n prev_c_pkt = dpkt.ethernet.Ethernet(self.get_last_c_pkt()) if self.get_last_c_pkt() else None\n prev_c_tcp = prev_c_pkt.data.data if prev_c_pkt else None\n prev_s_pkt = dpkt.ethernet.Ethernet(self.get_last_s_pkt()) if self.get_last_s_pkt() else None\n prev_s_tcp = prev_s_pkt.data.data if prev_s_pkt else None\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n logger.debug(\"Processing packet number: {} in the current session\".format(self.pkt_num))\n if rst_flag:\n logger.info(\"Received a RESET flag, packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING:\n self.session_count += 1\n self.sessions[self.session_count] = 
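
# The masked flag comparisons used throughout this state machine (for example
# tcp.flags & 0x017 == 0x002 for a pure SYN, or == 0x012 for SYN+ACK) can be spelled with
# dpkt's flag constants. A small illustration of the same tests; the helper names are mine,
# only the masks come from the code around here.
import dpkt.tcp

HANDSHAKE_MASK = (dpkt.tcp.TH_FIN | dpkt.tcp.TH_SYN |
                  dpkt.tcp.TH_RST | dpkt.tcp.TH_ACK)              # == 0x017

def is_pure_syn(flags):
    # SYN set while FIN, RST and ACK are all clear.
    return flags & HANDSHAKE_MASK == dpkt.tcp.TH_SYN              # == 0x002

def is_syn_ack(flags):
    # SYN and ACK set while FIN and RST are clear.
    return flags & HANDSHAKE_MASK == (dpkt.tcp.TH_SYN | dpkt.tcp.TH_ACK)   # == 0x012
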
[((ts, self.pkt_num), buf)]\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n return\n self._c_state = self._s_state = TCPState.CLOSED\n if self.sip == sip:\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n else:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif syn_flag and (self._c_state in syn_unacceptable_states or self._s_state in syn_unacceptable_states):\n logger.info(\"Received a unacceptable SYN flag, packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts,self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif (self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING and\n self.sip == sip):\n if tcp.flags & 0x017 == 0x002:\n self.session_count += 1\n logger.info(\"number of sessions so far: {}\".format(self.session_count - 1))\n logger.info(\"starting a new session, pkt info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self.sessions[self.session_count] = []\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_state = TCPState.SYN_SENT\n self._s_state = TCPState.SYN_RECEIVED\n self._c_seq = tcp.seq\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._c_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._c_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._c_win_scaling_factor = 0\n self._c_mss = -1\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"SYN flag from: {}:{}. 
Full TCP Flag is: {}\".format(self.sip, self.sp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n\n elif self._c_state == TCPState.SYN_SENT and self._s_state == TCPState.SYN_RECEIVED:\n logger.info(\"TCP packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self.sip == dip:\n exp_ack = inc_tcp_seq_number(prev_c_tcp.seq, 1)\n if not (tcp.flags & 0x017 == 0x012):\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"SYN-ACK flag is not set in the TCP flags: {} from: {}:{}\".format(hex(tcp.flags),\n self.dip, self.dp))\n return\n if tcp.ack == exp_ack:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self._s_rcv_next = exp_ack\n self._s_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._s_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._s_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._s_win_scaling_factor = 0\n self._s_mss = -1\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n logger.info(\"SYN-ACK flag from: {}:{}. Full TCP flag is: {}\".format(\n self.dip, self.dp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n elif prev_s_tcp:\n exp_ack = inc_tcp_seq_number(prev_s_tcp.seq, 1)\n if tcp.flags & 0x017 == 0x010:\n if tcp.ack == exp_ack and tcp.seq == prev_s_tcp.ack:\n self._s_state = self._c_state = TCPState.ESTABLISHED\n self._c_seq = tcp.seq\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self._c_rcv_next = exp_ack\n self._c_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"TCP handshake complete.\")\n else:\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP handshake was not completed.\")\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.ESTABLISHED and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n \"\"\" if ACK flag is off drop the segment as per:\n https://tools.ietf.org/html/rfc793#page-37\n \"\"\"\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n num_pkt_session_pkt = len(self.sessions[self.session_count])\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received a FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self.sip == sip:\n self._c_state = TCPState.FIN_WAIT_1\n else:\n self._s_state = TCPState.FIN_WAIT_1\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_1 and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.dip:\n if inc_tcp_seq_number(prev_c_tcp.seq, max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.FIN_WAIT_2\n self._s_state = 
TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._c_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_1 and self._c_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.sip:\n if inc_tcp_seq_number(prev_s_tcp.seq, max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.FIN_WAIT_2\n self._c_state = TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._s_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_2:\n if sip == self.sip:\n if ack_flag:\n slide_window()\n if self._s_state == TCPState.LAST_ACK:\n if (num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_s_tcp.seq,\n max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._s_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_2:\n if sip == self.dip:\n if ack_flag:\n slide_window()\n if (self._c_state == TCPState.LAST_ACK and\n num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_c_tcp.seq,\n max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._c_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.CLOSING or self._s_state == TCPState.CLOSING:\n if ack_flag:\n slide_window()\n if sip == self.sip and num_pkt_session_pkt < len(self.sessions[self.session_count]):\n if inc_tcp_seq_number(ack_flag and prev_s_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: 
{}\".format(self.get_printable_state()))\n else:\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and \\\n inc_tcp_seq_number(ack_flag and prev_c_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n logger.info(\"Packet didn't match any valid state: {}\".format(tcp_pkt_debug_info(ip)))\n #self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n logger.debug(self.get_printable_state())", "def start_capture(self, interface, count=0, ptype='', pfilter=None,\n callback=None, callback_args=None, save_dump_file=False,\n save_dump_filename=None):\n tcpd = (self.packet_captures[interface]\n if interface in self.packet_captures\n else TCPDump())\n \"\"\" :type: TCPDump \"\"\"\n\n self.LOG.debug('Starting tcpdump on host: ' + self.name)\n\n old_log = self.cli.log_cmd\n if self.debug:\n self.cli.log_cmd = True\n\n tcpd.start_capture(cli=self.cli, interface=interface, count=count,\n packet_type=ptype, pcap_filter=pfilter,\n callback=callback, callback_args=callback_args,\n save_dump_file=save_dump_file,\n save_dump_filename=save_dump_filename,\n blocking=False)\n\n self.cli.log_cmd = old_log\n\n self.packet_captures[interface] = tcpd", "def tcp_pkt_debug_info(pkt: dpkt.ip.IP) -> str:\n if isinstance(pkt, dpkt.ip.IP):\n paylod_len = pkt.len - (4 * pkt.hl) - (4 * pkt.data.off)\n return \"{}:{}-> {}:{}, seq: {}, ack:{}, flag:{}, payload len: {}, payload: {}, sum: {}\".format(\n inet_to_str(pkt.src), pkt.data.sport, inet_to_str(pkt.dst), pkt.data.dport, hex(pkt.data.seq),\n hex(pkt.data.ack), hex(pkt.data.flags), hex(paylod_len), pkt.data.data, hex(pkt.data.sum))", "def process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[saddr, sport, daddr, dport][co.S2C] >= 0:\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[saddr, sport, daddr, dport][co.S2C]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.S2C][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n # If SOCKS command\n if size_payload == 7 and connections[conn_id].attr.get(co.SOCKS_PORT, None) is None:\n crypted_socks_cmd = tcp.data\n # This is possible because of packet stripping\n if len(crypted_socks_cmd) == 7:\n decrypted_socks_cmd = socks_parser.decode(crypted_socks_cmd)\n if decrypted_socks_cmd[0] == b'\\x01': # Connect\n connections[conn_id].attr[co.SOCKS_DADDR] = socks_parser.get_ip_address(decrypted_socks_cmd)\n connections[conn_id].attr[co.SOCKS_PORT] = socks_parser.get_port_number(decrypted_socks_cmd)\n\n if size_payload > 0 and tcp.seq in acks[saddr, sport, daddr, dport][SEQ_C2S]:\n # This is a retransmission! 
(take into account the seq overflow)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][0],\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1],\n ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]))\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[saddr, sport, daddr, dport][SEQ_C2S].add(tcp.seq)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[saddr, sport, daddr, dport][SEQ][co.C2S]) >= 3000000:\n# for x in range(50000):\n# acks[saddr, sport, daddr, dport][SEQ][co.C2S].popleft()\n\n acks[saddr, sport, daddr, dport][co.S2C] = tcp.ack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta", "def query_sniff(pkt):\n if IP in pkt:\n ip_src = pkt[IP].src\n ip_dst = pkt[IP].dst\n\n if pkt.haslayer(DNS) and pkt.getlayer(DNS).qr == 0:\n domain = pkt.getlayer(DNS).qd.qname.decode(\"utf-8\")\n now = datetime.now()\n stored_dns_requests.update({datetime.timestamp(now): domain})\n print(\"SRC: {} - DST: {} : {}\".format(ip_src, ip_dst, domain))", "def _get_next_packet(self):\n raise NotImplementedError(\"Do not instantiate csvAbstractReader directly.\")", "def receive_captured_list(self):\n reply = self.socket.recv(4096)\n print(\"Pokemon capturados\")\n print(reply[1:].decode())", "def scan(ip):\n arp_request = scapy.ARP(pdst=ip)\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n arp_request_broadcast = broadcast/arp_request\n answered_list = scapy.srp(arp_request_broadcast , timeout = 1, verbose=False)[0]\n target_list=[]\n for element in answered_list:\n target_dict = {\"ip\":element[1].psrc, \"mac\":element[1].hwsrc}\n target_list.append(target_dict)\n return target_list", "def processpacket(p):\n\n\tglobal SynSentToTCPService\n\tglobal SynAckSentToTCPClient\n\tglobal LiveTCPService\n\tglobal LiveTCPClient\n\tglobal LiveUDPService\n\tglobal LiveUDPClient\n\tglobal NmapServerDescription\n\tglobal ManualServerDescription\n\tglobal ClientDescription\n\tglobal MacAddr\n\tglobal OSDescription\n\tglobal ServiceFPs\n\tglobal SipPhoneMatch\n\tglobal Devel\n\tglobal IsRouter\n\tglobal DNSRecord\n\tglobal HostIPs\n\n\tif (type(p) == Dot3) and (type(p['LLC']) == LLC):\n\t\tUnhandledPacket(p)\n\t\t#Spanning Tree Protocol\n\t\t#Debug(\"802.3\")\n\t\t#p.show()\n\t\t#print type(p['LLC'])\n\telif (p['Ethernet'] == None):\n\t\tDebug(\"non-ethernet packet\")\t\t#Need more details on how to handle.\n\t\tUnhandledPacket(p)\n\t\t#p.show()\n\t\t#print type(p)\n\t\t#quit()\n\telif p['Ethernet'].type == 0x0806:\t\t#ARP\n\t\t#pull arp data from here instead of tcp/udp packets, as these are all local\n\t\tif (p['ARP'].op == 1):\t\t\t#1 is request (\"who-has\")\n\t\t\tpass\n\t\tif (p['ARP'].op == 2):\t\t\t#2 is reply (\"is-at\")\n\t\t\tif (p['ARP.psrc'] != None) and (p['ARP.hwsrc'] != None):\n\t\t\t\tIPAddr=p['ARP.psrc']\n\t\t\t\tMyMac=p['ARP.hwsrc'].upper()\n\t\t\t\tif (not MacAddr.has_key(IPAddr)) or (MacAddr[IPAddr] != MyMac):\n\t\t\t\t\tReportId(\"MA\", IPAddr, 'Ethernet', MyMac, '')\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 
0x0800:\t\t#IP\n\t\tsIP=str(p['IP'].src)\n\t\tdIP=str(p['IP'].dst)\n\t\t#Best to get these from arps instead; if we get them from here, we get router macs for foreign addresses.\n\t\t#if not MacAddr.has_key(sIP):\n\t\t#\tReportId(\"MA\", sIP, \"Ethernet\", p['Ethernet'].src, '')\n\t\t#if not MacAddr.has_key(dIP):\n\t\t#\tReportId(\"MA\", dIP, \"Ethernet\", p['Ethernet'].dst, '')\n\n\t\tif p['IP'].proto == 1:\t\t\t#ICMP\n\t\t\tType = p['ICMP'].type\n\t\t\tCode = p['ICMP'].code\n\n\t\t\tif (Type == 0):\t\t\t\t\t\t#Echo reply\n\t\t\t\tif (not(OSDescription.has_key(sIP))):\n\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", 'icmp echo reply')\n\t\t\telif (Type == 3) and (type(p[IPerror]) == IPerror):\t#Unreachable, check that we have an actual embedded packet\n\t\t\t\t#if (type(p[IPerror]) != IPerror):\n\t\t\t\t#\tp.show()\n\t\t\t\t#\tprint type(p[IPerror])\n\t\t\t\t#\tquit()\n\t\t\t\tOrigdIP = p[IPerror].dst\n\t\t\t\tif (Code == 0):\t\t\t\t\t#Net unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"NetUn\", \"router\", \"\")\n\t\t\t\telif (Code == 1):\t\t\t\t#Host unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"HostUn\", \"router\", \"\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 17):\t#Port unreachable and embedded protocol = 17, UDP, as it should be\n\t\t\t\t\tDNSServerLoc = p[IPerror].src + \",UDP_53\"\n\t\t\t\t\tif (p[UDPerror].sport == 53) and (ManualServerDescription.has_key(DNSServerLoc)) and (ManualServerDescription[DNSServerLoc] == \"dns/server\"):\n\t\t\t\t\t\t#If orig packet coming from 53 and coming from a dns server, don't do anything (closed port on client is a common effect)\n\t\t\t\t\t\t#Don't waste time on port unreachables going back to a dns server; too common, and ephemeral anyways.\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\t#If orig packet coming from something other than 53, or coming from 53 and NOT coming from a dns server, log as closed\n\t\t\t\t\t\tOrigDPort = str(p[UDPerror].dport)\n\t\t\t\t\t\tOrigDstService = OrigdIP + \",UDP_\" + OrigDPort\n\t\t\t\t\t\tif ((not LiveUDPService.has_key(OrigDstService)) or (LiveUDPService[OrigDstService] == True)):\n\t\t\t\t\t\t\tLiveUDPService[OrigDstService] = False\n\t\t\t\t\t\t\tReportId(\"US\", OrigdIP, \"UDP_\" + OrigDPort, \"closed\", \"port unreachable\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 6) and (p[TCPerror].dport == 113):\t#Port unreachable and embedded protocol = 6, TCP, which it shouldn't. May be the same firewall providing the TCP FR's\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 6):\t\t\t\t#Net unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unknown')\n\t\t\t\telif (Code == 7):\t\t\t\t#Host unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unknown')\n\t\t\t\telif (Code == 9):\t\t\t\t#Network Administratively Prohibited\n\t\t\t\t\tpass\t\t\t\t\t#Can't tell much from this type of traffic. 
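
# The destination-unreachable branch here distinguishes ICMP type-3 codes by hand; the same
# mapping as a small lookup table, with labels following the comments in the original code.
ICMP_UNREACH_CODES = {
    0: "net unreachable",
    1: "host unreachable",
    3: "port unreachable",
    6: "net unknown",
    7: "host unknown",
    9: "network administratively prohibited",
    10: "host administratively prohibited",
    11: "network unreachable for TOS",
    12: "host unreachable for TOS",
    13: "communication administratively prohibited",
}
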
Possibly list as firewall?\n\t\t\t\telif (Code == 10):\t\t\t\t#Host Administratively Prohibited\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 11):\t\t\t\t#Network unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 12):\t\t\t\t#Host unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 13):\t\t\t\t#Communication Administratively prohibited\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (Type == 8):\t\t\t\t\t#ping\n\t\t\t\t#FIXME - check payload for ping sender type, perhaps\n\t\t\t\tpass\n\t\t\telif (Type == 11):\t\t\t\t\t#Time exceeded\n\t\t\t\tif (Code == 0):\t\t\t\t\t#TTL exceeded\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\t#FIXME - put original target IP as column 5?\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"TTLEx\", \"router\", \"\")\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 2:\t\t#IGMP\n\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 6:\t\t#TCP\n\t\t\tsport=str(p['TCP'].sport)\n\t\t\tdport=str(p['TCP'].dport)\n\t\t\t#print p['IP'].src + \":\" + sport + \" -> \", p['IP'].dst + \":\" + dport,\n\t\t\tif (p['TCP'].flags & 0x17) == 0x12:\t#SYN/ACK (RST and FIN off)\n\t\t\t\tCliService = dIP + \",TCP_\" + sport\n\t\t\t\tif not SynAckSentToTCPClient.has_key(CliService):\n\t\t\t\t\tSynAckSentToTCPClient[CliService] = True\n\n\t\t\t\t#If we've seen a syn sent to this port and have either not seen any SA/R, or we've seen a R in the past:\n\t\t\t\t#The last test is for a service that was previously closed and is now open; report each transition once.\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == False)) ):\n\t\t\t\t\tLiveTCPService[Service] = True\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", '')\n\t\t\telif (p['TCP'].flags & 0x17) == 0x02:\t#SYN (ACK, RST, and FIN off)\n\t\t\t\tService = dIP + \",TCP_\" + dport\n\t\t\t\tif not SynSentToTCPService.has_key(Service):\n\t\t\t\t\tSynSentToTCPService[Service] = True\n\t\t\t\t#Debug(\"trying to fingerprint \" + sIP)\n\t\t\t\ttry:\n\t\t\t\t\tp0fdata = p0f(p)\n\t\t\t\t\t#FIXME - reasonably common occurence, don't whine, just fix it.\n\t\t\t\t\t#if (len(p0fdata) >1):\n\t\t\t\t\t#\tDebug(\"More than one OS fingerprint for \" + sIP + \", using the first.\")\n\t\t\t\t\tif (len(p0fdata) >=1):\n\t\t\t\t\t\tPDescription = p0fdata[0][0] + \" \" + p0fdata[0][1] + \" (\" + str(int(p0fdata[0][2]) + 1)\t#FIXME - Grabbing just the first candidate, may need to compare correlation values; provided?\n\t\t\t\t\t\tif (p0fdata[0][2] == 0):\n\t\t\t\t\t\t\tPDescription = PDescription + \" hop away)\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPDescription = PDescription + \" hops away)\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t#[N][2] param appears to be distance away in hops (but add 1 to this to get real hop count?)\n\t\t\t\t\t\tPDescription = PDescription.replace(',', ';')\t\t#Commas are delimiters in output\n\t\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", PDescription)\n\t\t\t\texcept:\n\t\t\t\t\tPDescription = 'p0f failure'\n\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\tDebug(\"P0f failure in \" + sIP + \":\" + sport + \" -> \" + dIP + \":\" + dport)\n\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", 
\"live\", PDescription)\n\t\t\telif (p['TCP'].flags & 0x07) == 0x01:\t#FIN (SYN/RST off)\n\t\t\t\tCliService = sIP + \",TCP_\" + dport\n\t\t\t\tif ( (SynAckSentToTCPClient.has_key(CliService)) and ((not LiveTCPClient.has_key(CliService)) or (LiveTCPClient[CliService] == False)) ):\n\t\t\t\t\tLiveTCPClient[CliService] = True\n\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", '')\n\t\t\telif (p['TCP'].flags & 0x07) == 0x04:\t#RST (SYN and FIN off)\n\t\t\t\t#FIXME - handle rst going in the other direction?\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == True)) ):\n\t\t\t\t\tLiveTCPService[Service] = False\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"closed\", '')\n\t\t\telif ((p['TCP'].flags & 0x3F) == 0x15) and (sport == \"113\"):\t#FIN, RST, ACK (SYN, PSH, URG off)\n\t\t\t\t#This may be a firewall or some other device stepping in for 113 with a FIN/RST.\n\t\t\t\tpass\n\t\t\telif (p['TCP'].flags & 0x17) == 0x10:\t#ACK (RST, SYN, and FIN off)\n\t\t\t\t#FIXME - check for UnhandledPacket placement in ACK\n\t\t\t\tFromPort = sIP + \",TCP_\" + sport\n\t\t\t\tToPort = dIP + \",TCP_\" + dport\n\t\t\t\tPayload = str(p['Raw.load'])\t\t\t#For some reason this doesn't handle p['Raw'].load\n\t\t\t\tif ( (LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True) and (LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\n\t\t\t\t\tprint \"Logic failure: both \" + FromPort + \" and \" + ToPort + \" are listed as live services.\"\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\telif ((LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True)):\t#If the \"From\" side is a known TCP server:\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort) ):\t\t#Check nmap fingerprint strings for this server port\n\t\t\t\t\t\tif (ServiceFPs.has_key(int(sport))):\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs[int(sport)]:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\t#Debugging:\n\t\t\t\t\t\t\t\t\t#FIXME - removeme once understood:\n\t\t\t\t\t\t\t\t\t#File \"/home/wstearns/med/programming/python/passer/passer.py\", line 504, in processpacket\n\t\t\t\t\t\t\t\t\t#OutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\t#TypeError: expected a character buffer object\n\t\t\t\t\t\t\t\t\tif (OneTuple[1] == None):\n\t\t\t\t\t\t\t\t\t\tDebug(\"Null description for \" + OneTuple[0])\n\t\t\t\t\t\t\t\t\t\t#quit()\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\t#Example: Replace \"$1\" with MatchObj.group(1)\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), str(MatchObj.group(Index)))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\t\t\t\t\t#Exit for loop, no need to check any more fingerprints now that we've found a match\n\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort)):\t\t#If the above loop didn't find a server description\n\t\t\t\t\t\tif (ServiceFPs.has_key('all')):\t\t\t\t#Now recheck 
against regexes not associated with a specific port (port 'all').\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs['all']:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif (not ManualServerDescription.has_key(FromPort) ):\n\t\t\t\t\t\tif (sport == \"22\") and (Payload != None) and (Payload.find('SSH-') > -1):\n\t\t\t\t\t\t\tif ( (Payload.find('SSH-1.99-OpenSSH_') > -1) or (Payload.find('SSH-2.0-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/openssh\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/openssh\"\n\t\t\t\t\t\t\telif (Payload.find('SSH-1.5-') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/generic\"\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' ESMTP Sendmail ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/sendmail\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/sendmail\"\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' - Welcome to our SMTP server ESMTP') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t#Check for port 80 and search for \"Server: \" once\n\t\t\t\t\t\telif (sport == \"80\") and (Payload != None) and (Payload.find('Server: ') > -1):\n\t\t\t\t\t\t\tif (Payload.find('Server: Apache') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/apache\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/apache\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Embedded HTTP Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/embedded\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/embedded\"\n\t\t\t\t\t\t\telif (Payload.find('Server: gws') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/gws\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/gws\"\n\t\t\t\t\t\t\telif (Payload.find('Server: KFWebServer') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/kfwebserver\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/kfwebserver\"\n\t\t\t\t\t\t\telif (Payload.find('Server: micro_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + 
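
# ServiceFPs is consumed above as a dict mapping a port number (or the key 'all') to a list of
# (compiled_regex, description) tuples, with "$N" placeholders swapped for capture groups. A
# tiny self-contained illustration of that lookup; the table entries are hypothetical and only
# the access pattern mirrors the loops above.
import re

SERVICE_FPS = {
    80:    [(re.compile(r'Server: Apache/([\d.]+)'), 'http/apache $1')],
    'all': [(re.compile(r'^SSH-2\.0-([\w.]+)'), 'ssh/$1')],
}

def describe_payload(payload, port):
    for regex, template in SERVICE_FPS.get(port, []) + SERVICE_FPS.get('all', []):
        match = regex.search(payload)
        if match:
            description = template
            for index in range(1, len(match.groups()) + 1):
                # Replace "$1", "$2", ... with the captured substrings.
                description = description.replace('$' + str(index), match.group(index))
            return description
    return None

# describe_payload('Server: Apache/2.4.58', 80) -> 'http/apache 2.4.58'
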
sport, \"listening\", \"http/micro-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/micro-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Microsoft-IIS') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/iis\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/iis\"\n\t\t\t\t\t\t\telif (Payload.find('Server: lighttpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/lighttpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/lighttpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: MIIxpc') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mirrorimage\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mirrorimage\"\n\t\t\t\t\t\t\telif (Payload.find('Server: mini_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mini-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mini-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nc -l -p 80') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nc\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nc\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nginx/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nginx\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nginx\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Nucleus') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nucleus\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nucleus\"\n\t\t\t\t\t\t\telif (Payload.find('Server: RomPager') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/rompager\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/rompager\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/server\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/server\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Sun-ONE-Web-Server/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/sun-one\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/sun-one\"\n\t\t\t\t\t\t\telif (Payload.find('Server: TrustRank Frontend') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/trustrank\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/trustrank\"\n\t\t\t\t\t\t\telif (Payload.find('Server: YTS/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/yahoo\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/yahoo\"\n\t\t\t\t\t\t\telif (Payload.find('HTTP/1.0 404 Not Found') > -1) or (Payload.find('HTTP/1.1 200 OK') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/generic\"\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"110\") and (Payload != None) 
and (Payload.find('POP3 Server Ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"pop3/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"pop3/generic\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find('* OK dovecot ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/dovecot\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/dovecot\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find(' IMAP4rev1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"783\") and (Payload != None) and (Payload.find('SPAMD/1.1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"spamd/spamd\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"spamd/spamd\"\n\t\t\t\t\t\telif ( (sport == \"3128\") or (sport == \"80\") ) and (Payload != None) and (Payload.find('Via: ') > -1) and (Payload.find(' (squid/') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"proxy/squid\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"proxy/squid\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\telif ((LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\t\t#If the \"To\" side is a known TCP server:\n\t\t\t\t\tClientKey = sIP + \",TCP_\" + dport\t#Note: CLIENT ip and SERVER port\n\t\t\t\t\tif (not ClientDescription.has_key(ClientKey)):\n\t\t\t\t\t\tif (dport == \"22\") and (Payload != None) and ( (Payload.find('SSH-2.0-OpenSSH_') > -1) or (Payload.find('SSH-1.5-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"ssh/openssh\")\n\t\t\t\t\t\t#As cute as it is to catch this, it miscatches any relay that's carrying a pine-generated mail.\n\t\t\t\t\t\t#elif (dport == \"25\") and (Payload != None) and (Payload.find('Message-ID: <Pine.') > -1):\n\t\t\t\t\t\t#\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"smtp/pine\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: libwww-perl/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/libwww-perl\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Lynx') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/lynx\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Mozilla') > -1) and (Payload.find(' Firefox/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/firefox\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Wget/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/wget\")\n\t\t\t\t\t\telif (dport == \"143\") and (Payload != None) and (Payload.find('A0001 CAPABILITY') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"imap/generic\")\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\t\t\telif (dport 
== \"783\") and (Payload != None) and (Payload.find('PROCESS SPAMC') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"spamd/spamc\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\telse:\t#Neither port pair is known as a server\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t#Following is debugging at best; it should only show up early on as the sniffer listens to conversations for which it didn't hear the SYN/ACK\n\t\t\t\t\t#print \"note: neither \" + FromPort + \" nor \" + ToPort + \" is listed as a live service.\"\n\t\t\telse:\t#Other TCP flag combinations here\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 17 and (type(p['UDP']) == UDP):\t\t#UDP. We have to check the object type as well as we do get (corrupted? truncated?) packets with type 17 that aren't udp: AttributeError: 'NoneType' object has no attribute 'sport'\n\t\t\t#FIXME - possibly run udp packets through ServiceFPs as well?\n\t\t\tsport=str(p['UDP'].sport)\n\t\t\tdport=str(p['UDP'].dport)\n\t\t\tSrcService = sIP + \",UDP_\" + sport\n\t\t\tDstService = dIP + \",UDP_\" + dport\n\t\t\tSrcClient = sIP + \",UDP_\" + dport\n\t\t\tPayload = p['Raw.load']\n\n\t\t\t#Multicast DNS: http://files.multicastdns.org/draft-cheshire-dnsext-multicastdns.txt\n\t\t\t#- usually sent to 224.0.0.251 (or FF02::FB) (link-local multicast).\n\t\t\t#\t- if \".local.\" in query, these MUST be the target IPs\n\t\t\t#\t- non-local queries may be sent to these or normal dns servers\n\t\t\t#\t- rdns queries for \"254.169.in-addr.arpa.\" MUST be sent to 224.0.0.251\n\t\t\t#\t- rdns queries for \"8.e.f.ip6.arpa.\", \"9.e.f.ip6.arpa.\",\"a.e.f.ip6.arpa.\", and \"b.e.f.ip6.arpa.\" MUST be sent to the IPv6 mDNS link-local multicast address FF02::FB.\n\t\t\t#- sent to udp port 5353\n\t\t\t#- generic clients may use \"single-dns-object.local.\", such as \"sparrow.local.\"\n\t\t\t#- responses have IP TTL = 255 to check that packet originated on-lan\n\n\t\t\t#Multicast DNS, placed next to normal dns, out of numerical order\n\t\t\tif (dport == \"5353\") and ( (p['IP'].ttl == 1) or (p['IP'].ttl == 255) ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcClient)) or (LiveUDPService[SrcClient] == False)):\n\t\t\t\t\tLiveUDPService[SrcClient] = True\n\t\t\t\t\tif (dIP == \"224.0.0.251\"):\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/broadcastclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/client\")\n\n\t\t\t\t\t#Extract dns answers like with 53; change elif to if and add 5353 to ports on next if?\n\t\t\t\t\t#At the moment, no; scapy does not appear to parse 5353 as dns.\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tUnhandledPacket(p)\n\t\t\t#FIXME - add check for \"if isinstance(p['DNS'], whatevertype):\there and at all p[] accesses.\n\t\t\telif (sport == \"53\") and (isinstance(p['DNS'], DNS)) and (p['DNS'].qr == 1):\t\t#qr == 1 is a response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t#FIXME - Also report the TLD from one of the query answers to show what it's willing to answer for?\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"dns/server\")\n\t\t\t\t#Now we extract dns answers. 
First, check that there's no dns error:\n\t\t\t\tif (p['DNS'].rcode == 0):\t\t\t#No error\n\t\t\t\t\tDNSBlocks = [ ]\n\t\t\t\t\tCNAMERecs = [ ]\t\t\t\t#We hold onto all cnames until we've processed all PTR's and A's here\n\t\t\t\t\tif (p['DNS'].ancount > 0):\t\t#If we have at least one answer from the answer block, process it\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].an)\n\t\t\t\t\tif (p['DNS'].arcount > 0):\t\t#Likewise for the \"additional\" block\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].ar)\n\t\t\t\t\tfor OneAn in DNSBlocks:\n\t\t\t\t\t\t#Thanks to Philippe Biondi for showing me how to extract additional records.\n\t\t\t\t\t\t#Debug(\"Start dns extract\" + str(p['DNS'].ancount))\n\t\t\t\t\t\t#OneAn = p[DNS].an\n\t\t\t\t\t\t#while OneAn is not NoPayload:\t\t#This doesn't seem to stop at the end of the list; incorrect syntax.\n\t\t\t\t\t\twhile isinstance(OneAn,DNSRR):\t\t#Somewhat equivalent:\twhile not isinstance(an, NoPayload):\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t#print \"Type: \" + str(type(OneAn))\t\t#All of type scapy.DNSRR\n\t\t\t\t\t\t\tif (OneAn.rclass == 1) and (OneAn.type == 1):\t\t#\"IN\" class and \"A\" type answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\t#Check new hostname to see if it's in the list.\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",A\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",A\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"A\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 2):\t\t\t#\"IN\" class and \"NS\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Perhaps later\n\t\t\t\t\t\t\t\t#Like cnames, this is object -> nameserver hostname, so these would need to be queued like cnames until we're done with A's and PTR's.\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 5):\t\t\t#\"IN\" class and \"CNAME\" answer\n\t\t\t\t\t\t\t\tCNAMERecs.append(OneAn)\t\t\t\t\t#Remember the record; we'll process these after the PTR's and A's\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 6):\t\t\t#\"IN\" class and \"SOA\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Not immediately useful, perhaps later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 12):\t\t#\"IN\" class and \"PTR\" type answer\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#For input of '182.111.59.66.in-addr.arpa.' 
:\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rrname.replace(\".in-addr.arpa.\", \"\")\t\t# '182.111.59.66'\n\t\t\t\t\t\t\t\tDNSIPAddr = DNSIPAddr.split('.')\t\t\t\t# ['182', '111', '59', '66']\n\t\t\t\t\t\t\t\tDNSIPAddr.reverse()\t\t\t\t\t\t# ['66', '59', '111', '182']\n\t\t\t\t\t\t\t\tDNSIPAddr = string.join(DNSIPAddr, '.')\t\t\t\t# '66.59.111.182'\n\t\t\t\t\t\t\t\t#Check that we end up with a legal IPv4 address before continuing; we're getting garbage.\n\t\t\t\t\t\t\t\tif (re.search('^[1-9][0-9\\.]*[0-9]$', DNSIPAddr) == None):\n\t\t\t\t\t\t\t\t\tDebug(\"Odd PTR rrname: \" + OneAn.rrname)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tDNSHostname = OneAn.rdata.lower()\n\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",PTR\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",PTR\"])):\n\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"PTR\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 15):\t\t#\"IN\" class and \"MX\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Possibly later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 28):\t\t#\"IN\" class and \"AAAA\" answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata.upper()\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",AAAA\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",AAAA\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"AAAA\", DNSHostname, \"\")\n\n\t\t\t\t\t\t\t#Move to the next DNS object in the \"an\" block\n\t\t\t\t\t\t\tOneAn = OneAn.payload\n\t\t\t\t\tfor OneCNAME in CNAMERecs:\t\t#Now that we have all A/PTR's, go back and turn cname records into pseudo-A's\n\t\t\t\t\t\tif isinstance(OneCNAME,DNSRR):\n\t\t\t\t\t\t\tAlias = OneCNAME.rrname.lower()\n\t\t\t\t\t\t\tExisting = OneCNAME.rdata.lower()\n\t\t\t\t\t\t\tif isFQDN(Alias) and isFQDN(Existing):\n\t\t\t\t\t\t\t\tif HostIPs.has_key(Existing):\n\t\t\t\t\t\t\t\t\tfor OneIP in HostIPs[Existing]:\t\t\t\t#Loop through each of the IPs for the canonical name, and\n\t\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(OneIP + \",CNAME\")) or (not(Alias in DNSRecord[OneIP + \",CNAME\"])):\n\t\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", OneIP, \"CNAME\", Alias, \"\")\t#report them as kind-of A records for the Alias.\n\t\t\t\t\t\t\t\t#If we don't have a A/PTR record for \"Existing\", just ignore it. 
Hopefully we'll get the Existing A/PTR in the next few answers, and will re-ask for the CNAME later, at which point we'll get a full cname record.\n\t\t\t\t\t\t\t\t#else:\n\t\t\t\t\t\t\t\t#\tDebug(\"CNAME \" + Alias + \" -> \" + Existing + \" requested, but no IP's for the latter, skipping.\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tDebug(\"One of \" + Alias + \" and \" + Existing + \" isn't an FQDN, skipping cname processing.\")\n\t\t\t\telif (p['DNS'].rcode == 1):\t\t\t#FormErr: server responding to an improperly formatted request\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 2):\t\t\t#ServFail: domain exists, root nameservers list authoritative name servers, but authNS's won't answer queries\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 3):\t\t\t#NXDOMAIN: root nameservers don't have any listing (domain doesn't exist or is on hold)\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 5):\t\t\t#Query refused\n\t\t\t\t\tpass\n\t\t\t\telse:\t#rcode indicates an error\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"53\") and (type(p['DNS']) == DNS) and (p['DNS'].qr == 0):\t#dns query\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"dns/client\")\n\t\t\telif (sport == \"67\") and (dport == \"68\"):\t\t#Bootp/dhcp server talking to client\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"bootpordhcp/server\")\n\t\t\telif (sport == \"68\") and (dport == \"67\"):\t\t#Bootp/dhcp client talking to server\n\t\t\t\tif (sIP != \"0.0.0.0\"):\t\t\t\t#If the client is simply renewing an IP, remember it.\n\t\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"bootpordhcp/client\")\n\t\t\t\t#else:\t\t\t\t\t\t#If you want to record which macs are asking for addresses, do it here.\n\t\t\t\t#\tpass\n\t\t\telif (sport == \"123\") and (dport == \"123\") and (p['NTP'].stratum != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/generic\")\n\t\t\telif (dport == \"123\") and ( (dIP == \"216.115.23.75\") or (dIP == \"216.115.23.76\") or (dIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ntp/vonageclient\")\n\t\t\telif (sport == \"123\") and ( (sIP == \"216.115.23.75\") or (sIP == \"216.115.23.76\") or (sIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/vonageserver\")\n\t\t\telif (dport == \"137\"):\t\t\t#netbios-ns\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (p['Ethernet'].dst.upper() == \"FF:FF:FF:FF:FF:FF\"):\t\t\t#broadcast\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/broadcastclient\")\n\t\t\t\t\telif 
(Payload != None) and (Payload.find('CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA') > -1):\t#wildcard\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/wildcardclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/unicastclient\")\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"500\") and (dport == \"500\") and (p['ISAKMP'].init_cookie != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"isakmp/generic\")\n\t\t\telif (dport == \"512\"):\t\t\t#BIFF\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('@') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"biff/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (dport == \"1026\") or (dport == \"1027\") or (dport == \"1028\") ):\t#winpopup spam client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and ( (Payload.find('Download Registry Update from:') > -1) or (Payload.find('CRITICAL ERROR MESSAGE! - REGISTRY DAMAGED AND CORRUPTED.') > -1) or (Payload.find('Your system registry is corrupted and needs to be cleaned immediately.') > -1) or (Payload.find('CRITICAL SYSTEM ERRORS') > -1) ):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"winpopup/spamclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"1434\"):\t\t#Probable mssql attack\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Qh.dll') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mssql/clientattack\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"1900\") and (dport == \"1900\") and (dIP == \"239.255.255.250\"):\t\t#SSDP\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('NOTIFY') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ssdp/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"3865\") and (dIP == \"255.255.255.255\"):\t\t#XPL, http://wiki.xplproject.org.uk/index.php/Main_Page\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"xpl/client\")\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (dIP == \"216.115.30.28\") or (dIP == \"69.59.227.77\") or (dIP == \"69.59.232.33\") or (dIP == \"69.59.240.84\") ):\t\t#Vonage SIP client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061 SIP/2.0') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tSipMatch = SipPhoneMatch.search(Payload)\n\t\t\t\t\t\tif (SipMatch != None) and (len(SipMatch.groups()) >= 
1):\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client, phone number: \" + SipMatch.group(1))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (sIP == \"216.115.30.28\") or (sIP == \"69.59.227.77\") or (sIP == \"69.59.232.33\") or (sIP == \"69.59.240.84\") ):\t#Vonage SIP server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061>') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"sip/vonage_server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"6515\") and (dport == \"6514\") and (dIP == \"255.255.255.255\"):\t\t#mcafee ASaP broadcast, looking for a proxy out. http://www.myasap.de/intl/EN/content/virusscan_asap/faq_new.asp\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('<rumor version=') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"asap/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"9052\") or (sport == \"9053\") or (sport == \"9054\") ) and ( (sIP == \"205.188.146.72\") or (sIP == \"205.188.157.241\") or (sIP == \"205.188.157.242\") or (sIP == \"205.188.157.243\") or (sIP == \"205.188.157.244\") or (sIP == \"64.12.51.145\") or (sIP == \"64.12.51.148\") or (sIP == \"149.174.54.131\") ):\t#Possibly AOL dns response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('dns-01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"aoldns/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27005\") and ( (dport == \"27016\") or (dport == \"27017\") ):\t\t\t\t#Halflife client live game\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27013\") and (dIP == \"207.173.177.12\"):\t\t\t\t#variable payload, so can't (Payload != None) and (Payload.find('Steam.exe') > -1)\t\t\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (sport == \"27013\") and (sIP == \"207.173.177.12\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (sport == \"27016\") or (sport == \"27017\") ) and (dport == \"27005\"):\t\t\t\t#halflife server live game\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"27015\") or (dport == 
\"27016\") or (dport == \"27025\") or (dport == \"27026\") ):\t\t#Variable payload, so can't: (Payload != None) and (Payload.find('basic') > -1)\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27017\") and ( (dIP == \"69.28.148.250\") or (dIP == \"69.28.156.250\") or (dIP == \"72.165.61.161\") or (dIP == \"72.165.61.185\") or (dIP == \"72.165.61.186\") or (dIP == \"72.165.61.188\") or (dIP == \"68.142.64.164\") or (dIP == \"68.142.64.165\") or (dIP == \"68.142.64.166\") ):\t#Steamfriends client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"steamfriends/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27017\") and ( (sIP == \"69.28.148.250\") or (sIP == \"69.28.156.250\") or (sIP == \"72.165.61.161\") or (sIP == \"72.165.61.185\") or (sIP == \"72.165.61.186\") or (sIP == \"72.165.61.188\") or (sIP == \"68.142.64.164\") or (sIP == \"68.142.64.165\") or (sIP == \"68.142.64.166\") ):\t#Steamfriends server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"steamfriends/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"21020\") or (sport == \"21250\") or (sport == \"27016\") or (sport == \"27017\") or (sport == \"27018\") or (sport == \"27030\") or (sport == \"27035\") or (sport == \"27040\") or (sport == \"28015\") ):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Team Fortress') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27019\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"1265\") or (dport == \"20100\") or (dport == \"21550\") or (dport == \"27000\") or (dport == \"27017\") or (dport == \"27018\") or (dport == \"27019\") or (dport == \"27022\") or (dport == \"27030\") or (dport == \"27035\") or (dport == \"27050\") or (dport == \"27078\") or (dport == \"27080\") or (dport == \"28015\") or (dport == \"28100\") or (dport == \"45081\") ):\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Source Engine Query') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"24441\"):\t\t\t#Pyzor\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif 
(Payload != None) and (Payload.find('User:') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"pyzor/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t#FIXME - interesting issue; the ttl<5 test will catch traceroutes coming into us, but not ones we're creating to go out. Hmmm.\n\t\t\telif ( (dport >= \"33434\") and (dport <= \"33524\") ) and (p['IP'].ttl <= 5):\t#udptraceroute client\n\t\t\t\tif ((not LiveUDPClient.has_key(sIP + \"UDP_33434\")) or (LiveUDPClient[sIP + \"UDP_33434\"] == False)):\n\t\t\t\t\tLiveUDPClient[sIP + \"UDP_33434\"] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_33434\", \"open\", \"udptraceroute/client\")\n\t\t\telif (dport == \"40348\"):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('HLS') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (p['IP'].frag > 0):\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"207.46.51.74\") or (sIP == \"65.55.251.10\"):\t\t\t\t#Bigfish.com - dns?\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"61.215.106.146\"):\t\t\t\t#junk\n\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tDebug(\"Other IP protocol (\" + str(p['IP'].src) + \"->\" + str(p['IP'].dst) + \"): \" + str(p['IP'].proto))\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x86DD:\t\t#IPv6\n\t\tUnhandledPacket(p)\n\telse:\n\t\tprint \"Unregistered ethernet type:\", p['Ethernet'].type\n\t\tUnhandledPacket(p)", "def _add_pkt_into_tcp_stream(self, pcap_packet, num):\n \n # the src is server, remote(dst) is client\n if (pcap_packet.ip.dst == _device_ip): # HUA use ip (not 80 port) as direction judgement\n server_addr = pcap_packet.ip.src\n server_port = pcap_packet.tcp.src_port\n client_addr = pcap_packet.ip.dst\n client_port = pcap_packet.tcp.dst_port\n else:\n server_addr = pcap_packet.ip.dst\n server_port = pcap_packet.tcp.dst_port\n client_addr = pcap_packet.ip.src\n client_port = pcap_packet.tcp.src_port\n socket_tuple = (client_addr, client_port, server_addr, server_port)\n if (socket_tuple not in self.tcp_stream_container):\n self.tcp_stream_container[socket_tuple] = Tcp_stream()\n pcap_packet.tcp.stream_index = self.tcp_stream_container[socket_tuple].stream_index\n self.tcp_stream_container[socket_tuple].pcap_num_list.append(num)", "def get_route(self, srcip, daddr): #destinations add of this packet\n # TODO fill in peer?\n peer = None\n\n pos_routes = self.lookup_routes(daddr)\n\n #prefix stuff\n\n pos_routes = self.prefix_stuff(daddr, pos_routes) #look through possible\n #and find prefix matching destination ip\n\n # Rules go here\n #if pos_routes:\n # 1. Highest Preference\n #pos_routes = self.get_highest_preference(pos_routes)\n # 2. Self Origin\n # pos_routes = self.get_self_origin(pos_routes)\n # 3. Shortest ASPath\n # pos_routes = self.get_shortest_as_path(pos_routes)\n # 4. EGP > IGP > UNK\n # pos_routes = self.get_origin_routes(pos_routes)\n # 5. 
Lowest IP Address\n\n #daddrbit = self.ip_to_bits(daddr)\n\n # Final check: enforce peering relationships\n\n #route = self.filter_relationships(srcip, pos_routes)\n #lowestip = 0;\n\n\n peer = pos_routes[0]\n\n return self.sockets[peer] if peer else None", "def routePacket(self, packet):\n\n for rule in self._rules:\n # check rule mask vs packet ip\n ip = IPHelper.ipToLong(packet._ip)\n\n if rule._raw_ip == '*' or (rule._ip_mask_val & ip == rule._good_ip):\n if rule._direction == packet._direction:\n for p in rule._ports:\n if p == packet._port or p == '*':\n if rule._flag is None:\n #packet is non-established connection\n return rule\n elif rule._flag == 'established' and packet._flag == '1':\n #packet is established connection and flag has been set for established connection\n return rule\n else:\n pass\n else:\n pass\n else:\n pass\n else:\n pass\n\n return None", "def test_overlap1(self):\n\n fragments = []\n for _, frags_400, frags_300 in self.pkt_infos:\n if len(frags_300) == 1:\n fragments.extend(frags_400)\n else:\n for i, j in zip(frags_300, frags_400):\n fragments.extend(i)\n fragments.extend(j)\n\n dropped_packet_indexes = set(\n index for (index, _, frags) in self.pkt_infos if len(frags) > 1\n )\n\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(\n len(self.pkt_infos) - len(dropped_packet_indexes)\n )\n self.verify_capture(packets, dropped_packet_indexes)\n self.src_if.assert_nothing_captured()", "def main(self):\n self.pid = os.getpid()\n self.fdp.close() # Close fdp on child side\n if self.datasock is None:\n # Create session's data socket and load file\n self.datasock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.load_file()\n logging.info(\"Child process finished loading file\")\n port = UDP_START + self.meta.sessionid # Port used by the session\n poller = select.poll() # poll fdc and datasock\n poller.register(self.fdc.fileno(), select.POLLIN)\n poller.register(self.datasock.fileno(), select.POLLOUT)\n pkt_p = snc.snc_alloc_empty_packet(snc.snc_get_parameters(self.sc))\n while True:\n for fd, event in poller.poll():\n if fd == self.fdc.fileno() and event is select.POLLIN:\n pkt, ip = self.fdc.recv()\n logging.info(\"Session [%d] received msg <%s> from %s.\" %\n (self.meta.sessionid, iMSG[pkt.header.mtype], ip))\n if pkt.header.mtype == MSG['REQ_SEG']:\n self.add_client(HostInfo(ip, self.meta.sessionid))\n self.fdc.send(CCPacket(CCHeader(MSG['OK'])))\n elif pkt.header.mtype == MSG['HEARTBEAT']:\n self.client_heartbeat(ip)\n self.fdc.send(CCPacket(CCHeader(MSG['OK'])))\n elif pkt.header.mtype == MSG['REQ_STOP'] or pkt.header.mtype == MSG['EXIT']:\n self.remove_client(ip)\n self.fdc.send(CCPacket(CCHeader(MSG['OK'])))\n\n if fd == self.datasock.fileno() and event is select.POLLOUT:\n # writable datasock, send data packets to clients\n for cli in self.clients:\n snc.snc_generate_packet_im(self.sc, pkt_p)\n pktstr = pkt_p.contents.serialize(self.meta.sp.size_g,\n self.meta.sp.size_p,\n self.meta.sp.bnc)\n try:\n # Construct data packet with serialized snc_packet\n self.datasock.sendto(CCPacket(CCHeader(MSG['DATA']), pktstr).packed(), (cli.ip, port))\n except:\n logging.warning(\"Caught exception in session %s.\"\n % (self.meta.sessionid,))\n self.lastIdle = datetime.now() # Refresh idle time\n self.housekeeping()", "def read_packet(self):\n\n\t\t#self.debug(\"READ BUFFER SIZE: %d\" % len(self.buff))\n\t\tbackup = self.buff[:]\n\t\tpacket = Packet()\n\t\ttry:\n\t\t\tpacket.direction = 
self.node\n\t\t\tpacket.ident = self.unpack('ubyte')\n\t\t\t\n\t\t\t#Defined structs from huge dict\n\t\t\tfor datatype, name in self.get_struct(packet):\n\t\t\t\t# this populates packet.data with {name: value}\n\t\t\t\tpacket.data[name] = self.unpack(datatype)\n\n\t\t\t# I believe the following are packet-type specific fixes for variable-length packets.\n\n\t\t\t#0x17\n\t\t\tif packet.ident == 0x17:\n\t\t\t\tif packet.data['unknown'] > 0:\n\t\t\t\t\tpacket.data['x2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['y2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['z2'] = self.unpack('short')\n\t\t\n\t\t\t#0x33\n\t\t\tif packet.ident in (0x33, 0x34):\n\t\t\t\tpacket.data['data'] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n#\t\t\t#0x34\n#\t\t\tif packet.ident == 0x34:\n#\t\t\t\tcoords = self.unpack_array_fast('short', packet.data['data_size'])\n#\t\t\t\tbtype = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tmetadata = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tpacket.data[\"blocks\"] = []\n#\t\t\t\tfor i in zip(coords, btype, metadata):\n#\t\t\t\t\tblock = {}\n#\t\t\t\t\tblock[\"x\"] =\t\ti[0] >> 12\n#\t\t\t\t\tblock[\"z\"] = 0x0F & i[0] >> 8\n#\t\t\t\t\tblock[\"y\"] = 0xFF & i[0]\n#\t\t\t\t\tblock[\"type\"] = i[1]\n#\t\t\t\t\tblock[\"metadata\"] = i[2]\n#\t\t\t\t\tpacket.data[\"blocks\"].append(block)\n#\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x3C\n\t\t\tif packet.ident == 0x3C:\n\t\t\t\trecords = self.unpack_array_fast('byte', packet.data['data_size']*3)\n\t\t\t\ti = 0\n\t\t\t\tpacket.data[\"blocks\"] = []\n\t\t\t\twhile i < packet.data['data_size']*3:\n\t\t\t\t\tpacket.data[\"blocks\"].append(dict(zip(('x','y','z'), records[i:i+3])))\n\t\t\t\t\ti+=3\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x68\n\t\t\tif packet.ident == 0x68:\n\t\t\t\tpacket.data[\"slots_data\"] = self.unpack_array('slot', packet.data[\"data_size\"])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\t#0x82:\n\t\t\tif packet.ident == 0x82:\n\t\t\t\tpacket.data[\"text\"] = []\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tpacket.data[\"text\"].append(packet.data[\"line_%s\" % (i+1)])\n\t\t\t\t\t\n\t\t\t#0x83\n\t\t\tif packet.ident == 0x83:\n\t\t\t\tpacket.data[\"data\"] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\n\t\t\t# Sets packet.original to the byte string that the packet was decoded from.\n\t\t\tpacket.original = backup[:len(backup) - len(self.buff)]\n\n\t\t\treturn packet\n\n\t\texcept IncompleteData:\n\t\t\tself.buff = backup\n\t\t\treturn None\n\t\texcept Exception, ex:\n\t\t\tself.buff = backup\n\t\t\tex.args += (self.buff[20:],)\n\t\t\traise", "def _packets_from_tshark_sync(self, tshark_process, packet_count=None, timeout:float=3.0,\n max_data_length:int=10000):\n # NOTE: This has code duplication with the async version, think about how to solve this\n\n psml_structure, data = self.eventloop.run_until_complete(self._get_psml_struct(tshark_process.stdout))\n packets_captured = 0\n\n data = b\"\"\n try:\n while self.is_open.value:\n try:\n packet, data = self.eventloop.run_until_complete(\n self._get_packet_from_stream(tshark_process.stdout, \n data,\n psml_structure=psml_structure,\n got_first_packet=packets_captured > 0, \n timeout=timeout))\n except EOFError:\n echo(\"Caught EOF\", file=Interceptor.stdout)\n self._log.debug(\"EOF reached (sync)\")\n break\n\n if(packet is False): continue\n\n if packet:\n packets_captured += 1\n yield 
packet\n if packet_count and packets_captured >= packet_count:\n break\n if len(data) > max_data_length:\n data = b''\n finally:\n if tshark_process in self._running_processes:\n self.eventloop.run_until_complete(self._cleanup_subprocess(tshark_process))", "def agent_player(env_name, ip):\n\n # Create the main generator\n receiver_gen = atari_frames_generator(env_name, ip)\n\n # Loop\n while True:\n\n # Receive\n frame, termination = next(receiver_gen)\n\n # Skip if repeated\n assert termination in (\"continue\", \"last\", \"repeated_last\")\n if termination == \"repeated_last\":\n continue\n\n # Return\n yield frame", "def read_pkt_line(self):\n if self._readahead is None:\n read = self.read\n else:\n read = self._readahead.read\n self._readahead = None\n\n try:\n sizestr = read(4)\n if not sizestr:\n raise HangupException()\n size = int(sizestr, 16)\n if size == 0:\n if self.report_activity:\n self.report_activity(4, \"read\")\n return None\n if self.report_activity:\n self.report_activity(size, \"read\")\n pkt_contents = read(size - 4)\n except socket.error as e:\n raise GitProtocolError(e)\n else:\n if len(pkt_contents) + 4 != size:\n raise GitProtocolError(\n \"Length of pkt read %04x does not match length prefix %04x\"\n % (len(pkt_contents) + 4, size)\n )\n return pkt_contents", "def getPacket(self, index):\n\t\treturn self.packets[index.row()]", "def print_packet(self, pkt):\n ip_layer = pkt.getlayer(IP)\n print(\"[!] New Packet: {src} -> {dst}\".format(src=ip_layer.src, dst=ip_layer.dst))", "def sniffer():\n try:\n sniff(iface=INTERFACE, prn=print_frame, filter='udp and (port bootps or bootps)', store=0)\n except Exception as _e:\n print(\"ERROR - sniffer(): {} {}\".format(_e.args, _e.message))", "def from_physical_layer(conn, FRAME_LENGTH, FORMAT):\r\n frame = conn.recv(FRAME_LENGTH).decode(FORMAT)\r\n print(f\"[from_physical_layer] frame:{frame}\")\r\n return frame", "def recieve_can(offset):\n panda = Panda()\n while True:\n data = panda.can_recv()\n if data != []: \n for x in data:\n if x[0] == offset: \n if x[3] == 0: \n mes = f'{x[0]}, 0x{x[2].hex()}, {x[3]}'\n print(mes)", "def nat_openconn(destmac, destport, localip=None, localport=None, timeout = 5, forwarderIP=None,forwarderPort=None,usetimeoutsock=False): \r\n # cast the destmac to a string, internal methods may fail otherwise\r\n destmac = str(destmac)\r\n\r\n # use the forwarderIP and port provided\r\n if forwarderIP != None and forwarderPort != None:\r\n return _nat_try_connection_list([(forwarderIP,forwarderPort)],\r\n localip,localport,timeout,destmac,destport,usetimeoutsock) \r\n \r\n \r\n # lookup the destmac if forwarderIP and port are not provided\r\n else: \r\n # check the cache\r\n if destmac in NAT_SRV_CACHE:\r\n forwarders = NAT_SRV_CACHE[destmac]\r\n \r\n try:\r\n return _nat_try_connection_list(forwarders,localip,localport,timeout,destmac,destport,usetimeoutsock)\r\n \r\n except: # remove this entry from the cache\r\n del NAT_SRV_CACHE[destmac] \r\n \r\n # the cache failed, so do a fresh lookup\r\n forwarders = nat_server_list_lookup(destmac)\r\n socket = _nat_try_connection_list(forwarders,localip,localport,timeout,destmac,destport,usetimeoutsock)\r\n \r\n #this list succeded so add it to the cache\r\n NAT_SRV_CACHE[destmac] = forwarders\r\n \r\n return socket", "def payload_data(self, pkts):\n\n\t\t#Get all the payload bytes exchanged over MPTCP connections\n\t\tpayload_bytes = 0\n\t\tprint \"Determining the number of payload bytes excluding headers....\"\n\t\t#DSS = 0x2\n\t\tfor i in 
range(len(pkts)):\n\t\t\tif(TCPOption_MP in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 2 and Raw in pkts[i]):\n\t\t\t\tpayload_bytes += len(pkts[i][Raw].load)\n\t\t\t\t#print(\"DSN: %s; subflow_seqnum: %s; Data(bytes): %s\" % (pkts[i][TCPOption_MP].mptcp.dsn, pkts[i][TCPOption_MP].mptcp.subflow_seqnum, len(pkts[i][Raw].load)))\n\n\t\tprint \"Total Number of payload bytes in the file (entire MPTCP connections) excluding headers): %s\" % (payload_bytes)\n\t\t#MPTCP WITH SUBFLOW CONNECTIONS\n\t\t#MPTCP_JOINs = 0x1\n\t\tprint \"============================================================\"\n\t\tprint \"SUBFLOW Connections with their respective MPTCP connection (identified by connectionID)\"\n\t\tfor i in range(len(pkts)):\n\n\t\t\t#Initial Join Message\n\t\t\t#rcv_token Identifies the connection to which the subflow belongs: connectionID\n\t\t\tif(MPTCP_JoinSYN in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 1):\n\t\t\t\tprint(\"New subflow: connectionID: %s; src: %s; dest: %s; snd_nonce: %s\" % (pkts[i][TCPOption_MP].mptcp.rcv_token, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_nonce))\n\n\t\t#TODO: Now Need to track per-connection and per-subflow state", "def test_overlap1(self):\n\n fragments = []\n for _, _, frags_300, frags_200 in self.pkt_infos:\n if len(frags_300) == 1:\n fragments.extend(frags_300)\n else:\n for i, j in zip(frags_200, frags_300):\n fragments.extend(i)\n fragments.extend(j)\n\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n self.src_if.assert_nothing_captured()\n\n # run it all to verify correctness\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n self.src_if.assert_nothing_captured()", "def sniff_offline(args):\n print('viewer: reading from ' + args.read)\n\n try:\n with open(args.read, 'rb') as f:\n reader = dpkt.pcap.Reader(f)\n\n if not args.count:\n count = True\n else:\n count = args.count\n\n while count:\n ts, pkt = next(iter(reader))\n ret = parse_ICMP_Echo(ts, pkt)\n\n if ret and args.count:\n count -= 1\n except FileNotFoundError as e:\n print('File \\'{}\\' not found.'.format(args.read))\n sys.exit(1)\n except StopIteration:\n sys.exit(0)", "def packet_to_kml(packet, reader):\n\n try:\n src_ip = packet[IP].src\n src_kml = ip_to_kml(src_ip, reader)\n except:\n src_kml = None\n try:\n dest_ip = packet[IP].dest\n dest_kml = ip_to_kml(dest_ip, reader)\n except:\n dest_kml = None\n\n if src_kml is not None and dest_kml is not None:\n connect_kml = ips_to_line_kml(src_ip, dest_ip, reader)\n print(\"Added connection\")\n else:\n connect_kml = None\n\n return src_kml, dest_kml, connect_kml", "def create_stream(cls, packet_sizes, packet_count=test_packet_count):\n for i in range(0, packet_count):\n info = cls.create_packet_info(cls.src_if, cls.src_if)\n payload = cls.info_to_payload(info)\n p = (\n IP(id=info.index, src=cls.src_if.remote_ip4, dst=cls.dst_if.remote_ip4)\n / UDP(sport=1234, dport=5678)\n / Raw(payload)\n )\n size = packet_sizes[(i // 2) % len(packet_sizes)]\n cls.extend_packet(p, size, cls.padding)\n info.data = p", "def ReceiveMessageFromPacketInfo(self) -> IPPacketInformation:", "def give_packets(self, packets, verbose=False, cache=False, tunnel=None, source_sock_addr=None):\n assert isinstance(packets, list)\n assert all(isinstance(packet, str) for packet in packets)\n 
assert isinstance(verbose, bool)\n assert isinstance(cache, bool)\n assert tunnel is None, \"TUNNEL property is set using init_socket(...)\"\n assert source_sock_addr is None or isinstance(source_sock_addr, tuple), type(source_sock_addr)\n if verbose:\n logger.debug(\"giving %d bytes\", sum(len(packet) for packet in packets))\n if source_sock_addr is None:\n source_sock_addr = self.lan_address\n candidate = Candidate(source_sock_addr, self._tunnel)\n self._dispersy.on_incoming_packets([(candidate, packet) for packet in packets], cache=cache, timestamp=time())\n return packets" ]
[ "0.5710354", "0.5529603", "0.5436237", "0.5389503", "0.5370659", "0.5341635", "0.5319951", "0.5312948", "0.5296589", "0.52821374", "0.5232271", "0.5231817", "0.5208476", "0.52023923", "0.51755023", "0.5116855", "0.5112408", "0.5110333", "0.5092085", "0.5089987", "0.5086152", "0.5080938", "0.5051528", "0.5051528", "0.5011665", "0.50084186", "0.5007459", "0.49958622", "0.49733213", "0.49542275", "0.495417", "0.49498022", "0.49460396", "0.49303383", "0.48881605", "0.48586914", "0.48477185", "0.4837219", "0.48050243", "0.4789931", "0.47893512", "0.4788178", "0.47869468", "0.47613704", "0.4752663", "0.47500047", "0.47426605", "0.47332984", "0.47233394", "0.4714982", "0.47131625", "0.4697844", "0.4697158", "0.46857035", "0.46792635", "0.46711436", "0.4664068", "0.4660284", "0.46519208", "0.46505415", "0.46443844", "0.46024713", "0.45999685", "0.45896453", "0.4589577", "0.45727187", "0.45695475", "0.4561564", "0.45561558", "0.45406654", "0.45351273", "0.45255303", "0.4524231", "0.45210078", "0.4517039", "0.4505856", "0.44961685", "0.4495365", "0.448878", "0.44851944", "0.44837207", "0.44794542", "0.44755214", "0.44672287", "0.4463968", "0.44579118", "0.44571388", "0.44558287", "0.44551775", "0.44505128", "0.4447302", "0.4434669", "0.4434428", "0.4431039", "0.44281664", "0.44205582", "0.44168368", "0.4408865", "0.44071707", "0.4406922" ]
0.69073343
0
gathers selected cards in order to take action on selected cards (either discarding them or preparing them)
def gatherSelected(self): self.selected_list = [] for element in self.hand_info: if element.status == 1: self.selected_list.append(element) return self.selected_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pick(self, pack, cards_owned, draft_info):\n pass", "def card_sel(\n self, num=1, **kwargs\n ): # pylint: disable=too-many-locals, too-many-branches\n selectfrom = self.card_selSource(**kwargs)\n force = kwargs[\"force\"] if \"force\" in kwargs else False\n showdesc = kwargs[\"showdesc\"] if \"showdesc\" in kwargs else True\n verbs = kwargs.get(\"verbs\", (\"Select\", \"Unselect\"))\n\n if \"prompt\" in kwargs:\n self.output(kwargs[\"prompt\"])\n\n if \"anynum\" in kwargs and kwargs[\"anynum\"]:\n anynum = True\n num = 0\n else:\n anynum = False\n\n selected = []\n types = kwargs[\"types\"] if \"types\" in kwargs else {}\n types = self._type_selector(types)\n while True:\n options = []\n if (\n anynum\n or (force and num == len(selected))\n or (not force and num >= len(selected))\n ):\n o = Option(selector=\"0\", verb=\"Finish Selecting\", card=None)\n options.append(o)\n index = 1\n for c in sorted(selectfrom):\n if \"exclude\" in kwargs and c.name in kwargs[\"exclude\"]:\n continue\n if not self.select_by_type(c, types):\n continue\n sel = \"%d\" % index\n index += 1\n if c not in selected:\n verb = verbs[0]\n else:\n verb = verbs[1]\n o = Option(selector=sel, verb=verb, card=c, name=c.name)\n if showdesc:\n o[\"desc\"] = c.description(self)\n if kwargs.get(\"printcost\"):\n o[\"details\"] = str(self.card_cost(c))\n if kwargs.get(\"printtypes\"):\n o[\"details\"] = c.get_cardtype_repr()\n options.append(o)\n ui = self.user_input(options, \"Select which card?\")\n if not ui[\"card\"]:\n break\n if ui[\"card\"] in selected:\n selected.remove(ui[\"card\"])\n else:\n selected.append(ui[\"card\"])\n if num == 1 and len(selected) == 1:\n break\n return selected", "def card(bot, update):\n query = update.callback_query\n user = query.from_user\n chat_id = query.message.chat_id\n selected_card = query.data\n\n if (chats[chat_id].player1.card_played == []) and (chats[chat_id].player2.card_played == []):\n bot.send_message(text=Strings.CARD_SELECTED.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN,\n isgroup=True)\n if chats[chat_id].player1.user == user:\n chats[chat_id].player1.card_played = chats[chat_id].player1.hand[int(selected_card)]\n chats[chat_id].player1.hand.remove(chats[chat_id].player1.hand[int(selected_card)])\n\n elif chats[chat_id].player2.user == user:\n chats[chat_id].player2.card_played = chats[chat_id].player2.hand[int(selected_card)]\n chats[chat_id].player2.hand.remove(chats[chat_id].player2.hand[int(selected_card)])\n return CARD\n\n else:\n if chats[chat_id].player1.user == user and chats[chat_id].player1.card_played != []:\n bot.send_message(text=Strings.CARD_SELECTED2.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return CARD\n elif chats[chat_id].player2.user == user and chats[chat_id].player2.card_played != []:\n bot.send_message(text=Strings.CARD_SELECTED2.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return CARD\n else:\n if chats[chat_id].player1.user == user:\n chats[chat_id].player1.card_played = chats[chat_id].player1.hand[int(selected_card)]\n chats[chat_id].player1.hand.remove(chats[chat_id].player1.hand[int(selected_card)])\n\n elif chats[chat_id].player2.user == user:\n chats[chat_id].player2.card_played = chats[chat_id].player2.hand[int(selected_card)]\n 
chats[chat_id].player2.hand.remove(chats[chat_id].player2.hand[int(selected_card)])\n\n bot.edit_message_text(text=Strings.CARD_SELECTED.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN)\n bot.send_message(chat_id,\n Strings.SELECTION_COMPLETED,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n\n reply_markup = ReplyKeyboardMarkup(c_b_keyboard, selective=False)\n bot.send_message(chat_id,\n Strings.QUESTION,\n reply_markup=reply_markup,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return BET_CHECK", "def choose_card(playable_cards):\r\n\r\n playing = playable_cards[0]\r\n print('\\n choosing \\n', playing)\r\n\r\n return playing # for now\r", "def next_card_selection(deck):\n # First grab the colors and format\n # Then generate the query, execute, and trim\n # Then get the selections, and return them!\n colors = deck['colors']\n deck_format = deck['format']\n seed = deck.get('seed')\n\n query_type = pick_next_card_query(deck_format, seed)\n trimmed_query_results = compile_execute_and_trim_query(colors, query_type, deck_format)\n selections = generate_pickable_selections(trimmed_query_results, deck_format, deck)\n print('QUERY: ', query_type, ' NUM RESULTS:', len(trimmed_query_results))\n\n return selections", "def choose_card_to_discard(self):\n random.choice(self.hand.card_list).use()", "def select_card(self, cards):\n idx = -1 # should not be inital value\n while True:\n print(\"Please select a card by index:\")\n inpt = self.input(list(enumerate(cards)))\n try:\n idx = int(inpt)\n except ValueError:\n print(f\"'{inpt}' is not a valid index.\")\n if idx < 0 or idx >= len(cards):\n print(f\"The index {idx} is not available.\")\n else:\n break\n assert idx != -1 # make sure it's not initial value\n return cards.pop(idx)", "def cards(self, cards):\n\n self._cards = cards", "def cards(self, cards):\n\n self._cards = cards", "def choose_card(self, state=None):\n # if self.at_last_stich():\n # allowed = yield self.cards[0]\n # else:\n self.observation_received.acquire()\n self.observation = self.build_observation(state, self.cards)\n logger.debug(f\"choose_card received observation: {self.observation}\")\n self.observation_received.notify_all() # notify all threads to be sure\n self.observation_received.release()\n\n self.action_received.acquire()\n received = self.action_received.wait()\n if not received:\n logger.debug(\"Timeout occurred. action_received condition has not been notified.\")\n logger.debug(f\"choose_card received action: {self.action}\")\n allowed_cards = self.allowed_cards(state=state)\n chosen_card = allowed_cards[0] # set chosen_card to the first allowed card in case anything goes south\n chosen_card = self.set_chosen_card(allowed_cards, chosen_card)\n self.action_received.release()\n\n allowed = yield chosen_card\n\n if allowed:\n yield None", "def set_chosen_card(self, allowed_cards, chosen_card):\n if self.action is not None:\n if self.action in allowed_cards:\n logger.info(f\"Successfully chose the card: {self.action}\")\n chosen_card = self.action\n else:\n logger.error(f\"{self.action} is not a valid card! 
Choosing the first allowed card now.\")\n else:\n logger.debug(\"chosen card is None\")\n return chosen_card", "def play_selected_card(_screen, player):\n card = Card(player.selected_card.card_id, 400, 350)\n card.image_of_card(_screen)", "def play_all(self):\n for _ in range(self._hand.size()):\n card = self._hand.pop()\n self._active.push(card)\n self._money = self._money + card.money\n self._attack = self._attack + card.attack\n print '\\nPlayed all cards!'", "def deal_cards():\n for _ in range(2):\n user_cards.append(random.choice(deck))\n dealer_cards.append(random.choice(deck))", "def cards(self) -> None:\n self._cards = []", "def Deal():\r\n cardsout = []\r\n cardoptions = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]\r\n topcardoptions = [0,2,3,4,5,6]\r\n topcard = topcardoptions[random.randint(0,5)]\r\n cardoptions.pop(cardoptions.index(topcard))\r\n cardsout.append(topcard)\r\n\r\n if SHOWHAPPENINGS == True:\r\n disp = card_dict[topcard]\r\n print(\"Topcard is: {}\".format(disp)) \r\n\r\n for i in range(4):\r\n numcards = 0\r\n while numcards < 5:\r\n possiblerange = len(cardoptions) - 1\r\n cardindex = random.randint(0,possiblerange)\r\n card = cardoptions[cardindex]\r\n cardsout.append(card)\r\n cardoptions.pop(cardoptions.index(card))\r\n PlayerHands[i].append(card)\r\n numcards += 1\r\n PlayerHands[i] = sorted(PlayerHands[i]) #putting into ascending order\r\n if i == 0 or i == 2:\r\n PlayerHands[i].append(\"RedTeam\")\r\n else: \r\n PlayerHands[i].append(\"BlackTeam\")\r\n \r\n PlayerHands[0].append(PLAYER1)\r\n PlayerHands[1].append(PLAYER2)\r\n PlayerHands[2].append(PLAYER3)\r\n PlayerHands[3].append(PLAYER4)\r\n #PlayerHand format = [card1,card2,card3,card4,card5,Team,Name]\r\n\r\n return topcard", "def going_out(self, cards):\n for card in cards:\n self.out_of_use.append(int(card))\n # print(self.out_of_use)", "def followUpAttack(self, validCards):\n print(\"Select card from... 
\")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n while card not in validCards: # error checking\n print(card)\n print(\"Please select a valid card from...\")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n self.currentHand.remove(card)\n card = self.checkDoubles(card)\n return card", "def clearCards(self):\r\n self.cards = []", "def __init__(self):\r\n self.cards = []", "def open_cards(self) -> None:\r\n self.dealer.deal_cards_to(self.card_stack, PokerRules.CARDS_PER_ROUND[self.round_num])", "def get_card_list(self):\n return self.cards", "def __init__(self):\n self._cards = []", "def upgrade_all_cards(self):\n self.game.go_to_comic_cards()\n logger.info(\"Comic Cards: upgrading all available cards.\")\n if wait_until(self.emulator.is_ui_element_on_screen, timeout=3, ui_element=self.ui['CARDS_UPGRADE_ALL']):\n self.emulator.click_button(self.ui['CARDS_UPGRADE_ALL'].button)\n for card_index in range(1, 6):\n card_select_ui = self.ui[f'CARDS_SELECT_GRADE_{card_index}']\n self.emulator.click_button(card_select_ui.button)\n logger.debug(f\"Comic Cards: starting to upgrade UI Element {card_select_ui.name}\")\n if not wait_until(self.emulator.is_image_on_screen, timeout=3, ui_element=card_select_ui):\n logger.warning(\"Comic Cards: can't select card's grade.\")\n continue\n logger.debug(f\"Comic Cards: successfully selected UI Element {card_select_ui.name}\")\n self.emulator.click_button(self.ui['CARDS_SELECT_GRADE'].button)\n if wait_until(self.emulator.is_ui_element_on_screen, timeout=3,\n ui_element=self.ui['CARDS_UPGRADE_CONFIRM']):\n self.emulator.click_button(self.ui['CARDS_UPGRADE_CONFIRM'].button)\n if wait_until(self.emulator.is_ui_element_on_screen, timeout=10,\n ui_element=self.ui['CARDS_UPGRADE_RESULTS_OK']):\n logger.debug(f\"Comic Cards: successfully upgraded UI Element {card_select_ui.name}\")\n self.emulator.click_button(self.ui['CARDS_UPGRADE_RESULTS_OK'].button)\n wait_until(self.emulator.is_image_on_screen, timeout=3, ui_element=card_select_ui)\n continue\n if wait_until(self.emulator.is_ui_element_on_screen, timeout=3, ui_element=self.ui['CARDS_UPGRADE_ALL_CANCEL']):\n self.emulator.click_button(self.ui['CARDS_UPGRADE_ALL_CANCEL'].button)\n self.close_after_mission_notifications()\n self.game.go_to_main_menu()", "def __init__(self, cards):\n self.cards = cards", "def user_turn(self):\r\n\r\n self.display_state() # display the current state\r\n print(\r\n '\\nTURN: You -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')\r\n # Get the row and col number of the card you want to select\r\n x1, y1 = self.input_validation('Enter the location of the first card you pick (row, col) -> ')\r\n self.selected = [x1, y1] # a temporary holder for the first choice\r\n\r\n # Get the corresponding card ID which is also the key for the dictionary with all the cards\r\n choice1_key = self.state[x1, y1]\r\n print('The card you selected: {0}'.format(self.deck[choice1_key]))\r\n\r\n # Repeat this for your second choice\r\n x2, y2 = self.input_validation('Enter the location of the second card you pick (row, col) -> ')\r\n self.selected = [-1, -1] # reset the temporary hold\r\n\r\n choice2_key = self.state[x2, y2]\r\n print('The card you selected: {0}'.format(self.deck[choice2_key]))\r\n\r\n # Check if the two cards are a match or not\r\n if self.check_card(self.deck[choice1_key], self.deck[choice2_key]):\r\n print('MATCH')\r\n # Replace the corresponding cards in the remaining inventory and 
state with -1\r\n self.remaining[choice1_key] = -1\r\n self.remaining[choice2_key] = -1\r\n self.state[x1, y1] = -1\r\n self.state[x2, y2] = -1\r\n self.player_cards += 2 # the player gets 2 cards\r\n self.bin.append([x1, y1]) # move the location of the card to the already-taken bin\r\n self.bin.append([x2, y2])\r\n self.forget_memory(choice1_key) # remove from computer's memory\r\n self.forget_memory(choice2_key)\r\n self.match = 1 # player will continue to choose cards\r\n else:\r\n print('NOT a match')\r\n # Add these cards to the computer's memory\r\n self.computer_memory[choice1_key] = [x1, y1]\r\n self.computer_memory[choice2_key] = [x2, y2]\r\n self.match = 0 # computer's turn\r", "def test_consumed_cards(self):\n game = TestGames.replay(9, [3, 1, 0, 0])\n consumed_cards = game.consumed_cards()\n self.assertEqual(len(consumed_cards), 8)\n\n self.assertListEqual(list(consumed_cards),\n [2 / 5, # guards\n 0 / 2, # priest\n 1 / 2, # baron\n 0 / 2, # handmaid\n 1 / 2, # prince\n 0 / 1, # king\n 0 / 1, # countess\n 0 / 1]) # princess", "def deliver_card(data, access=None):\n\n schema = get_card_schema(data)\n if not schema:\n schema = card_schema\n data = deepcopy(data)\n\n if access is 'learn' and data['kind'] is 'choice':\n if data['order'] == 'random':\n shuffle(data['options'])\n\n if data['max_options_to_show']:\n data['options'] = data['options'][:data['max_options_to_show']]\n\n return deliver_fields(schema, data, access)", "def all_cards_selected(self, game_key):\n participants = models.Participant.query(\n models.Participant.playing == True,\n models.Participant.selected_card == None,\n ancestor=game_key).fetch()\n logging.debug(\"participants who have not selected a card: %s\", participants)\n if participants:\n return False\n else:\n return True", "def draw_a_card(cards):\n import random\n card_drawn = random.choices(card_deck)\n cards.append(card_drawn[0])\n return", "def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))", "def attack(self): # need to check defenders handcount\n \"\"\"Always returns a list of values\"\"\"\n if self.AI:\n # return rand.randint(0,len(self.currentHand))\n Error(\"AI not yet implemented for Attacking\")\n else:\n print(\"Select card from... 
\")\n cardManager.printHand(self.currentHand)\n card = int(input(\"to your attack: \"))\n while card not in self.currentHand: # error checking\n print(\"Please select a valid card from...\", end = \" \")\n cardManager.printHand(self.currentHand)\n card = int(input())\n self.currentHand.remove(card)\n card = self.checkDoubles(card)\n return card", "def test_cards_get(self):\n pass", "def hit(self, deck):\n self.cards.append(deck.draw_card())", "def dealer_card_choice(update, context):\n query = update.callback_query\n if query.message.reply_to_message:\n user = query.message.reply_to_message.from_user\n else:\n user = query.message.chat\n bot = context.bot\n CURRENT_USER = USERS[user.username]\n CURRENT_CONTEXT = process_card_value(query.data, CURRENT_USER)\n message = f'Round: {CURRENT_CONTEXT[\"round\"]} ({CURRENT_CONTEXT[\"username\"]}) \\nDealers Card: {CURRENT_CONTEXT[\"dealer_card\"]}\\nYour Cards: {CURRENT_CONTEXT[\"player_cards\"]} \\nYour total: {CURRENT_CONTEXT[\"player_total\"]} \\n\\nChoose Dealers Card: '\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message,\n reply_markup=card_markup\n )\n\n # Tell ConversationHandler that we're in state `STRATEGY` now\n return STRATEGY", "def get_game_cards(gameId):\n pass", "def indicate_discard_card(whose_turn,players):\n cards_to_choose_from = players[whose_turn].hand.cards\n players[whose_turn].hand.print_cards()\n chosen_to_discard = int(input('Select a card to discard. Type a number. '))\n return chosen_to_discard", "def get_card_sets(self, name: str) -> List:", "def get_card(self, card):\n\n\t\tself.add_card_to_grps(card)\n\n\t\tself.grps = sorted(self.grps, key = lambda x: -len(x))\n\n\n\t\t# check if # of cards forming sets is more than 5; if yes, then break the set to allow computer to form runs\n\t\tnum_set_cards = 0\n\t\tpos = -1\n\t\tfor i in range(len(self.grps)):\n\t\t\tif len(self.grps[i]) > 1 and self.grps[i][0] == self.grps[i][1]:\n\t\t\t\tnum_set_cards += len(self.grps[i])\n\t\t\t\tpos = i\n\n\t\tif num_set_cards > 5:\n\t\t\tcard = self.grps[pos][-1]\n\t\t\tself.grps[pos].remove(card)\n\t\t\tlogger.info(f\"In computer.py/get_card: computer returned {card} to break too many set, computer = {self}\")\n\t\t\treturn card\n\n\n\t\t# if # of sets is fine, then remove a card from the group with least size\n\t\tcard = self.grps[-1][-1]\n\n\t\t\n\t\tif len(self.grps[-1]) == 1:\n\t\t\tself.grps.remove(self.grps[-1])\n\t\telse:\n\t\t\tself.grps[-1].remove(self.grps[-1][-1])\n\n\t\tlogger.info(f\"In computer.py/get_card: computer returned {card}, computer = {self}\")\n\n\t\treturn card", "def cards(\n self,\n cards: Union[List[Tuple[int, str, str]], List[Any]]\n ) -> None:\n self._cards: List[List[Tuple[int, str, str]]] = [cards]", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card = random.choice(cards)\n return card", "def get_cards(self):\n card = self._starting_card\n return card", "def __init__(self, cards = []):\n self.cards=cards", "def receive_card(self, card: Card) -> None:\n\t\tself.deck.append(card)\n\t\t\n\t\t# Sorts the Deck by type and colour for aesthetic purposes\n\t\t\"\"\"self.deck.sort(key=lambda x: repr(x.type))\n\t\tself.deck.sort(key=lambda x: repr(x.colour))\"\"\"", "def choice():\n list_cards = [0, 0, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,\n 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]\n list_cards= list_cards*4\n play_1 = 
random.choice(list_cards)\n list_cards_1 = list_cards\n list_cards_1.remove(play_1)\n dealer_1 = random.choice(list_cards_1)\n list_cards_1.remove(dealer_1)\n play_2 = random.choice(list_cards_1)\n list_cards_1.remove(play_2)\n dealer_2 = random.choice(list_cards_1)\n list_cards_1.remove(dealer_2)\n list1=[play_1,play_2]\n list=[dealer_1,dealer_2]\n return (list1,list,list_cards_1)", "def card_selSource(self, **kwargs):\n if \"cardsrc\" in kwargs:\n if kwargs[\"cardsrc\"] == \"hand\":\n selectfrom = self.piles[Piles.HAND]\n elif kwargs[\"cardsrc\"] == \"played\":\n selectfrom = self.piles[Piles.PLAYED]\n elif kwargs[\"cardsrc\"] == \"discard\":\n selectfrom = self.piles[Piles.DISCARD]\n else:\n selectfrom = kwargs[\"cardsrc\"]\n else:\n selectfrom = self.piles[Piles.HAND]\n return selectfrom", "def selectAmbassadorInfluence(self, choices, influenceRemaining):\n # todo: raise notImplemented. should be overriden by the input class\n \n selected = []\n for i in range(influenceRemaining):\n card = random.choice(choices)\n selected.append(card)\n choices.remove(card)\n \n return selected", "def get_cards(query_param):\n return _query_scryfall(query_param)", "def add_card(self, added_cards):\n\n self.hand[:0] = added_cards", "def deal_cards(self):\n aux = random.randint(0, len(self.deck))\n card = self.deck[aux]\n self.deck.pop(aux)\n print(f\"Received: {card}\")\n return card", "def cards(self):\n return self._cards", "def test_cards_get_list(self):\n pass", "def step(self, action):\n assert self.completed_rounds < self.num_rounds\n\n player = self.players[self.current_player_id]\n card = action\n\n if card not in player.hand:\n raise ValueError(\"Action not allowed because the card is not in the player's hand\")\n\n player.hand.remove(card)\n player.played.add(card)\n # print(f\"Player {self.current_player_id} with hand {[c.id for c in player.hand]} played the card {card.id}\")\n best_combination_on_the_table = self._get_best_combination(card)\n if best_combination_on_the_table:\n self.last_player_capturing_id = self.current_player_id\n player.captured.add(card)\n for c in best_combination_on_the_table:\n self.table.remove(c)\n player.captured.add(c)\n if not self.table and not (self._is_last_round and self._is_round_over()):\n player.scope += 1\n else:\n self.table.add(card)\n # print(f\"Cards on the table after play: {[c.id for c in self.table]}\")\n\n if self._is_round_over():\n self.completed_rounds += 1\n # print(f\"=========== Round {self.current_round} completed ============\")\n self.current_player_id = (self.current_player_id + 1) % self.num_players\n\n if self.is_over():\n last_player_capturing = self.players[self.last_player_capturing_id]\n # print(f\"Giving the remaining cards to player {last_player_capturing.player_id}\")\n for card in self.table:\n last_player_capturing.captured.add(card)\n self.table = set()\n assert all([len(p.played) == 10 for p in self.players])\n assert all([len(p.hand) == 0 for p in self.players])\n return self.get_state(), self.current_player_id", "def action_hit(self) -> None:\n print(self.deal_card(self.user))", "def check_selected_card(_player1, _player2):\n if _player1.selected_card and _player2.selected_card:\n color = _player1.selected_card.suit\n if _player2.selected_card.suit != color and check_color_card(_player2, color):\n _player2.selected_card = None", "def collect_cards():\n \n cards_list = []\n while (cards_input := input(\"Enter card: \")) != '#':\n i = cards_input.upper()\n if not is_valid(i):\n print(f\"Please enter a valid card.\")\n 
continue\n cards_list.append(i)\n cards_decoded = [Board.translate(card) for card in cards_list]\n return cards_decoded", "def Send_newCards(self, cards): \n serialized = [c.serialize() for c in cards]\n self.Send({\"action\": \"newCards\", \"cards\": serialized})", "def __init__(self):\n self.cards = [Card(face=card[0], value=card[1], suit=suit)\n for card in CARD_VALUES().items() for suit in CARD_SUITS()]", "def check_card_action(self, card):\n if card.value == \"7\":\n self.seven_punishment()\n elif card.value == \"8\":\n self.eight_punishment()\n elif card.value == \"9\":\n self.nine_punishment()\n elif card.value == \"B\":\n self.jack_wish()", "def hook_buy_this_card(self, game, player):\n totrash = [c for c in player.piles[Piles.PLAYED] if c.isTreasure()]\n for c in totrash:\n player.output(f\"Mint trashing {c.name}\")\n player.trash_card(c)", "def get_card_info_list(self):\n self._get_card_info_list = pa_card_info_cb_t(self._card_info_cb)\n pa_context_get_card_info_list(self._context,\n self._get_card_info_list,\n None)", "def hit(self, deck):\n self.showOneCard = False\n while self.getPoints() < 17:\n self.cards.append(deck.deal())", "def __refresh_search_results(self):\n\n # Define the query\n try:\n query = CardQuery()\n query.max_count = int(self.__box_count.get_text())\n query.max_proficiency = self.__combo_proficiency.get_index()\n query.max_score = float(self.__box_score.get_text())\n if self.__combo_card_type.get_index() > 0:\n query.card_type = getattr(WordType, self.__combo_card_type.get_text())\n except:\n cards = []\n traceback.print_exc()\n else:\n # Query the cards\n cards = []\n for card in self.__cards_source.get_cards():\n study_data = self.__study_database.get_card_study_data(card)\n if query.matches(card, study_data):\n cards.append((card, study_data))\n\n # Sort the list\n sort_method = self.__combo_sort.get_text()\n if sort_method == SortMethod.RANDOM:\n random.shuffle(cards)\n all_cards = list(cards)\n cards = []\n while all_cards and len(cards) < query.max_count:\n index = random.randrange(len(all_cards))\n elem = all_cards[index]\n del all_cards[index] \n cards.append(elem)\n elif sort_method == SortMethod.LOWEST_SCORE:\n cards.sort(key=lambda x: x[1].get_history_score())\n elif sort_method == SortMethod.OLDEST:\n cards.sort(key=lambda x: x[1].get_last_encounter_time() or x[1].get_history_score())\n elif sort_method == SortMethod.NEWEST:\n cards.sort(key=lambda x: x[1].get_last_encounter_time() or x[1].get_history_score(), reverse=True)\n\n cards = cards[:query.max_count]\n\n # Define the study params\n params = StudyParams()\n params.random_side = self.__combo_side.get_index() == 0\n params.random_form = self.__checkbox_random_forms.is_checked()\n params.shown_side = (CardSide.English if self.__combo_side.get_index() == 1\n else CardSide.Russian)\n self.__study_params = params\n\n # Define the scheduler params\n self.__scheduler_params = SchedulerParams(\n max_repetitions=1 if self.__checkbox_only_once.is_checked() else 0)\n \n # Popluate the table\n self.__table_cards.clear()\n for card, study_data in cards:\n color = Config.proficiency_level_colors[study_data.get_proficiency_level()]\n row = self.__table_cards.add(card, color=color)\n cards = [card for card, _ in cards]\n\n self.__cards = cards\n self.__button_begin.set_enabled(len(cards) > 0 and self.__study_params)\n self.__label_result_count.set_text(\"{} Results\".format(len(self.__cards)))", "def hit(\n self,\n card: List[Tuple[int, str, str]],\n card_index: int = 0\n ) -> None:\n 
self._cards[card_index].extend(card)", "def refresh(self):\n self.deck = []\n\n for _suit in Suit:\n for _face in Face:\n self.insert(Card(_suit, _face, self))", "def _cards_getter(self):\n pass", "def add_cards(self, cards):\n self.get_cards().extend(cards)", "async def get_available_cards(self, game_id): # pass in a list of card ids\n all_cards = await self.get_all_cards()\n available_cards = []\n game = await self.get_game(game_id)\n player1_cards = await self.get_current_cards(game[1])\n player2_cards = await self.get_current_cards(game[2])\n for card in all_cards:\n if card not in player1_cards and card not in player2_cards:\n available_cards.append(card)\n return available_cards", "def draw_card(self, card):\n self.current_hand.append(card)", "def get_selected_card(self, pos, double_clicking=False):\n if self.selectable:\n double_select = False\n relative_pos_x = pos[0] - self.x\n relative_pos_y = pos[1] - self.y\n mouse_pos = (relative_pos_x, relative_pos_y)\n self.selected_card = -1\n if not self.draw_from_last:\n for i, card in enumerate(reversed(self.cards)):\n if card.rect.collidepoint(mouse_pos):\n self.selected_card = len(self.cards) - 1 - i\n break\n else:\n for i, card in enumerate(self.cards):\n if card.rect.collidepoint(mouse_pos):\n self.selected_card = i\n break\n\n if self.prev_selected[-1] == self.selected_card:\n if not double_clicking:\n self.selected_card = -1\n\n self.record_selected_history()\n self.update_deck_display()\n\n selected_history = [sel for sel in self.prev_selected if sel >= 0]\n\n return (len(selected_history) == 2 and self.prev_selected.count(self.selected_card) == 2\n and self.selected_card >= 0) and double_clicking\n return False", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n random_card = random.choice(cards)\n return random_card", "def select_card(player, _mouse_x=None, _mouse_y=None):\n if _mouse_x:\n for card in player.cards:\n lower_x = card.positionx\n lower_y = card.positiony\n if lower_x < _mouse_x < lower_x + 100 and lower_y < _mouse_y < lower_y + 100:\n player.selected_card = card", "def receive_chance_card(self, card):\r\n self.chance_cards.append(card)", "def build_deck_screen_my_deck_card_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n # Draw the character card\n if user.character_card == '':\n pass\n else:\n user.character_card.rect.x = 65\n user.character_card.rect.y = 600\n screen.blit(user.character_card.image, user.character_card.rect)\n #Clear duplicate amount each frame and render the refined list\n for card_new in user.deck_list:\n card_new.duplicate = 1\n local_store_list = build_deck_screen_my_deck_card_list_refine(user)\n #use refined list to draw\n rect_position_x = 245 #local variables for rect position for the first card in the user deck\n rect_position_y = 600\n row_number = 1\n #Display cards in local_store_list:\n\n if screen_status.build_deck_screen_my_deck_page_id <= 0:\n screen_status.build_deck_screen_my_deck_page_id = 1\n # Edge cases when len() = 6,12,18....\n if len(local_store_list) % 6 == 0 and len(local_store_list) != 0:\n if screen_status.build_deck_screen_my_deck_page_id >= (len(local_store_list))//6 + 1:\n screen_status.build_deck_screen_my_deck_page_id = (len(local_store_list))//6 + 0\n\n else:\n if screen_status.build_deck_screen_my_deck_page_id >= (len(local_store_list))//6 + 2:\n screen_status.build_deck_screen_my_deck_page_id = (len(local_store_list))//6 + 1\n # Algorithm to draw all cards in local_store_list, 6 card per page.\n for 
card in local_store_list[6*(screen_status.build_deck_screen_my_deck_page_id - 1):6 * screen_status.build_deck_screen_my_deck_page_id]:\n if row_number <= 6:\n card.rect.x = rect_position_x\n card.rect.y = rect_position_y\n screen.blit(card.image, card.rect)\n rect_position_x += 145\n row_number += 1\n build_deck_screen_my_deck_duplicate_number_display(card, screen)\n if row_number >= 7:\n row_number = 1", "def deal_cards(self):\n self.card = random.randint(1, 13)\n return self.card", "def deal_opening_cards(self) -> None:\r\n for i in range(self.num_of_players):\r\n self.dealer.deal_cards_to(self.players[i].cards_stack, PokerRules.CARDS_PER_PLAYER)", "def cards_to_deal(cls, context={}):\n\t\traise NotImplementedError()", "def cards():\n if user_loggined():\n user = models.User.query.get(session['user_id'])\n u_cards = user.cards.all()\n prep_cards = []\n for card in u_cards:\n prep_cards.append(card.type + ' **** '+card.cnb[-9:])\n else:\n return redirect(url_for('index'))\n return redirect(url_for('index'))", "def take(self, table):\n # take card\n self.hand.add(table.card)\n table.card = table.cards.pop()\n # take chips\n self.chips += table.chips\n table.chips = 0", "def receive_card(self, card, new_suite=None, is_using_chameleon=False):\n assert isinstance(card, tuple)\n assert len(card) == 2\n\n if card.value == 14 or card.value == self.chameleon and is_using_chameleon:\n assert new_suite != None, print('You must specify a new suite.')\n assert new_suite in ['hearts', 'spades', 'diamonds', 'clubs']\n\n self.current_suite = new_suite\n self.current_value = None\n\n else:\n self.cards.append(card)\n self.current_suite = card.suite\n self.current_value = card.value", "def card_output():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n return random.choice(cards)", "def initial_draw(self):\n self.player.take_card(self.deck)\n self.dealer.take_card(self.deck)\n self.player.take_card(self.deck)\n self.dealer.put_face_down(self.deck)", "def get_options(cls, player, context={}):\n\t\toptions = []\n\t\tfor card in player.hand:\n\t\t\tif cls.can_be_played(card, context):\n\t\t\t\toptions.extend(card.actions)\n\t\toptions.append(Action(None, \"DRAW\", [DrawCard]))\n\t\treturn options", "def check_cards_eligibility(self):\n for c in self.hand:\n c.check_actions(self)\n for c in self.phand:\n c.check_actions(self)\n for c in self.discard:\n c.check_actions(self)\n for c in self.active_player.phand:\n c.check_actions(self)\n for c in self.active_player.hand:\n c.check_actions(self)\n for c in self.active_player.discard:\n c.check_actions(self)\n for c in self.played_user_cards:\n c.check_actions(self)\n if ACTION_KEEP in self.actions:\n for p in self.players:\n for c in p.phand:\n c.check_actions(self)\n for c in p.hand:\n c.check_actions(self)\n for c in p.discard:\n c.check_actions(self)", "def get_hand(self):\n return self.cards", "def get_cards(self):\n return deepcopy(self._cards)", "def lobby_screen_pick_deck_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n # Pick deck text\n button_text_1 = Button('Pick an exist deck or create a new one: ','', (250,250,250),400, 100, 400, 35, font_color = (0,0,0), alpha = 150)\n button_text_1.update()\n button_text_1.draw(screen)\n\n # Deck list buttons\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n if len(f.readlines()) >= 12:\n pass\n else:\n button_new_deck = Button('+ New Deck','', (250,250,250),1020, 110, 120, 35, font_color = (0,0,0), alpha = 150)\n 
button_new_deck.update()\n button_new_deck.draw(screen)\n\n\n f.seek(0)\n x = len(f.readlines())\n y = 0\n deck_list_index = 0\n\n for i in range(1,7):\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) not in line:\n y += 1\n if y < x: # DECK_LIST_i exist\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) in line:\n deck_length = len(make_card_list_from_string(line.replace('DECK_LIST_' + str(i) + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2))\n # deck_length = int((len(line.replace('DECK_LIST_' + str(i) + ' = ', '')) -1)/14)\n if 'CHARACTER_' + str(i) in line:\n character_length = 1\n character_card = eval('card_' + line.replace('CHARACTER_' + str(i) + ' = ', '')[7:12])\n\n if user.deck_list_index == str(i):\n\n button_top = Button(character_card.name + ': ','', (100,30,130),85 + 180* (i-1), 165, 130, 60)\n button_top.update()\n button_top.draw(screen)\n\n if deck_length < 40:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (100,30,130),85 + 180* (i-1), 225, 130, 50, font_color = (250,0,0))\n button_bottom.update()\n button_bottom.draw(screen)\n else:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (100,30,130),85 + 180* (i-1), 225, 130, 50)\n button_bottom.update()\n button_bottom.draw(screen)\n\n else:\n\n button_top = Button(character_card.name + ': ','', (160,160,160),85 + 180* (i-1), 165, 130, 60, alpha = 240)\n button_top.update()\n button_top.draw(screen)\n\n if deck_length < 40:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (160,160,160),85 + 180* (i-1), 225, 130, 50, font_color = (200,0,0), alpha = 240)\n button_bottom.update()\n button_bottom.draw(screen)\n else:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (160,160,160),85 + 180* (i-1), 225, 130, 50, alpha = 240)\n button_bottom.update()\n button_bottom.draw(screen)\n\n y = 0\n\n else: # DECK_LIST_i not exist\n\n button = Button('Empty','', (200,200,200),85 + 180* (i-1), 165, 130, 110, alpha = 80)\n button.update()\n button.draw(screen)\n\n y = 0\n\n\n for i in range(1,7):\n if user.deck_list_index == str(i):\n button_edit = Button('Edit','', (50,50,170),85 + 180* (i-1), 282, 60, 30)\n button_edit.update()\n button_edit.draw(screen)\n\n button_delete = Button('Delete','', (160,30,30), 155 + 180* (i-1), 282, 60, 30)\n button_delete.update()\n button_delete.draw(screen)", "def hit(hand=bj.player1.hand):\r\n hand.append(bj.deck.remove_card())", "def test_play_card(self):\n self.plr.piles[Piles.DECK].set(\"Silver\", \"Province\", \"Moat\", \"Gold\")\n self.vic.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"discard\", \"discard\", \"putback\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertIn(\"Duchy\", self.vic.piles[Piles.DISCARD])\n self.assertIn(\"Gold\", self.plr.piles[Piles.DISCARD])\n self.assertIn(\"Province\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Moat\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Silver\", self.plr.piles[Piles.DECK])", "def hit(self, deck):\n try:\n self.hand.append(deck.pop(0))\n except IndexError:\n print('There are no more cards in the deck!')", "def get_cards(self):\n return [card.view_model() for card in self._deck.loc]", "def deal(self, cards_num):\n\n cards = []\n while cards_num > 0:\n\n x = random.randint(0, 53)\n if self.in_use[x] == 0:\n self.in_use[x] += 1\n cards.append(x)\n 
cards_num -= 1\n\n return cards", "def player_card_one_choice(update, context):\n query = update.callback_query\n if query.message.reply_to_message:\n user = query.message.reply_to_message.from_user\n else:\n user = query.message.chat\n bot = context.bot\n CURRENT_CONTEXT = USERS[user.username]\n message = f'Round: {CURRENT_CONTEXT[\"round\"]} ({CURRENT_CONTEXT[\"username\"]}) \\nDealers Card: {CURRENT_CONTEXT[\"dealer_card\"]}\\nYour Cards: {CURRENT_CONTEXT[\"player_cards\"]} \\nYour total: {CURRENT_CONTEXT[\"player_total\"]} \\n\\nChoose Your 1st Card: '\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message,\n reply_markup=card_markup\n )\n\n return PLAYER_CARD_TWO", "def main():\n\n # call to OS for positioning window\n os.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (0, 25)\n\n # Initialization block\n pygame.init() # Initialize pygame module\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT)) # initialize screen\n\n # Testing\n # model_card = m_card.Card(m_card.CardType.TEMPURA)\n # view_card = v_card.CardView(screen, model_card)\n\n deck = Deck()\n player = Player()\n b_pack = deck.generate_booster(10)\n player.booster_pack = b_pack\n\n hand_view = HandView(screen, (0, SCREEN_HEIGHT - SCREEN_HEIGHT / 5), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player)\n pick_crds = PickedCardsView(screen, (0, 0), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player, 0)\n pick_crds2 = PickedCardsView(screen, (0, 0), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player, 180)\n # Game loop\n while True:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.MOUSEBUTTONUP:\n is_clicked([hand_view, pick_crds, pick_crds2], pygame.mouse.get_pos())\n screen.fill((0, 0, 0))\n hand_view.draw()\n pick_crds.draw()\n pick_crds2.draw()\n pygame.display.flip()", "def battle_screen_my_hand_card_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n rect_position_x = 100\n rect_position_y = 610\n row_number = 1\n if screen_status.battle_screen_action_indicator == 'stage-0':\n pass\n else :\n\n if screen_status.battle_screen_my_hand_page_id <= 0:\n screen_status.battle_screen_my_hand_page_id = 1\n # Edge cases when len() = 6,12,18....\n if len(user.hand_list) % 7 == 0 and len(user.hand_list) != 0:\n if screen_status.battle_screen_my_hand_page_id >= (len(user.hand_list))//7 + 1:\n screen_status.battle_screen_my_hand_page_id = (len(user.hand_list))//7 + 0\n\n else:\n if screen_status.battle_screen_my_hand_page_id >= (len(user.hand_list))//7 + 2:\n screen_status.battle_screen_my_hand_page_id = (len(user.hand_list))//7 + 1\n # Algorithm to draw all cards in local_store_list, 6 card per page.\n for card in user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]:\n if row_number <= 7:\n card.rect.x = rect_position_x\n card.rect.y = rect_position_y\n screen.blit(card.image, card.rect)\n rect_position_x += 145\n row_number += 1\n if row_number >= 8:\n row_number = 1", "def dealCards(deck, player, numCards):\n print \"dealing %s cards to %s...\" % (numCards, player.name)\n for card in range(numCards):\n card = deck[0]\n deck.pop(0)\n player.cards.append(card)\n print \"added %s card for %s\" % (card, player.name)\n print player.cards", "def setup_cards(self, server):\r\n\t\tversions_list = self.ice.getVersionsList()\r\n\t\talarm_list = self.ice.getAlarmStatus()\r\n\t\tstatus_list = self.ice.getStatus()\r\n\t\twarning_list = 
self.ice.getWarnings()\r\n\r\n\t\tdateTimeObj = datetime.now()\r\n\t\ttimestampStr = dateTimeObj.strftime(\"%d-%b-%Y (%H:%M:%S)\")\r\n\t\tcards = self.ice.getCardsAlive()\r\n\t\tfor i in range(len(versions_list)):\r\n\t\t\tjson_body = versions_list[i]\r\n\t\t\tjson_body.update({'alarm':alarm_list[i],'status':status_list[i], 'card':cards[i], 'warning':warning_list[i],'update':timestampStr, 'hostname':self.ip})\r\n\t\t\tserver.index(index='icepap_info', id=self.ip + '_' + str(cards[i]), body=json_body)", "def getAllCards(self):\n return self._cards", "def build_deck_screen_card_gallery_card_display(screen, buttons, screen_status, button_status, card_database_filter):\n rect_position_x = 100 #local variables for rect position for the first card in the card gallery\n rect_position_y = 130\n row_number = 1 # local variable to help keep track of position of card\n # Check the page number to make sure if will not go negative or randomly large\n if screen_status.build_deck_screen_card_gallery_page_id <= 0:\n screen_status.build_deck_screen_card_gallery_page_id = 1\n # Edge cases when len() = 14, 28, 42...\n if len(cdf.request_card_list(card_database_filter)) % 14 == 0 and len(cdf.request_card_list(card_database_filter)) != 0:\n if screen_status.build_deck_screen_card_gallery_page_id >= (len(cdf.request_card_list(card_database_filter)))//14 + 1:\n screen_status.build_deck_screen_card_gallery_page_id = (len(cdf.request_card_list(card_database_filter)))//14 + 0\n\n else:\n if screen_status.build_deck_screen_card_gallery_page_id >= (len(cdf.request_card_list(card_database_filter)))//14 + 2:\n screen_status.build_deck_screen_card_gallery_page_id = (len(cdf.request_card_list(card_database_filter)))//14 + 1\n # Algorithm to draw all cards in request_card_list, 14 card per page.\n for card in cdf.request_card_list(card_database_filter)[14*(screen_status.build_deck_screen_card_gallery_page_id - 1):14 * screen_status.build_deck_screen_card_gallery_page_id]:\n if row_number <= 7:\n card.rect.x = rect_position_x\n card.rect.y = rect_position_y\n screen.blit(card.image, card.rect)\n rect_position_x += 145\n row_number += 1\n elif row_number <= 14:\n card.rect.x = rect_position_x - 1015\n card.rect.y = rect_position_y + 200\n screen.blit(card.image, card.rect)\n rect_position_x += 145\n row_number += 1\n if row_number >= 15:\n row_number = 1", "def setup_newgame(self):\n global chips\n self.bet = 100\n if chips < self.bet: \n self.game_over = True\n chips -= self.bet\n \n\n self.cards_list = arcade.SpriteList()\n\n #resets on newgame\n self.top_card_int = 0 ## this had to be moved here to make it so that you are not drawing over the 52 card limit\n self.player_hand = []\n self.dealer_hand = []\n self.player_value = 0\n self.dealer_value = 0\n self.player_ace_count = 0\n self.dealer_ace_count = 0\n self.player_almost_bust = 0\n self.dealer_almost_bust = 0\n self.blackjack = False\n self.victory = False\n self.defeat = False\n \n #creates deck\n for card_suit in CARD_SUITS:\n for card_value in CARD_VALUES:\n card = Card(card_suit, card_value, CARD_SCALE)\n self.cards_list.append(card)\n #shuffles deck\n for pos1 in range(len(self.cards_list)):\n pos2 = random.randrange(len(self.cards_list))\n self.cards_list.swap(pos1, pos2)\n \n #Current way to add cards to player and dealer hands since using .pop() on self.cards_list deletes the card itself even in the other hands\n \n #self.dealer_hand.append(self.top_card_int)\n self.hit(\"dealer\")\n self.dealer_hand[0].face_down()\n #first_card = self.dealer_hand[0]\n 
#first_card.face_down()\n #self.dealer_hand[0].face_down()\n self.hit(\"player\")\n self.player_hand[0].face_down()\n self.hit(\"dealer\")\n self.dealer_hand[1].face_down()\n self.hit(\"player\")\n self.player_hand[1].face_down()\n self.update_card_positions()" ]
[ "0.6779038", "0.64504594", "0.6393454", "0.6248788", "0.6065166", "0.60555077", "0.60298246", "0.6014514", "0.6014514", "0.5978343", "0.5904059", "0.5888115", "0.5883593", "0.5855397", "0.58428776", "0.584109", "0.5830821", "0.582095", "0.5810005", "0.5807255", "0.5788884", "0.57709986", "0.574667", "0.5738221", "0.5737967", "0.57058537", "0.5680407", "0.56735003", "0.5642896", "0.56291485", "0.562021", "0.5614011", "0.5595327", "0.5593024", "0.556809", "0.55641556", "0.55591893", "0.5558305", "0.5548686", "0.55382204", "0.5533083", "0.55326396", "0.5528881", "0.5528702", "0.55202246", "0.5513971", "0.55041254", "0.5503086", "0.55018395", "0.5488135", "0.54830146", "0.54814434", "0.54784435", "0.5475379", "0.5463058", "0.544991", "0.54472756", "0.544597", "0.54333675", "0.5411004", "0.54085934", "0.5400689", "0.54006267", "0.53977484", "0.5396665", "0.539553", "0.5387312", "0.53800654", "0.5378856", "0.537282", "0.53693086", "0.53602904", "0.53573686", "0.5354459", "0.5354429", "0.5347059", "0.53427076", "0.5342567", "0.53405184", "0.53245234", "0.53223515", "0.5321809", "0.5314615", "0.5301498", "0.52926624", "0.52908504", "0.5289689", "0.5285544", "0.528017", "0.52695525", "0.526711", "0.5264777", "0.52642614", "0.52633953", "0.52546424", "0.52470315", "0.5243854", "0.52418876", "0.52303237", "0.52292776" ]
0.5896568
11
Flattened array of ints, specifying the index of this object. This has to account for shaped parameters!
def _raveled_index(self): return np.r_[:self.size]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)", "def flatten_idx(idx, axis=-1):\n idx = numpy.asanyarray(idx)\n if not idx.dtype.kind in ('i', 'u'):\n idx = idx.astype(int)\n preshape = idx.shape[:axis]\n postshape = idx.shape[axis:]\n stride = int(numpy.product(postshape[1:])) #1 if applied to empty\n #The index on this axis moves stride elements in flat\n outidx = idx.flatten() * stride #makes a copy\n #First add the offsets to get us to [..., idx @ axis = 0, 0...)\n outidx += numpy.repeat(\n numpy.arange(0, len(outidx), int(numpy.product(postshape)),\n dtype=idx.dtype),\n numpy.product(postshape))\n #Now offsets for non-zero on the trailing axes [0, 0, ... 0@axis, ...]\n outidx += numpy.tile(numpy.arange(0, stride, dtype=idx.dtype),\n int(numpy.product(preshape)) * idx.shape[axis])\n return outidx", "def flatten(self):\n xv, yv = np.meshgrid(self.columns, self.index, indexing='xy')\n return np.array([xv.ravel(), yv.ravel(), self.values.ravel()])", "def _to_flat_index(self, idx_in):\n idx_in = tuple([np.array(z, ndmin=1, copy=False) for z in idx_in])\n msk = np.all(np.stack([t < n for t, n in zip(idx_in, self.shape)]), axis=0)\n idx = np.ravel_multi_index(\n tuple([t[msk] for t in idx_in]), self.shape, mode=\"wrap\"\n )\n\n return idx, msk", "def getIntArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def indices(self):\n return tuple([slice(*r) for r in self.location])", "def flatten(self):\n\n if self.ndim == 1:\n return self.copy()\n\n return ArrayCoordinates1d(self.coordinates.flatten(), **self.properties)", "def getShortArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])", "def matrix_to_flat(self, idx_rows):\n idx = []\n for i in range(self.nts):\n idx.append(self._matrix_to_flat_by_ts(idx_rows, i))\n return idx", "def flatten(x):\n return x.view(x.size(0), -1)", "def flatten(self):\n return [e for es in self.array for e in es]", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def FlattenModelData(y, i):\n outs = np.array([y[j][i][0] for j in range(len(y))])\n return outs", "def to_flat_index(self) -> Index: # type: ignore[override]\n return Index(self._values, tupleize_cols=False)", "def _flatten(self):\n n = self.B\n idx = self.nodect - 1\n self.seq = []\n while n is not None:\n n['idx'] = idx\n self.seq.insert(0, n)\n idx -= 1\n n = n['pred']", "def flatten(self, arr):\n shape = arr.shape\n return arr.reshape(shape[0] * shape[1], *shape[2:])", "def GetPointToIncidentEdgesArray(self, p_int):\n ...", "def reconstruct_input(self, ix):", "def flatten(self) -> np.ndarray:\n\n return self.data.copy()", "def flatten(a, start=0, count=2):\n s = a.shape\n return np.reshape(a, s[:start] + (-1,) + s[start+count:])", "def flatten_npar(np_array):\n \n itr = len(np_array)\n start = np_array[0]\n \n for i in range(1,itr):\n start = np.hstack((start,np_array[i]))\n \n return(np.array(start))", "def array_form(self):\n return tuple(self)", "def flatten(self):\n pass", "def do_flatten(obj):\n if type(obj) == 
list:\n return np.array(obj).flatten()\n return obj.flatten()", "def flatten(x):\n return reshape(x, (x.shape[0], -1))", "def flatten_stimulus(stimulus):\n n, h, w = stimulus.shape\n return stimulus.reshape((n, h * w))", "def get(self, idx_in):\n shape_out = idx_in[0].shape\n idx_flat_in, msk_in = self._to_flat_index(idx_in)\n idx, msk = find_in_array(idx_flat_in, self.idx)\n val_out = np.full(shape_out, self._fill_value)\n val_out.flat[np.flatnonzero(msk_in)[msk]] = self._data[idx[msk]]\n return np.squeeze(val_out)", "def unstacked_index(size, index):\n return index % size, index // size", "def ravel_indices(shape, *args):\n new_positions = []\n for arg in args:\n new_positions.append(np.ravel_multi_index(arg, shape))\n return new_positions", "def row(self):\n return self.reshape((1, self.size))", "def GetPointToIncidentFacesArray(self, p_int):\n ...", "def flatten():", "def GetPointToOneRingPointsArray(self, p_int):\n ...", "def batch_indices(self):\n b = self.batch_size\n return [np.arange(i*b, i*b+b) for i in range(self.num_batches)]", "def _flatten(self, inputT, size):\n return tf.reshape(inputT, (-1, size))", "def _indarray(np_array):\n return skil_client.INDArray(\n ordering='c',\n shape=list(np_array.shape),\n data=np_array.reshape(-1).tolist()\n )", "def flatten_array(X_input):\r\n X_input_flat = np.array([x.flatten() for x in X_input])\r\n return X_input_flat", "def index_object(idxs=None):", "def serialize_flatten(name, value, array_indices=True):\r\n # call the recursive function that returns a tuple of tuples\r\n return tuple(serialize_flatten_rec(name, value, array_indices))", "def row(self, index: int) -> List[int]:\n return self.matrix[index - 1]", "def _multi_index(indexes, shape):\n indexes = indexes if isinstance(indexes, typing.Sequence) else (indexes,)\n if any(isinstance(i, type(Ellipsis)) for i in indexes):\n raise IndexError('Ellipsis index currently is not supported.')\n # Fill the right-most elements.\n indexes = indexes + (slice(0, None, None),) * (len(shape) - len(indexes))\n # Convert to positive index.\n positive_indexes = []\n for i, index in enumerate(indexes):\n if isinstance(index, slice):\n index = slice(\n index.start or 0, index.stop or shape[i], index.step or 1\n )\n positive_indexes.append(\n slice(\n index.start + shape[i] if index.start < 0 else index.start,\n index.stop + shape[i] if index.stop < 0 else index.stop,\n # Negative step means index backward, no need to convert to\n # positive interger.\n index.step,\n )\n )\n elif isinstance(index, int):\n positive_indexes.append(index + shape[i] if index < 0 else index)\n else:\n raise TypeError(f'Not supported index type {index}.')\n return tuple(positive_indexes)", "def indices(self):\n slice_list = []\n for axis in range(self.ndim):\n if axis in self.displayed:\n slice_list.append(slice(None))\n else:\n if self.clip:\n p = np.clip(\n self.point[axis],\n np.round(self.range[axis][0]),\n np.round(self.range[axis][1]) - 1,\n )\n else:\n p = self.point[axis]\n p = np.round(p / self.range[axis][2]).astype(int)\n slice_list.append(p)\n return tuple(slice_list)", "def GetEdgeArray(self, p_int):\n ...", "def wrap(self, flatten_x):\n batch_size = flatten_x.size(0)\n x = torch.reshape(flatten_x, (batch_size, self.num_frames, self.num_ticks_per_frame, -1))\n return x", "def one_dim_index(self, i, j):\n return int(i + j * self.nx)", "def flatten(self):\n return DataArray([s for s in self.unstructured()])", "def unflatten(self, flat, unused_shaped_like):\n return next(flat)", "def getLongArray2D(self) -> 
typing.List[typing.List[int]]:\n ...", "def get(self):\n return _n.reshape(self.next_x, self.original_shape)", "def indXtoJ(indX):\n return np.unravel_index(indX % xx.size, xx.shape)", "def _shaped_arange(*shape):\n return np.random.randn(np.prod(shape)).astype(np.float32).reshape(\n *shape\n ) * np.prod(shape)", "def _flatten_parameters(self):\n [m.flatten_parameters() for m in self._to_flatten]", "def flat(self):\n return Op('flat', self)", "def getBinIndices(self, linear_index):\n return linear_index / self.magic_array % self.nbins_across_dims", "def flatten(X):\n N = X.shape[-1]\n flat = np.zeros((N, 3072))\n for idx, i in enumerate(range(N)):\n # if not idx:\n # print(X[:,:,:,i].reshape(3072))\n flat[i] = X[:,:,:,i].reshape(3072)\n return flat", "def _get_args(idx, *args):\n new_args = []\n for arg in list(args[0]):\n if isinstance(arg, Iterable):\n new_args.append(arg[idx])\n else:\n new_args.append(arg)\n\n return new_args", "def BatchCreator(self, j, n_batch):\n j_start = (j-1)*n_batch + 1\n j_end = j*n_batch + 1\n ind = np.arange(start= j_start, stop=j_end, step=1)\n return ind", "def batched_index_select(input, dim, index):\n views = [input.shape[0]] + [1 if i != dim else -1 for i in range(1, len(input.shape))]\n expanse = list(input.shape)\n expanse[0] = -1\n expanse[dim] = -1\n index = index.view(views).expand(expanse)\n return torch.gather(input, dim, index)", "def __getitem__(self, index):\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = tuple(index)\n\n if self.dim == 2:\n return self._getitem2d(index)\n elif self.dim == 3:\n return self._getitem3d(index)", "def batch_flatten(x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def transform(self, x):\n res = [x[i] for i in range(len(x))\n if i not in self.index_value_pairs]\n return res if isinstance(x, list) else np.asarray(res)", "def _extend_index_dim(input_index, new_index, new_index_max):\n # Construct an iterator from new_index\n if isinstance(new_index, (int, np.integer)):\n it = [new_index]\n else:\n if isinstance(new_index, slice):\n # slices don't work very well with multi-dimensional circular mappings.\n it = _conv_slice_to_list(slice_obj=new_index, stop_def=new_index_max)\n else:\n it = new_index\n # Index extension\n if input_index is None:\n output = []\n for i in it:\n output.append(tuple([i]))\n return output\n else:\n output = []\n for _i in input_index:\n output_row = []\n for i in it:\n output_row.append(tuple(list(_i) + [i]))\n output.append(output_row)\n return output", "def _flatten(params):\n params, _ = tree_flatten(params)\n return jnp.concatenate([jnp.reshape(param, [-1]) for param in params])", "def flatten(self):\n return tuple(map(lambda i: 1 if i > 0 else 0, self.freq()))", "def make_indices(dimensions):\n\n level = len(dimensions)\n\n if level == 1:\n return range(dimensions[0])\n\n indices = [[]]\n\n while level:\n\n _indices = []\n\n for j in range(dimensions[level - 1]):\n\n _indices += [[j] + i for i in indices]\n\n indices = _indices\n\n level -= 1\n\n try:\n return [tuple(i) for i in indices]\n 
except TypeError:\n return indices", "def GetFaceToAdjacentFacesArray(self, p_int):\n ...", "def sub2ind(self, ix, iy):\n idx = np.ravel_multi_index((ix, iy), self.shape)\n return idx", "def flatten_numpy(ndarray):\n return np.reshape(ndarray, (-1,), 'F')", "def loc_data_idx(loc_idx):\n retval = []\n for i in as_tuple(loc_idx):\n if isinstance(i, slice) and i.step is not None and i.step == -1:\n if i.stop is None:\n retval.append(slice(0, i.start+1, -i.step))\n else:\n retval.append(slice(i.stop+1, i.start+1, -i.step))\n elif isinstance(i, slice) and i.step is not None and i.step < -1:\n if i.stop is None:\n lmin = i.start\n while lmin >= 0:\n lmin += i.step\n retval.append(slice(lmin-i.step, i.start+1, -i.step))\n else:\n retval.append(slice(i.stop+1, i.start+1, -i.step))\n elif is_integer(i):\n retval.append(slice(i, i+1, 1))\n else:\n retval.append(i)\n return as_tuple(retval)", "def __getitem__(self, index: list) -> (np.array, np.array):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, M = self.__data_generation(list_IDs_temp)\n\n return X, M", "def get_x(self):\n return self.x[:self.nump, :]", "def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a", "def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a", "def flatten_array(self):\n numel = self.xyz_array[:, :, 0].size # Number of elements in dataset\n self.flat_array = np.zeros([self._len_z, numel]) # Create array to hold flattened array\n\n # Loop through each dimension (dataset) and flatten it into new array\n for dim in range(self._len_z):\n self.flat_array[dim, :] = np.ravel(self.xyz_array[:, :, dim])", "def flatten_parameters(self):", "def get_scatter_indices_for_bboxes(flatted_indices, batch_size, total_bboxes):\n indices_size = len(flatted_indices)\n scatter_indices = tf.concat(flatted_indices, 1)\n return tf.reshape(scatter_indices, (batch_size, total_bboxes, indices_size))", "def encode_to_flat_array_index(row, column, matrix):\n return row * matrix.cols + column", "def mainIndices(self):\n return self.i1, self.i2", "def flatten(self, x):\n return np.concatenate([c.flatten(xi) for c, xi in zip(self.spaces, x)])", "def sliceshape(slicetuple, totshape):\n res = []\n for i,s in enumerate(slicetuple):\n if isinstance(s,int):\n #n = 1\n pass\n else:\n i0,i1,istep = s.indices(totshape[i])\n n = (i1-i0)//istep\n res.append(n)\n return res", "def gather(x, idx, method=2):\n\n if method == 0:\n return x[idx]\n elif method == 1:\n x = x.unsqueeze(1)\n x = x.expand((-1, idx.shape[-1], -1))\n idx = idx.unsqueeze(2)\n idx = idx.expand((-1, -1, x.shape[-1]))\n return x.gather(0, idx)\n elif method == 2:\n for i, ni in enumerate(idx.size()[1:]):\n x = x.unsqueeze(i+1)\n new_s = list(x.size())\n new_s[i+1] = ni\n x = x.expand(new_s)\n n = len(idx.size())\n for i, di in enumerate(x.size()[n:]):\n idx = idx.unsqueeze(i+n)\n new_s = list(idx.size())\n new_s[i+n] = di\n idx = idx.expand(new_s)\n return x.gather(0, idx)\n else:\n raise ValueError('Unkown method')", "def _asarray1d(arr, copy=False):\n if copy:\n return asarray(arr).flatten()\n else:\n return asarray(arr).ravel()", "def _normalize_index(self, index: int):\n if index < 0:\n return len(self) + index\n else:\n return index", "def _simplify_index(indices, shape):\n # First clean up and check indices, unpacking ellipsis and boolean arrays\n indices = da.slicing.normalize_index(indices, shape)\n out = 
[]\n axis = 0\n for index in indices:\n if index is not np.newaxis:\n length = shape[axis]\n axis += 1\n # If there is 1-D fancy index on this axis, try to convert to slice\n if isinstance(index, np.ndarray) and index.ndim == 1:\n try:\n index = _range_to_slice(index)\n except ValueError:\n pass\n else:\n index = da.slicing.normalize_slice(index, length)\n out.append(index)\n return tuple(out)", "def advanced_indexing_op(input, index):\n batch_size = tf.shape(input)[0]\n max_length = int(input.get_shape()[1])\n dim_size = int(input.get_shape()[2])\n index = tf.range(0, batch_size) * max_length + (index - 1)\n flat = tf.reshape(input, [-1, dim_size])\n relevant = tf.gather(flat, index)\n return relevant", "def recursive_index_decode(int_array, max=32767, min=-32768):\n out_arr = []\n decoded_val = 0\n for item in int_array.tolist():\n if item==max or item==min:\n decoded_val += item\n else:\n decoded_val += item\n out_arr.append(decoded_val)\n decoded_val = 0\n return numpy.asarray(out_arr,dtype=numpy.int32)", "def ordered_indices(self):\r\n return np.arange(len(self), dtype=np.int64)", "def _create_flatten(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.getattr('axis', 1)\n if factor < 0:\n # in order to support the negative axis\n factor = len(inputs[0].shape) + factor\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)", "def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):\n return (row_indices * num_cols) + col_indices", "def state_from_id(index, dims_state_grid):\n\n entries = [index] * len(dims_state_grid)\n for i in range(1, len(dims_state_grid)):\n value = 1\n for j in range(i, len(dims_state_grid)):\n value *= dims_state_grid[j]\n for k in range(i - 1, len(dims_state_grid)):\n if k == i - 1:\n entries[k] //= value\n else:\n entries[k] %= value\n\n out = np.array(object=entries)\n\n return out", "def index2d(src, idx):\n broadcast_to = P.BroadcastTo(idx.shape)\n offs = broadcast_to(P.range(Tensor(0, mindspore.int32),\n Tensor(idx.shape[0], mindspore.int32),\n Tensor(1, mindspore.int32))[:, None])\n idx = idx + (offs()) * idx.shape[1]\n\n return src.view(-1)[idx.view(-1)].view(idx.shpe)", "def duplicate_flatten(size: int) -> List[List[int]]:\n duplicate = []\n for _ in range(size):\n temp = [-1] * size\n duplicate.append(temp)\n return duplicate", "def unflatten(self, x):\n dims = [c.flat_dim for c in self.spaces]\n flat_x = np.split(x, np.cumsum(dims)[:-1])\n return tuple(c.unflatten(xi) for c, xi in zip(self.spaces, flat_x))", "def _asarray(self, vec):\n shape = self.domain[0][0].shape + self.pshape\n arr = np.empty(shape, dtype=self.domain.dtype)\n for i, xi in enumerate(vec):\n for j, xij in enumerate(xi):\n arr[..., i, j] = xij.asarray()\n\n return arr", "def flattenImage(input_array):\r\n shp = np.size(input_array)\r\n return np.reshape(input_array, (shp,))" ]
[ "0.65175533", "0.62058824", "0.5974461", "0.59025675", "0.58936995", "0.58421", "0.58367753", "0.57612103", "0.5670626", "0.5655063", "0.5648909", "0.56180966", "0.5589195", "0.55857176", "0.55857176", "0.5580362", "0.55702096", "0.5560747", "0.544082", "0.5435741", "0.54188216", "0.5409668", "0.53965384", "0.5367334", "0.53597945", "0.5351686", "0.5341007", "0.52925646", "0.5280118", "0.52661866", "0.52611285", "0.5256359", "0.52527046", "0.52487624", "0.5247055", "0.524415", "0.5242159", "0.5231501", "0.5228461", "0.5195386", "0.5180177", "0.5172795", "0.51692456", "0.5161007", "0.5160354", "0.5155424", "0.515318", "0.5135404", "0.5124941", "0.5120456", "0.5118928", "0.51058185", "0.51023453", "0.5096304", "0.50954634", "0.5094139", "0.50928885", "0.50887007", "0.50881743", "0.5080389", "0.5077059", "0.5072143", "0.50647575", "0.50647575", "0.50589573", "0.5048208", "0.5047156", "0.5045411", "0.504235", "0.5034239", "0.50310206", "0.5030486", "0.5030197", "0.50243735", "0.5024333", "0.50241196", "0.50241196", "0.50136197", "0.5009852", "0.4994477", "0.4993231", "0.49893743", "0.49876082", "0.49783012", "0.49714625", "0.49661586", "0.49639437", "0.4962034", "0.49616212", "0.4958802", "0.4941388", "0.49397555", "0.49366266", "0.49348867", "0.49333957", "0.49313772", "0.49312207", "0.49266088", "0.49158248" ]
0.53458685
27
Validate the response that came back from the API, return True if it's good, False if bad
def _validate_response(self, response): # Check for unexpected response - all should be JSON dicts that have # already been deserialised if not isinstance(response, types.DictionaryType): self.message( "\t\t[!] ERROR - Unexpected value returned from the API: '%s'" % (response)) return False # Check for valid errors if "error" in response and "msg" in response: self.message( "\t\t[!] ERROR - %s (%s)" % (response["msg"], response["timestamp"])) return False # Is this a valid response message if "msg" in response: return True # Catch all...dictionary returned but does not contain expected keys? # Who know's what's going on here?! else: self.message( "\t\t[!] ERROR - Unexpected dictionary response returned from the API: '%s'" % (response)) return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self, response):\n return response[\"status_code\"] == 1", "def is_valid_response(self, response):\r\n if response.status_code in VALID_CODES:\r\n return True\r\n return False", "def validate_response(self, response):\n pass", "def validate_response(response: json):\n if \"error\" in response:\n print(\"ERROR: Request returned error\")\n print_request_response(response)\n exit(1)", "def response_check(response):\n print(response)\n print(response.text)\n return response.status_code == 201", "def check_response_errors(self, resp):\n return True", "def validate_response(response):\n assert response.ok\n rpcdict = response.json()\n assert rpcdict['jsonrpc'] == '2.0'\n assert rpcdict['id']\n assert 'error' in rpcdict.keys() or 'result' in rpcdict.keys()", "def user_should_get_an_ok_response():\n assert web_app.validate_reponse()", "def check_response_invalid_fields(response: HTTPResponse) -> bool:\n return response.status_code == 422", "def is_okay(self) -> bool:\n if getattr(self.api_response, \"status_code\", 200) != 200:\n return False\n\n return self.result_code in [0, 810, 820]", "def response_check(response):\n print(response)\n print(response.text)\n return response.status_code == 200", "def check_http_response(self, response, url):\n try:\n response.raise_for_status()\n success = True\n except (requests.HTTPError, ValueError):\n success = False\n excpt_msg = \"Invalid API response.\"\n try:\n excpt_msg = response.headers[\"cause-message\"]\n except:\n try:\n excpt_msg = response.json()[\"error\"][\"message\"][\"value\"]\n except:\n excpt_msg = \"Unknown error ('{0}'), check url in a web browser: '{1}'\".format(response.reason, url)\n api_error = EODataDownResponseException(excpt_msg, response)\n api_error.__cause__ = None\n raise api_error\n return success", "def checkResponseOK(response):\n assert response['result'] == 'OK'", "def check_no_data_response(response: HTTPResponse) -> bool:\n return response.status_code == 422", "def check_response_no_data(response: HTTPResponse) -> bool:\n return response.status_code == 422", "def check_response(response):\n status = response.get('status')\n ret = status and status == 'OK'\n if not ret:\n logging.error('Received unexpected failure response from polyswarmd: %s', response)\n return ret", "def check_response_valid_update(response: HTTPResponse) -> bool:\n return response.status_code == 200", "def verif_response(response):\n if response.status_code >= 200 and response.status_code <= 299:\n logging.debug(\"response server OK::{}\".format(response.text))\n return True\n\n logging.error(\"response server KO::{}\".format(response.text))\n return False", "def validate_response(self, response: requests.Response) -> None:\n if 400 <= response.status_code < 500:\n msg = (\n f\"{response.status_code} Client Error: \"\n f\"{response.reason} for path: {self.path}. 
\"\n f\"Request payload: {response.request.body}\"\n )\n raise FatalAPIError(msg)\n\n elif 500 <= response.status_code < 600:\n msg = (\n f\"{response.status_code} Server Error: \"\n f\"{response.reason} for path: {self.path}\"\n )\n raise RetriableAPIError(msg)", "def is_success(self):\r\n if self.status_code < 400:\r\n return True\r\n return False", "def is_success(self):\r\n if self.status_code < 400:\r\n return True\r\n return False", "def _check_response(self, response):\n if response.status_code == requests.codes.ok:\n # Since the ZenHub REST API does not send back 204 when there is\n # no content, we have to check the Content-Length for 0 :(\n if int(response.headers['Content-Length']):\n return response.json()\n elif response.status_code == requests.codes.not_found:\n return None\n else:\n return response.raise_for_status()", "def is_response_valid(response):\n valid_keys = ('action', 'time', 'data', 'code', 'address')\n if all(key in response for key in valid_keys):\n return True\n raise ValueError", "def the_response_should_be_result(result):\n assert web_app.check_response(result)", "def _is_successful(response) -> bool:\n return response.status_code == 200", "def is_response_ok(cls, data):\n if data is None:\n cls.notify_widget.show_message(\"一个不好的消息\", \"网络出现一点问题\")\n return False\n\n if not isinstance(data, dict):\n return True\n\n if data['code'] == 200:\n return True\n\n cls.notify_widget.show_message(\"一个不好的消息\", \"网络出现一点问题\")\n return False", "def check_response(self, resp):\n\n # For successful API call, response code will be 200 (OK)\n if resp.ok:\n json = resp.json()\n self.response = ResponseHolder()\n self.response.response = json\n\n # Check the code\n if 'status' not in json:\n raise InvalidResponse('No status field')\n\n self.response.status = self.field_to_long(json['status'])\n if self.response.status != EBConsts.STATUS_OK:\n txt_status = self.get_text_status(json)\n raise InvalidStatus('Status is %s (%04X)'\n % (txt_status if txt_status is not None else \"\", self.response.status))\n\n if self.response_checker is not None:\n self.response_checker(self.response)\n\n return self.response\n\n else:\n # If response code is not ok (200), print the resulting http error code with description\n resp.raise_for_status()\n pass", "def is_raw_response(self, response: object) -> bool:", "def fusion_api_validate_response(self, respDict, valDict):\n success = True\n returnDict = {}\n keys = []\n for key in valDict:\n if not valDict[key]:\n continue\n # logger._log_to_console_and_log_file('key: %s' % (key))\n keyDict = {'key': key, 'expected': valDict[\n key], 'actual': respDict[key], 'success': True}\n if key in respDict:\n pattern = re.compile(str(valDict[key]))\n # if not re.search(str(valDict[key]), str(respDict[key])):\n # t = re.compile('(?i)Warning|Unknown|Terminated|Killed|Error|Completed')\n\n if not re.search(pattern, str(respDict[key])):\n\n success = False\n keyDict['success'] = False\n else:\n success = False\n keyDict['success'] = False\n keys.append(keyDict)\n\n returnDict['success'] = success\n returnDict['keys'] = keys\n return returnDict", "def check(self):\n invalid = []\n\n if not self.route:\n invalid.append(('route', 'missing'))\n elif not self.route[1] in ['GET', 'POST', 'PUT']:\n invalid.append(('route', 'invalid method: %s' % self.route[1]))\n\n has_2xx = False\n for rcode in self.return_codes:\n code = rcode[0]\n if code >= 200 and code < 300:\n has_2xx = True\n break\n if not has_2xx:\n invalid.append(('return_codes', 'Missing succes return code 
doc'))\n\n if self.client_auth is None:\n invalid.append(\n ('client_auth', 'Please provide client auth requirement'))\n\n if self.user_auth is None:\n invalid.append(\n ('user_auth', 'Please provide user auth requirement'))\n\n if invalid:\n msgs = []\n for error in invalid:\n msgs.append(\"%s: %s\" % error)\n raise ValueError(\n \"APIFunc for %s is invalid: %s\"\n % (self.viewfunc.__name__,\n ', '.join(msgs)))", "def validate_response(response):\n\n r = response\n try:\n r.raise_for_status()\n except HTTPError as e:\n message = dict(status_code=r.status_code, exception=e)\n\n try:\n response = r.json()\n message['response'] = response\n except JSONDecodeError as e:\n message['response'] = r.content\n\n raise HTTPError(message)", "def _check_response(self, res: requests.Response, token: str) -> None:\n return", "def validate_get_response(self, response):\n\n self.validate_response(response)\n if response.status_code not in self.model._meta['valid_get_status']:\n raise InvalidStatusError(\n self.model._meta['valid_get_status'], response\n )", "def check_status(response):\n if response.status_code == 200:\n return True", "def is_success_response(resp: Response) -> bool:\n return \\\n resp and \\\n is_dict(resp) and \\\n resp.get(\"success\", False) is True", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None)", "def has_error(self, response):\n return response.find(' Matched') == -1 and response.find(' Failed') == -1", "def error_invalid_response(self):\r\n return self.type() == 0x00", "def _is_error_call(self, response):\n status = response.get('ResponseMetadata', {}).get('HTTPStatusCode')\n return status != 200", "def is_error(response: str) -> bool:\n return \"ERROR\" in response", "def validation_check():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'Blockchain is valid',\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)}\n else:\n response = {'error': 'There are errors in the Blockchain',\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)}\n return jsonify(response), 200", "def is_good_response(res):\n content_type = res.headers['Content-Type'].lower()\n return (res.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def _check_200(self, response):\n if response.code != 200:\n raise YubiKeyVerificationError(\n \"Received {0} response.\".format(response.code))\n return response", "def is_good_response(resp):\n #Headers is a method in requests library\n content_type = resp.headers['Content-Type'].lower()\n \n return (resp.status_code == 200 # Returns true if the website is online and available (statuscode=200)\n #Returns true if content_type exists\n and content_type is not None\n #Returns true if it is an html document or a json document.\n and (content_type.find('json') > -1 or content_type.find('html')))", "def verify(self, response):", "def is_good_response(resp) -> bool:\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "async def test_is_error_response() -> None:\n assert not is_error_response(\"True\")\n assert not is_error_response(True)\n assert not is_error_response(\"\")\n assert is_error_response(\n 
json.loads(\n '{\"errorCode\": \"INVALID_NUMBER_PARAMETER_VALUE\", \"minValue\": 0.0, \"maxValue\": 1.01}'\n )\n )\n assert not is_error_response(json.loads('{\"errorCode\": \"\"}'))", "def is_error(self):\r\n if self.status not in [STATUS_CODES['200'], ]:\r\n return True\r\n else:\r\n return False", "def _process_unsuccessful_response(\n self,\n response: Response,\n case: Literal['validate_api_key', 'balances', 'trades', 'asset_movements'],\n ) -> Union[\n list,\n tuple[bool, str],\n ExchangeQueryBalances,\n ]:\n try:\n response_list = jsonloads_list(response.text)\n except JSONDecodeError as e:\n msg = f'{self.name} {case} returned an invalid JSON response: {response.text}.'\n log.error(msg)\n\n if case in ('validate_api_key', 'balances'):\n return False, msg\n if case in ('trades', 'asset_movements'):\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {msg}',\n )\n return []\n\n raise AssertionError(f'Unexpected {self.name} response_case: {case}') from e\n\n error_data = self._get_error_response_data(response_list)\n if error_data.error_code == API_ERR_AUTH_NONCE_CODE:\n message = API_ERR_AUTH_NONCE_MESSAGE\n # Errors related with the API key return a human readable message\n elif case == 'validate_api_key' and error_data.error_code == API_KEY_ERROR_CODE:\n message = API_KEY_ERROR_MESSAGE\n else:\n # Below any other error not related with the system clock or the API key\n reason = error_data.reason or response.text\n message = (\n f'{self.name} query responded with error status code: {response.status_code} '\n f'and text: {reason}.'\n )\n log.error(message)\n\n if case in ('validate_api_key', 'balances'):\n return False, message\n if case in ('trades', 'asset_movements'):\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {message}',\n )\n return []\n\n raise AssertionError(f'Unexpected {self.name} response_case: {case}')", "def assert_valid_responses(response) -> None:\n assert valid_resp_name in response.text\n assert valid_resp_addr in response.text\n assert response.status_code == 200", "def valid_in_response(self):\n return self._repeatable[1] is not None", "def _check_response(response: requests.Response) -> None:\n logger.debug('Received response:\\n%s', response.content)\n try:\n response.raise_for_status()\n if not response.json()['status']:\n _report_failure('your e-mail address appears to be invalid')\n except requests.exceptions.HTTPError:\n _report_failure()\n except (ValueError, KeyError):\n _report_failure('there was a problem with the server response')", "def check_status():\n try:\n return HTTPClient().fetch(\"https://api.random.org/\").code == 200\n except Exception: # pylint: disable=broad-except\n return False", "def is_good_response(self, resp):\r\n\t\tcontent_type = resp.headers['Content-Type'].lower()\r\n\t\treturn (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)", "def check_response(rv):\n if rv != 'OK':\n print \"No message found\"\n return False\n return True", "def IsOk(self):\r\n \r\n return True", "def is_good_response(self, resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None\n and content_type.find('html') > -1)", "def check(self, response, payload):\n # check response body\n if not 
response.text:\n return False\n\n # check for output\n # uid=user gid=group groups=groups\n regex = r\"(uid=\\d+[\\(\\)\\w\\-]+)(\\s+gid=\\d+[\\(\\)\\w\\-]+)(\\s+groups=\\d+[\\(\\)\\w\\-,]+)?\"\n if re.search(regex, response.text):\n return True\n else:\n return False", "def isGoodResponse(resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200\r\n and content_type is not None\r\n and content_type.find('html') > -1)", "def test_generating(resp):\n errors = []\n if not check_int(resp[\"tightness\"]):\n errors.append(\"Invalid type for Itinerary response's 'tightness' field.\")\n\n if not isinstance(resp, bool):\n errors.append(\"Invalid type for Itinerary response's 'start_from_airport' field.\")", "def check_event_registration_response_valid(\n response: HTTPResponse, user_id: user_models.UserId) -> bool:\n try:\n assert response.status_code == 201\n assert response.json()\n event_id = response.json().get(\"event_id\")\n assert check_event_id_added_to_user(event_id, user_id)\n assert check_event_status_set_properly(event_id)\n return True\n except AssertionError as assert_error:\n debug_msg = f\"failed at: {assert_error}. resp json: {response.json()}\"\n logging.debug(debug_msg)\n return False", "def is_good_response(resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200 \r\n and content_type is not None \r\n and content_type.find('html') > -1)", "def test_response_ok():\n\t\n\t# Send GET request to API given endpoint and store the response.\n\tresponse = get_items()\n\n\t# Confirm that the request-response cycle completed successfully.\n\t#assert_true(response.ok)\n\tif ('None' in response): print(\"Failed calling REST API: {}\".format(response))\n\telse: print(\"TC Passed, Response OK: {}\".format(response))", "def __CheckResponse(self, response):\n\n status = response.status\n if (status == httplib.OK or status == httplib.CREATED\n or status == httplib.NO_CONTENT):\n return\n elif (status == httplib.UNAUTHORIZED):\n raise BadCredentialsException\n elif (status == httplib.SERVICE_UNAVAILABLE):\n raise ServerBusyException\n elif (status == httplib.BAD_REQUEST\n or status == httplib.UNPROCESSABLE_ENTITY):\n raise BadArgumentsException\n elif (status == httplib.NOT_FOUND):\n raise NotFoundException\n else:\n raise BadOperationException", "def validate_token(self):\n r = requests.get(urljoin(self._url, Client._token_resource),\n params={\"tokenid\": self._token_id})\n\n if r.status_code == requests.status_codes.codes.unauthorized:\n raise ClientUnauthorized()\n elif r.status_code != requests.status_codes.codes.ok:\n error_messages = self._parse_invalid_request(r.text)\n raise ClientException(r.status_code, error_messages)\n\n try:\n type_, value = r.text.split(\"=\")\n value = value.strip(\" \\r\\n\")\n except Exception, e:\n raise ClientException(r.status_code,\n \"Some error has ocurred getting the result value from %s\"\n % r.text)\n\n return value == \"true\"", "def is_good_response(resp):\n\tcontent_type = resp.headers['Content-Type'].lower()\n\treturn (resp.status_code == 200 \n\t\tand content_type is not None \n\t\tand content_type.find('html') > -1)", "def validate_response(self, response):\n crypted = response[-0x100:]\n # check that not all values are the same\n if all(v == crypted[0] for v in crypted):\n return False\n # return if chunks of 0x10 repeat\n return (len([True for i in range(0x10, len(crypted), 0x10)\n if crypted[:0x10] == crypted[i:i+0x10]])) == 0xf", "def 
_validate_response(request, response, schema_data, schema_resolver):\n try:\n validate_outgoing_response(\n request,\n response,\n schema_data,\n schema_resolver\n )\n except jsonschema.exceptions.ValidationError as exc:\n # This will alter our stack trace slightly, but Pyramid knows how\n # to render it. And the real value is in the message anyway.\n raise HTTPInternalServerError(str(exc))", "def is_good_response(resp):\n\tcontent_type = resp.headers['Content-Type'].lower()\n\treturn (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)", "def check_no_header_response(response: HTTPResponse) -> bool:\n return response.status_code == 422", "def is_good_response(resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200\r\n and content_type is not None\r\n and content_type.find('html') > -1)", "def check_string():\n\n # Forcing check for valid json and headers with Content-Type:application/json\n content = request.get_json(silent=False, force=True)\n\n payload = content.get('data', None)\n \n if not payload:\n return response_handler(\n {\"error\": \"'data' key missing from JSON payload.\"},\n 400\n )\n if not isinstance(payload, basestring):\n return response_handler(\n {\"error\": \"Value of 'data' key is not of type 'string'.\"},\n 400\n )\n \n pangram = analyze_string(payload)\n if not pangram:\n return response_handler(\n {\"error\": False},\n 400\n )\n\n return response_handler(\n {\"success\": True},\n 200\n )", "def assertValidJSONResponse(self, resp):\r\n self.assertHttpOK(resp)\r\n self.assertTrue(resp['Content-Type'].startswith('application/json'))\r\n self.assertValidJSON(resp.content)", "def verify_object(self, data):\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result", "def is_ok(r) -> bool:\n\tif r.status_code == 200:\n\t\treturn True", "def mora_assert(response):\n assert response.status_code in (200, 201, 400, 404), response.status_code\n if response.status_code == 400:\n # Check actual response\n assert (\n response.text.find(\"not give raise to a new registration\") > 0\n ), response.text\n logger.debug(\"Request had no effect\")\n return None", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and 
content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def _is_valid_result(result):\n return result.get(\"version\", \"\") != \"\"", "def _assert_bad_request(self, response, field, zendesk_mock_class, datadog_mock):\r\n self.assertEqual(response.status_code, 400)\r\n resp_json = json.loads(response.content)\r\n self.assertTrue(\"field\" in resp_json)\r\n self.assertEqual(resp_json[\"field\"], field)\r\n self.assertTrue(\"error\" in resp_json)\r\n # There should be absolutely no interaction with Zendesk\r\n self.assertFalse(zendesk_mock_class.return_value.mock_calls)\r\n self.assertFalse(datadog_mock.mock_calls)", "def is_good_response(resp):\n content_type = resp.headers[\"Content-Type\"].lower()\n return (\n resp.status_code == 200\n and content_type is not None\n and content_type.find(\"html\") > -1\n )", "def CheckSuccess(self, response):\n if 'error' in response:\n raise ComputeEngineApiError('An operation completed with errors:\\n%s'\n % response)", "async def get_invalid(self, **kwargs: Any) -> bool:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = kwargs.pop(\"params\", {}) or {}\n\n cls: ClsType[bool] = kwargs.pop(\"cls\", None)\n\n request = build_bool_get_invalid_request(\n headers=_headers,\n params=_params,\n )\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n if _stream:\n await response.read() # Load the body in memory and close the socket\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n if response.content:\n deserialized = response.json()\n else:\n deserialized = None\n\n if cls:\n return cls(pipeline_response, cast(bool, deserialized), {})\n\n return cast(bool, deserialized)", "def assert_response_correct(self, response, expected_status, expected_content):\n assert response.status_code == expected_status\n parsed_content = json.loads(response.content.decode('utf-8'))\n assert parsed_content == 
expected_content", "def assert_json_failure_response_is_missing_social_auth(self, response):\r\n self.assertEqual(200, response.status_code) # Yes, it's a 200 even though it's a failure.\r\n payload = json.loads(response.content)\r\n self.assertFalse(payload.get('success'))\r\n self.assertIn('associated with your %s account' % self.PROVIDER_CLASS.NAME, payload.get('value'))", "def successful(self) -> bool:\n return self._unparsed_response is not None", "def verify_response_dict(api_key, response):\n LOGGER.debug('Verifying WSAPI response signature')\n\n # Remove signature from the response\n r = dict(response)\n del r['h']\n\n # Convert to HTML query as that is used by Yubico to sign the response\n query = sorted_urlencode(list(r.iteritems()))\n\n # We unquote it because it's not the HTTP quoted version\n query = urllib.unquote_plus(query)\n\n status = sign(api_key, query) == response['h']\n LOGGER.debug('Signature result ' + str(status))\n return status" ]
[ "0.8024064", "0.78625363", "0.77986264", "0.7515456", "0.74777824", "0.74465626", "0.73384", "0.7265737", "0.7262182", "0.72441804", "0.7198832", "0.712405", "0.7028976", "0.701251", "0.69981617", "0.69959635", "0.6970569", "0.6948191", "0.6897628", "0.68025744", "0.68025744", "0.6800186", "0.67974865", "0.67858255", "0.6780485", "0.676281", "0.67326874", "0.6712715", "0.67036015", "0.66709816", "0.66679287", "0.66633785", "0.66413915", "0.6637445", "0.6618039", "0.6610584", "0.66000175", "0.6586345", "0.65496165", "0.6531632", "0.65289193", "0.6482799", "0.646065", "0.6457958", "0.6457733", "0.6433299", "0.6422639", "0.64125013", "0.6410823", "0.6407977", "0.6402173", "0.6393354", "0.6390981", "0.6384058", "0.6378157", "0.6358686", "0.6338603", "0.63369715", "0.633359", "0.6324581", "0.6315461", "0.6308886", "0.63048613", "0.6301798", "0.62915355", "0.6285658", "0.6281826", "0.62785673", "0.62781996", "0.62777996", "0.6270371", "0.6267701", "0.6242745", "0.6234679", "0.62320304", "0.62277687", "0.6223658", "0.6216222", "0.62107384", "0.62107384", "0.62107384", "0.62107384", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.6208142", "0.6202829", "0.619694", "0.61769086", "0.61766976", "0.6169507", "0.61694765", "0.6152311", "0.61425745" ]
0.76538676
3
estimate an MxF user factor matrix and an FxN item factor matrix from the MxN rating matrix
def factor_mat(all_dat, f_num, iterations, regularization): # get # of users and # of items [u_num, i_num] = all_dat.shape # init user factors and item factors with random values u_fac = np.matrix(np.random.rand(u_num, f_num)) # MxF i_fac = np.matrix(np.random.rand(i_num, f_num)) # NxF # calculate the preference matrix preference = cal_preference(all_dat) # calculate the confidence matrix confidence = cal_confidence(all_dat) # recalculate the user factors and item factors using the alternating least square method for itr in range(iterations): u_fac = alternate_ls(u_num, i_fac, preference, confidence, regularization) #print itr, "u_fac" i_fac = alternate_ls(i_num, u_fac, preference.T, confidence.T, regularization) #print itr, "i_fac" # save the output df = pd.DataFrame(u_fac) df.to_csv("tmp/u_fac.tmp", index=False, header=False, sep='\t', encoding='utf-8') df = pd.DataFrame(i_fac.T) df.to_csv("tmp/i_fac.tmp", index=False, header=False, sep='\t', encoding='utf-8') # an MxF user factor matrix and an FxN item factor matrix return [u_fac, i_fac.T]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_matrix(self):\n\n self.matrix = np.zeros((len(self.users), len(self.items)))\n\n for user in self.train_set['users']:\n for item in self.train_set['feedback'][user]:\n self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \\\n self.train_set['feedback'][user][item]", "def get_user_item_matrix(datafile, user_index, product_index):\n num_users = len(user_index)\n num_items = len(product_index)\n result = np.zeros((num_users, num_items))\n num_reviews = len(datafile)\n result_dense = np.zeros((num_reviews, 3))\n for line in datafile.iterrows():\n i = line[0]\n user_id = datafile['user_id'][i]\n product_id = datafile['business_id'][i]\n user = user_index[user_id]\n product = product_index[product_id]\n rating = datafile['stars'][i]\n result[user, product] = rating\n result_dense[i, 0] = user\n result_dense[i, 1] = product\n result_dense[i, 2] = rating\n return result, result_dense", "def create_user_item_matrix(data,type='unary'): \n if type == 'unary':\n\n # for unary rating drop duplicates\n data = data.drop_duplicates()\n\n # create sparse matrix\n matrix = csr_matrix((data['rating'], (data['user_id'],data['product_id'])))\n\n # rows and cols with empty values will be dropped (doesnt make any difference in size for sparse matrix, but if later converted to dense, it saves space)\n # get all non empty rows and cols\n rows, cols = matrix.nonzero()\n unique_rows = np.unique(rows)\n unique_cols = np.unique(cols)\n\n # select only rows and cols with values\n matrix = matrix[unique_rows]\n matrix = matrix[:,unique_cols]\n\n return matrix\n\n if type == 'count':\n\n # create sparse matrix with counted ratings\n matrix = csr_matrix((data['rating'], (data['user_id'],data['product_id'])))\n\n # rows and cols with empty values will be dropped (doesnt make any difference in size for sparse matrix, but if later converted to dense, it saves space)\n # get all non empty rows and cols\n rows, cols = matrix.nonzero()\n unique_rows = np.unique(rows)\n unique_cols = np.unique(cols)\n\n # select only rows and cols with values\n matrix = matrix[unique_rows]\n matrix = matrix[:,unique_cols]\n\n '''\n Im Gegensatz zum Memory Based approach, muss beim Model Based Approach noch das Rating angepasst werden. \n Das heisst, dass Produkte die viel eingekauft wurden ein höhers Rating erhalten und solche die weniger \n eingekauft wudren ein tieferes. Gleichzeitig muss das Maximum limitiert werden. Dies wird mittels \n dem max_count integer in der Funktion bewerkstelligt\n '''\n\n # create diagonal Matrix with 1 divided by maximum values per row. 
This needs to be done because there is no divide function for csr matrices implemented\n # source: https://stackoverflow.com/questions/42225269/scipy-sparse-matrix-division\n diag = diags(1/matrix.max(axis=1).A.ravel())\n\n # multipy the matrix with the maximum values to get range from 0-1\n matrix = diag.dot(matrix)\n\n # sort indices; not really needed, just cosmetics\n matrix.sort_indices()\n\n return matrix", "def example():\n num_user, num_item, ratings = build_ticket()\n \n # suffle_data\n np.random.shuffle(ratings)\n\n # split data to training & validation\n train_pct = 0.9\n train_size = int(train_pct * len(ratings))\n train = ratings[:train_size]\n validation = ratings[train_size:]\n\n # params\n num_feature = 5\n bmf_model = BayesianMatrixFactorization(\n num_user, num_item, num_feature, train, validation, max_rating=1, min_rating=0, ratingsMatirx=ratings)\n\n start_time = time.clock()\n bmf_model.estimate(10)\n end_time = time.clock()\n \n mat = np.matrix(bmf_model.item_features)\n with open('../data/ticket/item_feature', 'w') as f:\n for line in mat:\n np.savetxt(f, line, fmt='%.5f')\n\n print \"time spend = %.3f\" % (end_time - start_time)\n\n return bmf_model", "def get_user_feature_matrix_p(user_dict, user_index, aspect_index, N, popularity, A_dense, Polarity):\n result = np.zeros((len(user_index), len(aspect_index)))\n for key in user_dict.keys():\n index_user = user_index[key]\n user_reviews = user_dict[key]\n count_dict = {}\n max = 0\n min = 1000\n for review in user_reviews:\n feature = review[0]\n if feature not in aspect_index:\n continue\n aspect = aspect_index[feature]\n if aspect not in count_dict:\n count_dict[aspect] = 0;\n if Polarity == False:\n count_dict[aspect] += 1\n else:\n count_dict[aspect] += review[1]\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n if count > max:\n max = count\n if count < min:\n min = count\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n result[index_user, aspect] = (((count - min)/(max - min))*5)\n\n if len(popularity) > 0:\n col = np.zeros((len(result), 1))\n result = np.append(result, col, axis=1)\n for i in range(len(result)):\n items = A_dense[A_dense[:, 0] == i][:, 1]\n items = items.astype(int)\n result[i, len(result[1]) - 1] = np.mean(popularity[items, 1])\n return result", "def fit(self, users, items, ratings, test_users=[], test_items=[], test_ratings=[], **kargs):\n global_mean_bias_init = np.float32(np.mean(ratings))\n global_mean_bias_init = 0.01\n self.model = DeepFM_(**self.dfm_params, global_mean_bias_init=global_mean_bias_init, first_half_fit_only_fm=self.first_half_fit_only_fm)\n \n # もし、CTR予測の場合は、y=0のデータをランダム生成する。\n if self.ctr_prediction:\n users = list(users) + list(np.random.choice(list(set(users)), size=len(users)))\n items = list(items) + list(np.random.choice(list(set(items)), size=len(items)))\n ratings = list((np.array(ratings)>0).astype(int)) + [0]*len(ratings)\n test_ratings = list((np.array(test_ratings)>0).astype(int))\n \n Xi, Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(users, items)\n \n if len(test_users)>0:\n test_Xi, test_Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(test_users, test_items)\n self.model.fit(Xi, Xv, ratings, test_Xi, test_Xv, test_ratings, early_stopping=True)\n else:\n self.model.fit(Xi, Xv, ratings, early_stopping=True, **kargs)\n \n # load data\n self.trained_users = list(set(users))\n self.trained_items = list(set(items))\n self.global_mean = self.model.predict(Xi, Xv).mean()", "def calculate_recommendations(self, 
vote_list, itemMatch, itemIgnored):\n #print \"--------------------------------------------------\"\n #print \"calculate_recommendations\"\n #print \"--------------------------------------------------\"\n\n # http://www.quuxlabs.com/blog/2010/09/matrix-factorization-a-simple-tutorial-and-implementation-in-python/\n\n # U = np.array('users')\n # D = np.array('video_games')\n\n # R = |U| cross |D|\n\n # We want to discover K latent features\n\n # Find\n # P(a | |U| corss K matrix)\n # Q(a | |D| cross K matrix)\n # Such that their product approximates R\n # R approx= P cross transpose(Q) = hat(R)\n #\n\n # r[i][j] = transpose(p)[i] * q[j]\n # = sum( 1..k, p[i][k] * q[k][j] )\n\n # e[i][j]**2 = (r[i][j] - hat(r)[i][j])**2\n # = (r[i][j] - sum( 1..K, p[i][k] * q[k][j]))**2\n # squared error, estimated rating can be either higher or lower than the real thing\n\n # find the gradient\n # diff(e[i][j]**2, p[i][k]) = -2*(r[i][j] - hat(r)[i][j]) * (q[k][j]) = -2*e[i][j] * q[k][j]\n # diff(e[i][j]**2, q[k][j]) = -2*(r[i][j] - hat(r)[i][j]) * (p[i][k]) = -2*e[i][j] * p[i][k]\n\n # update rules\n # alpha = settings.alpha # learning_rate\n # alpha = 0.0002 # learning_rate\n # p[i][k]' = p[i][k] + alpha * diff(e[i][j]**2, p[i][k])\n # = p[i][k] + 2 * alpha * e[i][j] * q[k][j]\n # q[k][j]' = q[k][j] + alpha * diff(e[i][j]**2, q[k][j])\n # = q[k][j] + 2 * alpha * e[i][j] * p[i][k]\n\n # training data\n # T = (u[i], d[j], r[i][j])\n # np.array()\n\n # iterate until convergance\n # E = sum((u[i], d[j], r[i][j]) in T, e[i][j])\n # = sum((u[i], d[j], r[i][j]) in T, r[i][j]\n # - sum(1..k, p[i][k]*q[k][j]))**2\n\n # regularization\n # beta = 0.02\n # e[i][j]**2 = (r[i][j] - sum(1..K, p[i][j]*q[k][j]))**2\n # + ((beta/2) * sum(1..K, norm(P)**2 + norm(Q)**2))\n #\n # p[i][k]' = p[i][k] + alpha * (2 * e[i][j] * q[k][j] - beta * p[i][k])\n # q[k][j]' = q[k][j] + alpha * (2 * e[i][j] * p[i][k] - beta * q[k][j])\n\n data = np.array(vote_list)\n\n encoder = OneHotEncoder()\n\n users = data[:,0]\n unique_users = list(set(users))\n for i in range(len(users)):\n users[i] = unique_users.index(users[i])\n\n video_games = data[:,1]\n unique_games = list(set(video_games))\n for i in range(len(video_games)):\n video_games[i] = unique_games.index(video_games[i])\n\n ratings = data[:,2]\n M = len(set(video_games))\n N = len(set(users))\n R = np.zeros((N,M))\n for i in range(len(users)):\n user = users[i]\n game = video_games[i]\n rating = ratings[i]\n R[user][game] = rating\n\n K = 2\n\n P = np.random.rand(N,K)\n Q = np.random.rand(M,K)\n\n nP, nQ = self.matrix_factorization(R, P, Q, K)\n nR = np.dot(nP, nQ.T)\n\n itemMatch = {}\n for i in range(N):\n user = unique_users[i]\n itemMatch[user] = []\n for j in range(M):\n if R[i][j] == 0:\n video_game = unique_games[j]\n recommendation = (video_game, nR[i][j])\n itemMatch[user].append(recommendation)\n itemMatch[None] = []\n print 'pmf recommendations', itemMatch.items()\n print '\\n'\n recommendations = itemMatch.items()\n\n # returns\n # [\n # (<user1>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # (<user2>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # ]\n\n return recommendations", "def init_matrix(self, train, num_features):\n user_matrix = np.random.rand(self.num_users, num_features)\n item_matrix = np.random.rand(num_features, self.num_items)\n item_nnz = train.getnnz(axis=0)\n item_sum = train.sum(axis=0)\n item_matrix[0, :] = item_sum / item_nnz\n return user_matrix, 
item_matrix", "def create_adjust_matrix(self, is_sub: bool):\n matrix = None\n if not is_sub:\n ratings = np.ones_like(self._user, dtype=np.float32)\n matrix = sp.csr_matrix(\n (ratings, (self._user, self._item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n else:\n if self.type == \"ND\":\n drop_user = self.rand_sample(\n self.n_users,\n size=int(self.n_users * self.drop_ratio),\n replace=False,\n )\n drop_item = self.rand_sample(\n self.n_items,\n size=int(self.n_items * self.drop_ratio),\n replace=False,\n )\n R_user = np.ones(self.n_users, dtype=np.float32)\n R_user[drop_user] = 0.0\n R_item = np.ones(self.n_items, dtype=np.float32)\n R_item[drop_item] = 0.0\n R_user = sp.diags(R_user)\n R_item = sp.diags(R_item)\n R_G = sp.csr_matrix(\n (\n np.ones_like(self._user, dtype=np.float32),\n (self._user, self._item),\n ),\n shape=(self.n_users, self.n_items),\n )\n res = R_user.dot(R_G)\n res = res.dot(R_item)\n\n user, item = res.nonzero()\n ratings = res.data\n matrix = sp.csr_matrix(\n (ratings, (user, item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n\n elif self.type == \"ED\" or self.type == \"RW\":\n keep_item = self.rand_sample(\n len(self._user),\n size=int(len(self._user) * (1 - self.drop_ratio)),\n replace=False,\n )\n user = self._user[keep_item]\n item = self._item[keep_item]\n\n matrix = sp.csr_matrix(\n (np.ones_like(user), (user, item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n\n matrix = matrix + matrix.T\n D = np.array(matrix.sum(axis=1)) + 1e-7\n D = np.power(D, -0.5).flatten()\n D = sp.diags(D)\n return D.dot(matrix).dot(D)", "def forward(self, user, item):\n item_emb = self.product_factors(item.view(-1)) + self.product_bias(\n item.view(-1)\n )\n user_emb = self.user_factors(user.view(-1)) + self.user_bias(user.view(-1))\n mat_mult = (item_emb * user_emb).sum(1)\n\n return mat_mult", "def affinity_matrix(test_specs):\n\n np.random.seed(test_specs[\"seed\"])\n\n # uniform probability for the 5 ratings\n s = [(1 - test_specs[\"spars\"]) / test_specs[\"ratings\"]] * test_specs[\"ratings\"]\n s.append(test_specs[\"spars\"])\n P = s[::-1]\n\n # generates the user/item affinity matrix. 
Ratings are from 1 to 5, with 0s denoting unrated items\n X = np.random.choice(\n test_specs[\"ratings\"] + 1, (test_specs[\"users\"], test_specs[\"items\"]), p=P\n )\n\n Xtr, Xtst = numpy_stratified_split(\n X, ratio=test_specs[\"ratio\"], seed=test_specs[\"seed\"]\n )\n\n return Xtr, Xtst", "def update_model(self):\n for itemidx, itemid in self._allitems.iterrows():\n self.__itemid2idx[str(itemid['itemid'])] = itemidx\n self.__itemidx2id[itemidx] = str(itemid['itemid'])\n for useridx, userid in enumerate(self._uservisits['userid'].unique()):\n self.__userid2idx[str(userid)] = useridx\n self.__useridx2id[useridx] = str(userid)\n userid = self._uservisits['userid'].values\n itemid = self._uservisits['itemid'].values\n rating = self._uservisits['rating'].values\n useridx = [self.__userid2idx[str(int(uid))] for uid in userid]\n itemidx = [self.__itemid2idx[str(int(iid))] for iid in itemid]\n rating = list(map(numpy.double, rating))\n self.__itemuser = csr_matrix((rating, (useridx, itemidx)), shape=(len(set(useridx)), len(set(itemidx))))\n self.__recommender = implicit.als.AlternatingLeastSquares(factors=self.__numtopics)\n self.__recommender.fit(self.__itemuser)", "def recommend_NMF(new_user,movies_num,movies_ratings):\n list_id_movies = movies_ratings['movieId'].unique()\n nmf,Q = load_NMF_model()\n new_user_vector = pd.DataFrame(new_user, index=list_id_movies).T\n new_user_vector_filled = new_user_vector.fillna(3)\n #calculate Matrix P (Genres)\n P = nmf.transform(new_user_vector_filled)\n #make predictions\n predictions = np.dot(P,Q)\n recommendations = pd.DataFrame(predictions.reshape(-1), index=list_id_movies).T\n #Remove already watched movies:\n not_watched_movies_mask = np.isnan(new_user_vector)\n not_watched = recommendations[not_watched_movies_mask]\n\n top_movies_ids = not_watched.T.sort_values(by=[0], ascending=False).index[:movies_num]\n\n Top_recommended = movieId_to_title(top_movies_ids,movies_ratings) \n return Top_recommended", "def fit(self, df):\n\n # generate continuous indices if this hasn't been done\n if self.index2item is None:\n self.set_index(df)\n\n logger.info(\"Collecting user affinity matrix\")\n if not np.issubdtype(df[self.col_rating].dtype, np.number):\n raise TypeError(\"Rating column data type must be numeric\")\n\n # copy the DataFrame to avoid modification of the input\n temp_df = df[[self.col_user, self.col_item, self.col_rating]].copy()\n\n if self.time_decay_flag:\n logger.info(\"Calculating time-decayed affinities\")\n # if time_now is None use the latest time\n if not self.time_now:\n self.time_now = df[self.col_timestamp].max()\n\n # apply time decay to each rating\n temp_df[self.col_rating] *= exponential_decay(\n value=df[self.col_timestamp],\n max_val=self.time_now,\n half_life=self.time_decay_half_life,\n )\n\n # group time decayed ratings by user-item and take the sum as the user-item affinity\n temp_df = (\n temp_df.groupby([self.col_user, self.col_item]).sum().reset_index()\n )\n else:\n # without time decay use the latest user-item rating in the dataset as the affinity score\n logger.info(\"De-duplicating the user-item counts\")\n temp_df = temp_df.drop_duplicates(\n [self.col_user, self.col_item], keep=\"last\"\n )\n\n logger.info(\"Creating index columns\")\n # map users and items according to the two dicts. 
Add the two new columns to temp_df.\n temp_df.loc[:, self.col_item_id] = temp_df[self.col_item].map(self.item2index)\n temp_df.loc[:, self.col_user_id] = temp_df[self.col_user].map(self.user2index)\n\n # retain seen items for removal at prediction time\n self.seen_items = temp_df[[self.col_user_id, self.col_item_id]].values\n\n # affinity matrix\n logger.info(\"Building user affinity sparse matrix\")\n self.user_affinity = self.compute_affinity_matrix(\n temp_df, self.n_users, self.n_items\n )\n\n # calculate item co-occurrence\n logger.info(\"Calculating item co-occurrence\")\n item_cooccurrence = self.compute_coocurrence_matrix(\n temp_df, self.n_users, self.n_items\n )\n\n # free up some space\n del temp_df\n\n self.item_frequencies = item_cooccurrence.diagonal()\n\n logger.info(\"Calculating item similarity\")\n if self.similarity_type == sar.SIM_COOCCUR:\n self.item_similarity = item_cooccurrence\n elif self.similarity_type == sar.SIM_JACCARD:\n logger.info(\"Calculating jaccard\")\n self.item_similarity = jaccard(item_cooccurrence).astype(\n df[self.col_rating].dtype\n )\n elif self.similarity_type == sar.SIM_LIFT:\n logger.info(\"Calculating lift\")\n self.item_similarity = lift(item_cooccurrence).astype(\n df[self.col_rating].dtype\n )\n else:\n raise ValueError(\n \"Unknown similarity type: {0}\".format(self.similarity_type)\n )\n\n # free up some space\n del item_cooccurrence\n\n logger.info(\"Done training\")", "def fit_data(self, matrix, user_features=None, item_features=None):\r\n matrix.sort_index(inplace=True)\r\n matrix.sort_index(inplace=True, axis=1)\r\n dataset = Dataset()\r\n dataset.fit((x for x in matrix.index),\r\n (x for x in matrix.columns))\r\n interactions = pd.melt(matrix.replace(0, np.nan).reset_index(),\r\n id_vars='index',\r\n value_vars=list(matrix.columns[1:]),\r\n var_name='plu_id',\r\n value_name='rating').dropna().sort_values('index')\r\n interactions.columns = ['crd_no', 'plu_id', 'rating']\r\n self.interactions, self.weights = dataset.build_interactions([tuple(x) for x in interactions.values])\r\n\r\n if user_features is not None:\r\n user_features.sort_index(inplace=True)\r\n dataset.fit_partial(users=user_features.index,\r\n user_features=user_features)\r\n self.user_features = dataset.build_user_features(\r\n ((index, dict(row)) for index, row in user_features.iterrows()))\r\n else:\r\n self.user_features = None\r\n if item_features is not None:\r\n item_features.sort_index(inplace=True)\r\n dataset.fit_partial(items=item_features.index,\r\n item_features=item_features)\r\n self.item_features = dataset.build_item_features(\r\n ((index, dict(row)) for index, row in item_features.iterrows()))\r\n else:\r\n self.item_features = None", "def __init__(self, ratings, rank, user_reg_loss, item_reg_loss):\n self._ratings = ratings\n self._users_num, self._items_num = ratings.shape\n self._rank = rank\n self._user_reg_loss = user_reg_loss\n self._item_reg_loss = item_reg_loss\n \n self._train_function = self._als_partial_step_explicit\n \n # DONE init latent factors for user and item matrix\n # losowo ustalamy inicjalne wartości X i Y\n self._user_factors = np.random.random((self._users_num, rank))\n self._item_factors = np.random.random((self._items_num, rank))", "def predict_rating(user_id,item_id):\n user_preference = latent_user_preferences[user_id]\n item_preference = latent_item_features[item_id]\n return user_preference.dot(item_preference)", "def compute_affinity_matrix(self, df, n_users, n_items):\n\n return sparse.coo_matrix(\n (df[self.col_rating], 
(df[self.col_user_id], df[self.col_item_id])),\n shape=(n_users, n_items),\n ).tocsr()", "def init_MF(train, num_features):\n num_user = train.shape[1]\n num_item = train.shape[0]\n user_features = np.random.rand(num_features,num_user) # user_features shape (20,943)\n item_features = np.random.rand(num_item, num_features) # item_features shape (1152,20)\n return user_features, item_features", "def feature_matrix(df, user_id=None, item_id=None):\n print(\"get feature matrix\")\n df1 = df.drop_duplicates(subset=['user_id'], keep='first', inplace=False)\n user_x = None\n if user_id is not None:\n user_x = int(np.argwhere(df1['user_id'].values == user_id))\n user_features = df1[['average_stars']].values\n csr_user_features = sparse.csr_matrix(user_features)\n\n df2 = df.drop_duplicates(\n subset=['business_id'],\n keep='first',\n inplace=False)\n item_x = None\n if item_id is not None:\n item_x = int(np.argwhere(df2['business_id'].values == item_id))\n item_features = df2.iloc[:, 10:].values\n\n csr_item_features = sparse.csr_matrix(item_features)\n return csr_user_features, csr_item_features, user_x, item_x", "def simMatrix(self, d = 1/5):\n \n self.fit_baseline(d)\n self.evalBaseline()\n \n \n df_mat = np.array(self.df[[\"user ind\", \"item ind\", \"rating\"]].merge(self.r_b, on = [\"user ind\", \"item ind\"]))\n df_ind = df_mat[:,:2].astype(int)\n df_rat = df_mat[:,2] - df_mat[:,3]\n \n \n self.M = np.zeros((self.n_us, self.n_it))\n \n \n widgets = ['Test: ', Percentage(), ' ', Bar(\"#\"), ' ', ETA()]\n pbar = ProgressBar(widgets = widgets, maxval = self.n_us)\n pbar.start()\n \n for us in self.user_ind:\n it = df_ind[np.where(df_ind[:,0] == us)[0], 1]\n rat1 = df_rat[np.where(df_ind[:,0] == us)[0]]\n self.M[us,it] = rat1\n \n pbar.update(us)\n \n pbar.finish()\n \n #self.M = self.UI.toarray()\n pbar = ProgressBar(widgets = widgets, maxval = self.n_it * (self.n_it - 1) / 2)\n pbar.start()\n \n self.S = np.empty((self.n_it, self.n_it)) * np.nan\n \n for i1 in range(self.n_it):\n # self.S[i1,i1] = 1\n x1 = self.M[:,i1]\n for i2 in range(i1+1,self.n_it):\n x2 = self.M[:,i2]\n I = np.logical_and(x1, x2)\n if (len(I) > 1):\n self.S[i1,i2] = self.S[i2,i1] = Sim.cos2(x1.T[I], self.M[:,i2].T[I])\n \n pbar.update((self.n_it)*(i1+1) - (i1+2)*(i1+1)/2)\n \n pbar.finish()\n \n return self.S", "def score_items(X, U, mu,\n scoremethod='lowhigh',\n missingmethod='none',\n feature_weights=[]):\n\n # Use U to model and then reconstruct the data in X.\n # 1. Project all data in X into space defined by U,\n # then reconstruct it.\n if missingmethod.lower() != 'ignore':\n # All missing values should have been replaced with 0,\n # or non-existent.\n # 1a. Subtract the mean and project onto U\n proj = np.dot(U.T, (X - mu))\n # 1b. Reconstruct by projecting back up and adding mean\n reproj = np.dot(U, proj) + mu\n # 1c. Compute the residual\n #print('X:', X.T)\n #print('reproj:', reproj.T)\n err = X - reproj\n #print('err:', err.T)\n #input()\n \n else:\n # Missing method must be 'ignore' (Brand 2002)\n (err, reproj) = compute_error_with_missing(X, U, mu)\n\n # 2. 
Compute reconstruction error\n if scoremethod == 'low': # Blank out all errors > 0\n err[err>0] = 0\n elif scoremethod == 'high': # Blank out all errors < 0\n err[err<0] = 0\n else: # default, count everything\n pass\n \n # Weight features if requested\n if len(feature_weights) > 0:\n for i in range(len(feature_weights)):\n err[i,:] = err[i,:] * feature_weights[i]\n\n if missingmethod.lower() == 'ignore':\n # Only tally error for observed features.\n # This means that items with missing values are not penalized\n # for those features, which is probably the best we can do.\n scores = np.nansum(np.array(np.power(err, 2)), axis=0)\n else:\n scores = np.sum(np.array(np.power(err, 2)), axis=0)\n\n #print('scores:', scores)\n #print('reproj:', reproj)\n #input()\n return (scores, reproj)", "def ratings_to_matrix(ratings_df, user_col, item_col, rating_col, forced_shape=None):\n users_num = ratings_df.user_id.max() + 1\n items_num = ratings_df.item_id.max() + 1\n \n if forced_shape:\n users_num = max(users_num, forced_shape[0])\n items_num = max(items_num, forced_shape[1])\n \n ratings_mat = np.zeros([users_num, items_num])\n for rating in ratings_df.itertuples():\n ratings_mat[rating[user_col], rating[item_col]] = rating[rating_col]\n \n return ratings_mat", "def __init__(self, user_factors, item_factors):\n self._user_factors = np.copy(user_factors)\n self._item_factors = np.copy(item_factors)\n \n self._users_num = user_factors.shape[0]\n self._items_num = item_factors.shape[0]\n\n assert user_factors.shape[1] == item_factors.shape[1]", "def fit(self, ratings):\n # Training proceeds in 2 steps:\n # 1. Normalize item vectors to be mean-centered and unit-normalized\n # 2. Compute similarities with pairwise dot products\n self._timer = util.Stopwatch()\n\n init_rmat, users, items = matrix.sparse_ratings(ratings)\n n_items = len(items)\n _logger.info('[%s] made sparse matrix for %d items (%d ratings from %d users)',\n self._timer, len(items), init_rmat.nnz, len(users))\n\n rmat, item_means = self._mean_center(ratings, init_rmat, items)\n\n rmat = self._normalize(rmat)\n\n _logger.info('[%s] computing similarity matrix', self._timer)\n smat = self._compute_similarities(rmat)\n\n _logger.info('[%s] got neighborhoods for %d of %d items',\n self._timer, np.sum(np.diff(smat.rowptrs) > 0), n_items)\n\n _logger.info('[%s] computed %d neighbor pairs', self._timer, smat.nnz)\n\n self.item_index_ = items\n self.item_means_ = item_means\n self.item_counts_ = np.diff(smat.rowptrs)\n self.sim_matrix_ = smat\n self.user_index_ = users\n self.rating_matrix_ = init_rmat\n\n return self", "def important_factors_based_on_ratings(data: pd.DataFrame) -> np.ndarray:\n # Turn labels into binary classification for equal class distribution\n data = utils.add_ratings_binary(data)\n # Get feature and label data for classifcation from original dataset\n X, y = utils.get_rating_features_labels(data)\n\n # Grab features from feature matrix\n features = X.columns\n\n # split data into train and test set\n X_train, X_test, y_train, y_test = model_selection.train_test_split(X.values, y, test_size=0.2) \n\n # Instantiate and train xgboost model for rating classfication\n xgb_model = xgboost.XGBClassifier()\n xgb_model.fit(X_train, y_train)\n\n # Grab feature importance scores from trained model\n feature_importance = xgb_model.feature_importances_\n # Find indices of top 2 important features\n top_important_features_ind = np.argpartition(feature_importance, -2)[-2:]\n\n print(f\"The top 2 important features are 
{features[top_important_features_ind]}\")\n\n return feature_importance", "def get_user_feature_matrix(user_dict, user_index, aspect_index, N):\n result = np.zeros((len(user_index), len(aspect_index)))\n for key in user_dict.keys():\n index_user = user_index[key]\n user_reviews = user_dict[key]\n count_dict = {}\n for review in user_reviews:\n feature = review[0]\n if feature not in aspect_index:\n continue\n aspect = aspect_index[feature]\n if aspect not in count_dict:\n count_dict[aspect] = 0;\n count_dict[aspect] += 1\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n result[index_user, aspect] = 1 + (N - 1) * (2 / (1 + exp(-count)) - 1)\n return result", "def personalization(prediction, n):\n # prediction\n # n top n recommendation\n\n top_n = get_top_n(prediction, n)\n\n rec_dict = {}\n for uid, user_ratings in top_n.items():\n rec_dict[uid] = [iid for (iid, _) in user_ratings]\n\n rec_user_ls = [pred[0] for pred in prediction]\n rec_item_ls = [pred[1] for pred in prediction]\n\n unique_rec_user_ls = np.unique(rec_user_ls)\n unique_rec_item_ls = np.unique(rec_item_ls)\n\n # assign each item with index number\n unique_rec_item_dict = {item: ind for ind,\n item in enumerate(unique_rec_item_ls)}\n\n n_unique_rec_user = len(unique_rec_user_ls)\n n_unique_rec_item = len(unique_rec_item_ls)\n\n # recommended user item matrix\n rec_matrix = np.zeros(shape=(n_unique_rec_user, n_unique_rec_item))\n\n # represent recommended item for each user as binary 0/1\n for user in range(n_unique_rec_user):\n # get userid\n user_id = unique_rec_user_ls[user]\n # get rec item list\n item_ls = rec_dict[user_id]\n\n for item_id in item_ls:\n # get item index\n item = unique_rec_item_dict[item_id]\n rec_matrix[user, item] = 1\n\n # calculate cosine similarity matrix across all user recommendations\n similarity = cosine_similarity(X=rec_matrix, dense_output=False)\n # calculate average of upper triangle of cosine matrix\n upper_right = np.triu_indices(similarity.shape[0], k=1)\n # personalization is 1-average cosine similarity\n score = 1 - np.mean(similarity[upper_right])\n return score", "def update_item_feature(\n train, item_features, user_features, lambda_item,\n nz_item_userindices, I):\n for n, item_n in enumerate(nz_item_userindices):\n nnz_users_per_item = len(item_n[1]) # Number of users who rated item n\n if (nnz_users_per_item == 0): nnz_users_per_item = 1\n # Least squares solution\n A_n = np.dot(user_features[:,item_n[1]], user_features[:,item_n[1]].T) + lambda_item * nnz_users_per_item * I\n V_n = np.dot(user_features[:,item_n[1]], train.T[item_n[1],item_n[0]].todense())\n #if (n%3 == 0): print(\"item_n: {}\".format(item_n[0]), np.linalg.det(A_n))\n if (np.linalg.det(A_n) != 0): item_features.T[:,item_n[0]] = np.linalg.solve(A_n,V_n)\n else: \n A_n[0,0] += 1; A_n[1,1] += 1; A_n[2,2] += 1; A_n[3,3] += 1; A_n[4,4] += 1; A_n[5,5] += 1 # if matrix A_n is singular, slightly modify several values\n item_features.T[:,item_n[0]] = np.linalg.solve(A_n,V_n)", "def item_user_matrix(X):\n X['user_id'] = X['user_id'].astype(\"category\")\n X['song_id'] = X['song_id'].astype(\"category\")\n\n row = X['song_id'].cat.codes.copy()\n col = X['user_id'].cat.codes.copy()\n\n nrow = len(X['song_id'].cat.categories)\n ncol = len(X['user_id'].cat.categories)\n\n item_user = csr_matrix((X['score'], (row, col)), shape=(nrow, ncol))\n\n user = dict(enumerate(X['user_id'].cat.categories))\n user_index = {u: i for i, u in user.items()}\n\n item = dict(enumerate(X['song_id'].cat.categories))\n item_index = 
{s: i for i, s in item.items()}\n\n return item_user, item_index, user_index", "def fit(self, ratings_mat):\n self.logger.debug(\"starting fit\")\n # self.n = ratings.max()['user']+1\n # self.p = ratings.max()['movie']+1\n self.ratings_mat = ratings_mat\n self.k = ratings_mat.shape[0]//20\n\n #ratings_array = ratings[ratings.columns[:-1].values].values\n\n #self.ratings_mat = np.zeros((self.n, self.p))\n\n #for i, rating in ratings.iterrows():\n # self.ratings_mat[( rating['user'], rating['movie'] )] = rating['rating']\n\n self.cosine_dists = squareform(pdist(ratings_mat, 'cosine'))\n\n #if a user has no ratings data, cosine dist will return a nan. In this case, we assume they are as different as possible, since we cannot predict using those users anyways\n self.cosine_dists = 1 - np.nan_to_num(1 - self.cosine_dists)\n\n self.similarity_ranks = self.cosine_dists.argsort(axis = 1)\n\n # ...\n\n self.logger.debug(\"finishing fit\")\n return(self)", "def FM(user_feature_columns, item_feature_columns, l2_reg_embedding=1e-6, loss_type='softmax', temperature=0.05,\n sampler_config=None, seed=1024,\n ):\n\n embedding_matrix_dict = create_embedding_matrix(user_feature_columns + item_feature_columns, l2_reg_embedding,\n seed=seed,\n seq_mask_zero=True)\n\n user_features = build_input_features(user_feature_columns)\n user_inputs_list = list(user_features.values())\n user_sparse_embedding_list, _ = input_from_feature_columns(user_features,\n user_feature_columns,\n l2_reg_embedding, seed=seed,\n support_dense=False,\n embedding_matrix_dict=embedding_matrix_dict)\n\n item_features = build_input_features(item_feature_columns)\n item_inputs_list = list(item_features.values())\n item_sparse_embedding_list, _ = input_from_feature_columns(item_features,\n item_feature_columns,\n l2_reg_embedding, seed=seed,\n support_dense=False,\n embedding_matrix_dict=embedding_matrix_dict)\n\n user_dnn_input = concat_func(user_sparse_embedding_list, axis=1)\n user_vector_sum = Lambda(lambda x: reduce_sum(x, axis=1, keep_dims=False))(user_dnn_input)\n user_vector_sum = l2_normalize(user_vector_sum)\n\n item_dnn_input = concat_func(item_sparse_embedding_list, axis=1)\n item_vector_sum = Lambda(lambda x: reduce_sum(x, axis=1, keep_dims=False))(item_dnn_input)\n item_vector_sum = l2_normalize(item_vector_sum)\n\n if loss_type == \"logistic\":\n score = inner_product(user_vector_sum, item_vector_sum, temperature)\n output = PredictionLayer(\"binary\", False)(score)\n\n elif loss_type == \"softmax\":\n output = InBatchSoftmaxLayer(sampler_config._asdict(), temperature)(\n [user_vector_sum, item_vector_sum, item_features[sampler_config.item_name]])\n else:\n raise ValueError(' `loss_type` must be `logistic` or `softmax` ')\n\n model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)\n\n model.__setattr__(\"user_input\", user_inputs_list)\n model.__setattr__(\"user_embedding\", user_vector_sum)\n\n model.__setattr__(\"item_input\", item_inputs_list)\n model.__setattr__(\"item_embedding\", item_vector_sum)\n\n return model", "def getUserItemMatrix(self):\n\t\tdf = self.getrating()\n\n\t\trows_index = df.user_id.unique()\n\t\tcolumn_index = df.venue_id.unique() \n\n\t\trow_len = len(rows_index)\n\t\tcol_len = len(column_index)\n\n\t\tX = lil_matrix((row_len, col_len))\n\t\trow_map = dict(zip(rows_index, range(row_len)))\n\t\tcol_map = dict(zip(column_index, range(col_len)))\n\n\t\t# Get mapping table for rows and columns\n\t\td = {}\n\t\td[\"row\"] = row_map\n\t\td[\"col\"] = col_map\n\n\t\tfor index, row in 
df.iterrows():\n\t\t\tX[d[\"row\"][row[\"user_id\"]], d[\"col\"][row[\"venue_id\"]]] = row[\"Rating\"]\n\n\t\tX = X.tocsr() # Allow efficient row slicing\n\n\t\treturn [d,X]", "def find_predictions(actives, train_rdd_gbitem_dict, train_rdd_gbuser_dict, num_items):\n active_user = actives[0][0]\n active_item = actives[0][1]\n\n # -----------------------------------\n # train_rdd_gbitem_dict = (item, ([(user,r),(user,r)...],avg_of_item))\n # train_rdd_gbuser_dict = (user, [(item,r),(item,r)...]\n\n if active_user not in train_rdd_gbuser_dict and active_item not in train_rdd_gbitem_dict:\n return (active_user, active_item), 2.5\n\n # all user, ratings that have rated active_item\n if active_item in train_rdd_gbitem_dict:\n active_item_avg = train_rdd_gbitem_dict[active_item][1]\n active_item_dict = dict(train_rdd_gbitem_dict[active_item][0]) # {user: rating, user: rating, ...}\n else:\n # item not found in training set\n # new item problem.\n average_of_user_list = train_rdd_gbuser_dict[active_user]\n average_of_user = sum([x[1] for x in average_of_user_list]) / len(average_of_user_list)\n return (active_user, active_item), average_of_user\n\n # user rated items - all (item, ratings) that the user has rated\n if active_user in train_rdd_gbuser_dict:\n active_user_rated_items = train_rdd_gbuser_dict[active_user] # [(item, rating), (item, rating), ...]\n else:\n # user not found in training set\n # new user problem.\n return (active_user, active_item), train_rdd_gbitem_dict[active_item][1]\n\n similarity_list = []\n for item, rating in active_user_rated_items:\n item_dict = dict(train_rdd_gbitem_dict[item][0])\n item_avg = train_rdd_gbitem_dict[item][1]\n similarity = find_similarity(dict(active_item_dict), active_item_avg, dict(item_dict), item_avg)\n similarity_list.append((rating, similarity))\n\n # Have obtained similarity list for active item and item from the above code.\n # Filter according to a top 'N' items and then take avg rating.\n # similarity_list.sort(key=lambda x: x[1], reverse=True)\n # similarity_list = similarity_list[:len(similarity_list) // 4]\n # similarity_list = [(x[0], x[1]*abs(x[1])**1.5) for x in similarity_list]\n # print(similarity_list)\n pred_rating = find_weighted_average(similarity_list, num_items)\n\n # for i in similarity_list:\n # print(i)\n # print(\"Pred-rating: \", pred_rating)\n\n return (active_user, active_item), pred_rating", "def convert_fidelity_matrix(fidelities, fidelity, sx, factor):\n\n matrix = np.zeros((sx, fidelities - 1))\n for idx in range(0, fidelity):\n matrix[:, idx] = np.ones((sx)) * factor[fidelity]\n\n return matrix", "def train_nmf(movies_ratings):\n \n #pivot the dataframe\n movies_ratings = movies_ratings.pivot_table(index='userId', columns='movieId', values='rating')\n #Fill Nan with 3.0 rating:\n movies_ratings.fillna(3.0, inplace=True)\n\n nmf_model = NMF(\n n_components=20,\n init='random',\n random_state=10,\n max_iter=10000\n )\n nmf_model.fit(movies_ratings)\n\n #save nmf model\n pickle.dump(nmf_model,open(\"models/nmf_model.sav\", 'wb'))", "def metric_test(self):\n k = 10\n latent_factor = 10\n n_users = 10\n n_items = 12\n\n interactions, user_features, item_features = util.generate_dummy_data_with_indicator (num_users=n_users, num_items=n_items, interaction_density=.5)\n print (\"interactiosn shape={}\".format( np.shape(interactions) ))\n print (\"user features shape={}\".format( np.shape(user_features.toarray()) ))\n print (\"item features shape={}\".format( np.shape(item_features.toarray()) ))\n\n model = 
TensorRec(n_components=latent_factor)\n\n model.fit(interactions, user_features, item_features, epochs=19)\n\n ranks = model.predict_rank(user_features=user_features, item_features=item_features)\n\n print (\"Ranks shape={}\".format(np.shape(ranks)))\n\n self.assertTrue(np.shape(interactions) == np.shape(ranks))\n\n tr_recall_result = eval.recall_at_k(predicted_ranks=ranks, test_interactions=interactions, k=k, preserve_rows=False)\n # print (tr_recall_result.mean())\n\n tr_precision_result = eval.precision_at_k(predicted_ranks=ranks, test_interactions=interactions, k=k, preserve_rows=False)\n # print(tr_precision_result.mean())\n\n # we need csr for interactions data\n interactions_ = interactions.tocsr()\n recall_result = metrics.recall_at_k(ranks, interactions_, k=k, preserve_rows=False)\n # print(recall_result.mean())\n\n precision_result = metrics.precision_at_k(ranks, interactions_, k=k, preserve_rows=False)\n # print (precision_result.mean())\n\n self.assertTrue (tr_recall_result.mean() == recall_result.mean())\n self.assertTrue (tr_precision_result.mean() == precision_result.mean())", "def update_user_feature(\n train, user_features, item_features, lambda_user,\n nz_user_itemindices, I):\n for d, user_d in enumerate(nz_user_itemindices): # iterate over non zero users\n nnz_items_per_user = len(user_d[1]) # Number of items user d has rated\n if (nnz_items_per_user == 0): nnz_items_per_user = 1\n \n # Least squares solution\n A_d = np.dot(item_features[user_d[1]].T, item_features[user_d[1]]) + lambda_user * nnz_items_per_user * I\n V_d = np.dot(item_features[user_d[1]].T, train[user_d[1],user_d[0]].todense())\n user_features[:,user_d[0]] = np.linalg.solve(A_d,V_d)", "def _estimate_ratings(self, U, V):\n Rhat = np.matmul(U, V.transpose()) # estimate R with UV^T\n Rhat = np.where(self.R_missing, 0, Rhat) # fill in missing values of R with 0s\n return Rhat", "def estimate(self, u, j):\n if not (self.trainset.knows_user(u) and self.trainset.knows_item(j)):\n raise PredictionImpossible('User and/or item is unknown.')\n\n u_ratings = self.trainset.ur[u]\n\n if self.weighting == 'linear':\n weight = sum(self.freqs[i, j, self.to_index(r)] for i, r in u_ratings)\n score = sum(self.sums[i, j, self.to_index(r)] for i, r in u_ratings)\n return score / weight\n\n # self.weighting == 'log' or None\n weights = [self.freqs[i, j, self.to_index(r)] for i, r in u_ratings]\n reciprocals = [1 / w if w else 0 for w in weights]\n scores = [self.sums[i, j, self.to_index(r)] for i, r in u_ratings]\n scores = [s * w for s, w in zip(scores, reciprocals)]\n\n if self.weighting is None:\n return np.mean(scores)\n # self.weighting == 'log'\n logs = [np.log(w + 1) if w >= 1 else 0 for w in weights]\n return np.dot(scores, logs) / np.sum(logs)", "def compute_user_user_sim_base_on_common_items(self):\n self.sim_matrix = {}\n for item in self.items.values():\n # convert to list of tuples for indexing\n users = list(item.covered_users.items())\n item_popularity = len(users)\n # iter through all user pairs\n for i in range(len(users)-1):\n for j in range(i+1, len(users)):\n user_A_info, user_B_info = users[i], users[j]\n # remember to update pair wise!\n self.update_user_user_sim(user_A_info, user_B_info,\n item_popularity)\n self.update_user_user_sim(user_B_info, user_A_info,\n item_popularity)", "def compute_pmf(X_inp, rank, c1, c2):\n\n X_arr = []\n u_arr = []\n v_arr = []\n d_arr = []\n\n X = deepcopy(X_inp)\n\n v_init = np.ones((np.shape(X)[1],1))\n\n for i in range(rank):\n X_arr.append(X)\n (d,u,v) = 
compute_factor(X, v_init, c1, c2)\n\n\n assert abs(npla.norm(u) - 1 ) < 1e-3\n assert abs(npla.norm(v) - 1 ) < 1e-3\n\n d_arr.append(d)\n u_arr.append(u)\n v_arr.append(v)\n\n toSub = np.outer(u,v)\n assert np.shape(toSub) == np.shape(X)\n X -= d * toSub\n\n return (X_arr, u_arr, v_arr, d_arr)", "def item_based_CF(G):\n\n\tcode, pmid = nx.bipartite.sets(G)\n\tX = nx.bipartite.biadjacency_matrix(G,pmid,column_order=code)\n\tmean_X = np.mean(X,axis=0)\n\tadjusted_X = X - mean_X\n\tsimilarity = cosine_similarity(X.T)\n\trating = mean_X + np.dot(adjusted_X,similarity)/np.sum(np.abs(similarity),axis=1)\n\tdf = pd.DataFrame(data=rating,index=pmid,columns=code)\n\n\treturn df", "def predict_rating(self, user_id, item_id):\n user_preference = self.latent_user_preferences[user_id]\n item_feature = self.latent_item_features[item_id]\n return user_preference.dot(item_feature)", "def create_user_item_array(self):\n user_em = self.user_factors.weight.detach()\n item_em = self.product_factors.weight.detach()\n user_b = self.user_bias.weight.detach()\n item_b = self.product_bias.weight.detach()\n\n user_item_array = (item_em + item_b) @ (user_em + user_b).transpose(0, 1)\n preds = self._prob_to_class(user_item_array).numpy()\n\n return preds", "def als(user_ids : numpy.ndarray, item_ids : numpy.ndarray,\n ratings : numpy.ndarray, num_item_factors : int,\n num_users: int, num_items : int, min_r_decrease=0.01,\n max_iterations=200, algorithm=1):\n # allocate \"user_factors\" and \"item_factors\"\n num_user_factors = num_item_factors + 1\n user_factors = numpy.random.uniform(-1, 1, num_users * num_user_factors)\n item_factors = numpy.random.uniform(-1, 1, num_items * num_item_factors)\n\n # argument construction\n user_ids_ptr = user_ids.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n item_ids_ptr = item_ids.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n ratings_length = len(ratings)\n ratings_ptr = ratings.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n user_factors_length = len(user_factors)\n user_factors_ptr = user_factors.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n item_factors_length = len(item_factors)\n item_factors_ptr = item_factors.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n iterations = _dll.als_from_python(\n user_ids_ptr, item_ids_ptr, ratings_length, ratings_ptr,\n num_item_factors, user_factors_length, user_factors_ptr,\n item_factors_length, item_factors_ptr, ctypes.c_double(min_r_decrease),\n max_iterations, algorithm)\n\n return user_factors, item_factors, iterations", "def NMF(model, maxIter=100, beliefs=None, verbose=False):\n if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]\n \n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter 0: \"+str(lnZ))\n\n for t in xrange(1,maxIter+1): # for each iteration:\n # Update all the beliefs via coordinate ascent:\n for Xi in model.X: # for each variable, \n bNew = 0.0 # compute E[ log f ] as a function of Xi:\n for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:\n m = f.log() # E[log f_a] = \\sum \\log f_a \\prod b_v\n for v in f.vars - [Xi]: m *= beliefs[v]\n bNew += m.marginal([Xi]) # sum them up to get E[log f]\n bNew -= bNew.max() # (numerical issues)\n bNew = bNew.exp()\n bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z\n beliefs[Xi] = bNew\n #\n # Compute the lower bound on the partition function:\n # E_b [ log f ] + H(b) = \\sum_a E[log f_a] + 
\\sum_i H(b_i) for independent beliefs\n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter \"+str(t)+\": \"+str(lnZ))\n return lnZ,beliefs", "def final_recommendation_score_matrix_helper_for_different_knn_types(self, type_of_method, index_matrix, value_matrix):\n if type_of_method == \"user\":\n for i in range(index_matrix.shape[1]):\n temp = index_matrix[:,i].astype(int)\n k_similar_users_profiles = self.utility_matrix[temp]\n k_similar_users_similarity_values = value_matrix[:,i]\n self.final_score_matrix[i,:] = np.dot(k_similar_users_similarity_values,k_similar_users_profiles)\n elif type_of_method == \"item\":\n for i in range(index_matrix.shape[1]):\n temp = index_matrix[:,i].astype(int)\n k_similar_item_profiles = self.utility_matrix.T[temp]\n k_similar_item_similarity_values = value_matrix[:,i]\n self.final_score_matrix[:,i] = np.dot(k_similar_item_similarity_values,k_similar_item_profiles)", "def get_users_features_matrix(games_features_matrix: csr_matrix, users_games_matrix: csr_matrix) -> csr_matrix:\n logging.getLogger(__name__).debug('Users features matrix calculating...')\n users_features_matrix = users_games_matrix * games_features_matrix\n logging.getLogger(__name__).debug('users_features_matrix.shape: ' + str(users_features_matrix.shape))\n return users_features_matrix", "def NMF(X,r):\n nmf_mdl = nmf.NMF(X,num_bases=r)\n nmf_mdl.initialization()\n nmf_mdl.factorize()\n return nmf_mdl.W,nmf_mdl.H,np.dot(nmf_mdl.W,nmf_mdl.H)", "def fit(self, data, num_features, lambda_user, lambda_item, gamma):\n user_matrix, item_matrix = self.init_matrix(data, num_features)\n nnz_users, nnz_items = data.nonzero()\n nnz_data = list(zip(nnz_users, nnz_items))\n for it in tqdm(range(self.num_epochs)):\n gamma /= 1.1\n np.random.shuffle(nnz_data)\n for u, i in nnz_data:\n user = user_matrix[u, :]\n item = item_matrix[:, i]\n err = data[u, i] - user @ item\n user_matrix[u, :] += gamma * (err * item - lambda_user * user)\n item_matrix[:, i] += gamma * (err * user - lambda_item * item)\n\n self.user_matrix = user_matrix\n self.item_matrix = item_matrix", "def evaluate_hmdb51_fusion():\n vlen = 0\n ob_suffix = '-max.feat.npy.gz'\n fv_suffix = '_fv.npy.gz'\n ob_root = '/home/syq/research_final/data/features/ob_hmdb51_pooled_python/'\n fv_root = '/home/syq/research_final/data/dense-traj/fv_hmdb51_python/'\n hmdb_splits = 'testTrainMulti_7030_splits/'\n categories = os.listdir(fv_root)\n weight = 1.0\n weights = [i / 20.0 for i in range(21)]\n acc_to_weights = {}\n\n for weight in weights:\n print \"Weight: %.2f\" % weight\n accs = np.zeros(3)\n for splitnum in range(1,4):\n ts = time.time()\n trainfiles, testfiles = hmdb51_splits.loadsplit(categories,\n hmdb_splits,\n splitnum)\n print 'Have %d train files' % len(trainfiles)\n print 'Have %d test files' % len(testfiles)\n\n if not vlen:\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(trainfiles[0][0][:-4],\n ob_suffix)),\"rb\")\n vlen_ob = len(np.load(fp))\n fp.close()\n print \"OB vector length is %d\" % vlen_ob\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(trainfiles[0][0][:-4],\n fv_suffix)),\"rb\")\n vlen_fv = len(np.load(fp))\n fp.close()\n print \"IDTFV vector length is %d\" % vlen_fv\n\n Dtrain_ob = np.zeros( (len(trainfiles),vlen_ob), np.float32 )\n Dtrain_fv = np.zeros( (len(trainfiles),vlen_fv), np.float32 )\n\n Ytrain = np.ones ( (len(trainfiles) )) * -1000\n\n for fi,f in enumerate(trainfiles):\n fp = 
gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtrain_ob[fi][:] = np.load(fp)\n fp.close()\n Ytrain[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtrain_fv[fi][:] = np.load(fp)\n fp.close()\n\n Dtest_ob = np.zeros( (len(testfiles),vlen_ob), np.float32 )\n Dtest_fv = np.zeros( (len(testfiles),vlen_fv), np.float32 )\n\n Ytest = np.ones ( (len(testfiles) )) * -1000\n\n for fi,f in enumerate(testfiles):\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtest_ob[fi][:] = np.load(fp)\n fp.close()\n Ytest[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtest_fv[fi][:] = np.load(fp)\n fp.close()\n\n \"\"\"\n Early fusion\n Dtrain = np.hstack((Dtrain_ob, Dtrain_fv))\n Dtest = np.hstack((Dtest_ob, Dtest_fv))\n\n clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n acc = clf.fit(Dtrain, Ytrain).score(Dtest, Ytest)\n \"\"\"\n fv_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n\n ob_clf = OneVsRestClassifier(estimator=SVC(C=10,\n cache_size=1000,\n kernel='linear',\n probability=True),\n n_jobs=-1)\n\n # Get probabilities for late fusion\n Dtrain_fv = fv_clf.fit(Dtrain_fv, Ytrain).decision_function(Dtrain_fv)\n Dtrain_ob = ob_clf.fit(Dtrain_ob, Ytrain).decision_function(Dtrain_ob)\n Dtest_fv = fv_clf.decision_function(Dtest_fv)\n Dtest_ob = ob_clf.decision_function(Dtest_ob)\n\n # Scale decision values b/w 0 and 1\n Dtrain_fv = preprocessing.normalize(Dtrain_fv)\n Dtrain_ob = preprocessing.normalize(Dtrain_ob)\n Dtest_fv = preprocessing.normalize(Dtest_fv)\n Dtest_ob = preprocessing.normalize(Dtest_ob)\n\n # Late fusion\n scores_train = (Dtrain_fv * weight) + (Dtrain_ob * (1 - weight))\n latefusion_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=-1)\n latefusion_clf.fit(scores_train, Ytrain)\n\n scores_test = (Dtest_fv * weight) + (Dtest_ob * (1 - weight))\n acc = latefusion_clf.score(scores_test, Ytest)\n print 'Fold', splitnum, 'late fusion acc', acc\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs[splitnum-1] = acc\n acc_to_weights[weight] = accs\n\n print \"Mean accuracy: %.3f\" % accs.mean()\n with open(\"hmdb51_weight_gridsearch.txt\", \"w\") as f:\n for weight, accs in acc_to_weights.items():\n f.write(str(weight) + str(accs) + '\\n')\n return acc_to_weights\n\n \"\"\"\n with open('fv_hmdb51_accs.txt', 'w') as f:\n f.write(\"%s\\nMean:%.3f\" % (str(accs), np.mean(accs)))\n \"\"\"", "def mrr(self):\n _test = self.drop_bad_ratings()\n merged = pd.merge(left=_test, right=self.predict, on=['user', 'item'], how='right')[\n ['user', 'item', 'rating_x', 'rating_y']]\n nott = np.vectorize(lambda x: not x)\n mrrs = []\n for user in merged.user.unique():\n frame = merged[merged.user == user].sort_values(by='rating_y', ascending=False)\n true_ratings = frame.rating_x.values\n positions = np.where(nott(np.isnan(true_ratings)))[0]\n if len(positions) > 0:\n mrrs.append(1 / (positions[0] + 1))\n else:\n mrrs.append(0)\n\n return sum(mrrs) / len(mrrs)", "def feature_processing(array2d):\n new_array2d = np.zeros([array2d.shape[0], 29])\n # items/ orders\n new_array2d[:, 0] = array2d[:, 4] / array2d[:, 3]\n # cancels / orders\n new_array2d[:, 1] = array2d[:, 5] / array2d[:, 3]\n # returns / items\n new_array2d[:, 2] = array2d[:, 6] / array2d[:, 4]\n # voucher / orders\n new_array2d[:, 3] = array2d[:, 10] / array2d[:, 3]\n # female_items / female_items + male_items\n new_array2d[:, 4] = array2d[:, 15] / 
([1 if x == 0 else x for x in (array2d[:, 15] + array2d[:, 16])])\n # male_items / female_items + male_items\n new_array2d[:, 5] = array2d[:, 16] / ([1 if x == 0 else x for x in (array2d[:, 15] + array2d[:, 16])])\n # unisex_items / items\n new_array2d[:, 6] = array2d[:, 17] / array2d[:, 4]\n # wapp_items / items\n new_array2d[:, 7] = array2d[:, 18] / array2d[:, 4]\n # wftw_items / items\n new_array2d[:, 8] = array2d[:, 19] / array2d[:, 4]\n # mapp_items / items\n new_array2d[:, 9] = array2d[:, 20] / array2d[:, 4]\n # wacc_items / items\n new_array2d[:, 10] = array2d[:, 21] / array2d[:, 4]\n # macc_items / items\n new_array2d[:, 11] = array2d[:, 22] / array2d[:, 4]\n # mftw_items / items\n new_array2d[:, 12] = array2d[:, 23] / array2d[:, 4]\n # wspt_items / items\n new_array2d[:, 13] = array2d[:, 24] / array2d[:, 4]\n # mspt_items / items\n new_array2d[:, 14] = array2d[:, 25] / array2d[:, 4]\n # curvy_items / items\n # Curvy item has a strong correlation with gender, however they are very right-skewed use np.power(1/6) to smooth it\n new_array2d[:, 15] = np.power(array2d[:, 26] / array2d[:, 4], 1 / 6)\n # sacc_items / items\n new_array2d[:, 16] = array2d[:, 27] / array2d[:, 4]\n # msite_orders / orders\n new_array2d[:, 17] = array2d[:, 28] / array2d[:, 3]\n # desktop_orders / orders\n new_array2d[:, 18] = array2d[:, 29] / array2d[:, 3]\n # android_orders / orders\n new_array2d[:, 19] = array2d[:, 30] / array2d[:, 3]\n # ios_orders / orders\n new_array2d[:, 20] = array2d[:, 31] / array2d[:, 3]\n # other_device_orders / orders\n new_array2d[:, 21] = array2d[:, 32] / array2d[:, 3]\n # work_orders / orders\n new_array2d[:, 22] = array2d[:, 33] / array2d[:, 3]\n # home_orders / orders\n new_array2d[:, 23] = array2d[:, 34] / array2d[:, 3]\n # parcelpoint_orders / orders\n new_array2d[:, 24] = array2d[:, 35] / array2d[:, 3]\n # other_collection_orders / orders\n new_array2d[:, 25] = array2d[:, 36] / array2d[:, 3]\n # average_discount_onoffer\n new_array2d[:, 26] = array2d[:, 39]\n # average_discount_used\n new_array2d[:, 27] = array2d[:, 40]\n # revenue / order\n new_array2d[:, 28] = array2d[:, 41] / array2d[:, 3]\n\n # normalize by each feature\n new_array2d = normalize(new_array2d, axis=0, norm='max')\n return new_array2d", "def __init__(\n self,\n n_users,\n n_products,\n n_factors=20,\n optimizer=torch.optim.SGD,\n lr=0.001,\n l2=0,\n momentum=0,\n loss_fn=nn.BCEWithLogitsLoss,\n activation=nn.Sigmoid,\n ):\n super(NNMatrixFactorization, self).__init__()\n\n self.l2 = l2\n self.lr = lr\n self.momentum = momentum\n self.user_factors = ScaledEmbedding(n_users, n_factors)\n self.product_factors = ScaledEmbedding(n_products, n_factors)\n self.user_bias = ZeroEmbedding(n_users, 1)\n self.product_bias = ZeroEmbedding(n_products, 1)\n\n self.activation = activation()\n self.loss_fn = loss_fn()\n self.optimizer = optimizer(\n self.parameters(), lr=self.lr, weight_decay=self.l2, momentum=self.momentum\n )", "def get_scores(self):\n hist = self.confusion_matrix\n # hist = [TN,FP;FN,TP]\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n # iou = iu.sum() / self.n_classes\n mean_iou = np.nanmean(iu) # if classes = 2: iou = miou\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iou = dict(zip(range(self.n_classes), iu))\n\n ##############################################\n tn = hist[0, 0]\n tp = np.diag(hist).sum() - 
tn\n fp = np.triu(hist, 1).sum()\n fn = np.tril(hist, -1).sum()\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f1 = 2 * precision * recall / (precision + recall)\n\n # for medical img, img_seg \\in [0,1]\n dice = 2 * tp / (tp + tp + fn + fp)\n # dice = f1-score\n dsc = 2 * tp / (tp + fn + fp)\n # dsc = jaccard\n # voe = 2 * abs(fp + fn) / (tp + tp + fn + fp)\n # voe = 1 - dsc\n\n k2 = {\n # \"Overall Acc: \\t\": acc,\n 'Mean Acc': float(judge_nan(acc_cls)),\n # \"FreqW Acc : \\t\": fwavacc,\n 'Mean IoU': float(judge_nan(mean_iou)),\n 'F1-score': float(judge_nan(f1)),\n 'DSC': float(judge_nan(dsc)),\n 'Precision': float(judge_nan(precision)),\n 'Recall': float(judge_nan(recall)),\n }\n\n return k2", "def predict(user_id, item_id, ratings):\n\n # convert long to wide\n ratings_wide = ratings.pivot(index='user', columns='movie', values='rating')\n\n # compute user similarities\n similarities = compute_similarities(user_id, ratings_wide)\n \n prediction = predict_rating(item_id, ratings_wide, similarities, N=N_NEIGHBORS)\n \n return prediction", "def __factor_matrix(self, R, K, alpha, steps, beta, error_limit):\n # Transform regular array to numpy array\n R = numpy.array(R)\n\n # Generate P - N x K\n # Use random values to start. Best performance\n N = len(R)\n M = len(R[0])\n P = numpy.random.rand(N, K)\n\n # Generate Q - M x K\n # Use random values to start. Best performance\n Q = numpy.random.rand(M, K)\n Q = Q.T\n\n error = 0\n\n # iterate through max # of steps\n for step in xrange(steps):\n\n # iterate each cell in r\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # get the eij (error) side of the equation\n eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])\n\n for k in xrange(K):\n # (*update_rule) update pik_hat\n P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])\n\n # (*update_rule) update qkj_hat\n Q[k][j] = Q[k][j] + alpha * ( 2 * eij * P[i][k] - beta * Q[k][j] )\n\n # Measure error\n error = self.__error(R, P, Q, K, beta)\n\n # Terminate when we converge\n if error < error_limit:\n break\n\n # track Q, P (learned params)\n # Q = Products x feature strength\n # P = Users x feature strength\n self.Q = Q.T\n self.P = P\n\n self.__print_fit_stats(error, N, M)", "def recommend(self, user_id, N=10):\n scores = self.user_factors[user_id] @ self.product_factors.T\n best = np.argpartition(scores, -N)[-N:]\n return sorted(zip(best, scores[best]), key=lambda x: -x[1])", "def weighted_majority_vote(c_pred,m_pred,f_pred,acc_c,acc_m,acc_f, dataset):\n c,m,f = np.argmax(c_pred),np.argmax(m_pred),np.argmax(f_pred)\n coarse = np.zeros(2)\n middle = np.zeros(4)\n fine = np.zeros(10)\n\n if dataset == 'cifar10':\n middle = np.zeros(5)\n coarse[c] = 1\n middle[m] = 1\n fine[f] = 1\n res = np.zeros(10)\n w1 = np.log(acc_c/(1.-acc_c))\n w2 = np.log(acc_m/(1.-acc_m))\n w3 = np.log(acc_f/(1.-acc_f))\n if dataset == 'cifar10':\n for i in range(10):\n if i <2:\n res[i] = w1*coarse[0] + w2*middle[0] + w3*fine[i]\n elif 2<=i <4:\n res[i] = w1*coarse[0] + w2*middle[1] + w3*fine[i]\n elif 4 <=i<6:\n res[i] = w1*coarse[1] + w2*middle[2] + w3*fine[i]\n elif 6<=i<8:\n res[i] = w1*coarse[1] + w2*middle[3] + w3*fine[i]\n else:\n res[i] = w1*coarse[1] + w2*middle[4] + w3*fine[i]\n else :\n for i in range(10):\n if i <3:\n res[i] = w1*coarse[0] + w2*middle[0] + w3*fine[i]\n elif 3<=i <5:\n res[i] = w1*coarse[0] + w2*middle[1] + w3*fine[i]\n elif 5 <=i<8:\n res[i] = w1*coarse[1] + w2*middle[2] + w3*fine[i]\n else:\n res[i] = w1*coarse[1] + w2*middle[3] + w3*fine[i]\n 
index = np.argmax(res)\n return(index)", "def _compute_mu_factor2(*input_mols):\n mu_factor = 1\n for mol in input_mols:\n mu_factor *= np.prod(fact(mol))\n return mu_factor", "def update_model(self):\n temp_uservisits = self._uservisits\n unique_users = temp_uservisits['userid'].unique()\n for itemidx, itemid in self._allitems.iterrows():\n self.__itemid2idx[str(itemid['itemid'])] = itemidx\n self.__itemidx2id[itemidx] = str(itemid['itemid'])\n for useridx, userid in enumerate(unique_users):\n self.__userid2idx[str(userid)] = useridx\n self.__useridx2id[useridx] = str(userid)\n useritem = set(temp_uservisits[temp_uservisits['userid'] == userid]['itemid'].astype('str').values)\n allitem = set(self.__itemid2idx.keys())\n itemsnotinuser = allitem - useritem\n self.__useridx2nvitems[useridx] = list(itemsnotinuser)\n temp = pandas.DataFrame([{\"userid\": userid, \"itemid\": t, \"rating\": 0, \"timestamp\": \"NA\"} for t in itemsnotinuser])\n temp_uservisits = pandas.concat([temp_uservisits, temp])\n userid = temp_uservisits['userid'].values\n itemid = temp_uservisits['itemid'].values\n rating = temp_uservisits['rating'].values\n useridx = [self.__userid2idx[str(int(uid))] for uid in userid]\n itemidx = [self.__itemid2idx[str(int(iid))] for iid in itemid]\n model = self.__get_model(num_users=len(temp_uservisits['userid'].unique()), num_items=len(self._allitems),\n mf_dim=self.__numtopics)\n if self.__learner.lower() == \"adagrad\":\n model.compile(optimizer=Adagrad(lr=self.__learningrate), loss='binary_crossentropy')\n elif self.__learner.lower() == \"rmsprop\":\n model.compile(optimizer=RMSprop(lr=self.__learningrate), loss='binary_crossentropy')\n elif self.__learner.lower() == \"adam\":\n model.compile(optimizer=Adam(lr=self.__learningrate), loss='binary_crossentropy')\n else:\n model.compile(optimizer=SGD(lr=self.__learningrate), loss='binary_crossentropy')\n\n for epoch in range(self.__numiterations):\n t1 = time.time()\n hist = model.fit([numpy.array(useridx), numpy.array(itemidx)], numpy.array(rating),\n batch_size=self.__batchsize, nb_epoch=1, verbose=0, shuffle=True)\n t2 = time.time()\n self.__recommender1 = model\n rating = list(map(numpy.double, rating))\n self.__itemuser = csr_matrix((rating, (useridx, itemidx)), shape=(len(set(useridx)), len(set(itemidx))))\n self.__recommender2 = implicit.als.AlternatingLeastSquares(factors=self.__numtopics)\n self.__recommender2.fit(self.__itemuser)", "def create_matrix(ratings_df, jokes_df):\r\n \"\"\" note: empty entries are populated with zeros \"\"\"\r\n\r\n matrix_handler = matrix_object()\r\n\r\n num_joke_features = 5\r\n\r\n ''' add all joke features '''\r\n for row_idx in range(0, jokes_df.shape[0]):\r\n joke_idx = int(jokes_df.iloc[row_idx][\"Idx\"])\r\n isAggressive = jokes_df.iloc[row_idx][\"isAggressive\"]\r\n isIncongruence = jokes_df.iloc[row_idx][\"isIncongruence\"]\r\n generation = jokes_df.iloc[row_idx][\"Generation\"]\r\n isMillenial = (generation == \"Millenial\")\r\n isGenX = (generation == \"Gen X\")\r\n isGenZ = (generation == \"Gen Z\")\r\n\r\n if(int(isMillenial) == 1.0 and int(isGenX) == 1.0):\r\n raise Valueerror()\r\n\r\n matrix_handler.add_value(joke_idx - 1, 0, int(isAggressive))\r\n matrix_handler.add_value(joke_idx - 1, 1, int(isIncongruence))\r\n matrix_handler.add_value(joke_idx - 1, 2, int(isMillenial))\r\n matrix_handler.add_value(joke_idx - 1, 3, int(isGenX))\r\n matrix_handler.add_value(joke_idx - 1, 4, int(isGenZ))\r\n\r\n ''' add all ratings '''\r\n for row_idx in range(0, ratings_df.shape[0]):\r\n for 
joke_idx in range(1, 122):\r\n col_name = \"joke\" + str(joke_idx)\r\n matrix_handler.add_value(joke_idx - 1, row_idx + num_joke_features, ratings_df.iloc[row_idx][col_name])\r\n\r\n matrix = matrix_handler.compile_matrix()\r\n new_df = matrix_handler.to_df(matrix)\r\n\r\n return matrix, new_df", "def feature_selection_classifier_3(array2d):\n newArray2d = np.zeros([array2d.shape[0], 18])\n # female_items / female_items + male_items\n newArray2d[:, 0] = array2d[:, 4]\n # male_items / female_items + male_items\n newArray2d[:, 1] = array2d[:, 5]\n # wapp_items / items\n newArray2d[:, 2] = array2d[:, 7]\n # wftw_items / items\n newArray2d[:, 3] = array2d[:, 8]\n # mapp_items / items\n newArray2d[:, 4] = array2d[:, 9]\n # wacc_items / items\n newArray2d[:, 5] = array2d[:, 10]\n # macc_items / items\n newArray2d[:, 6] = array2d[:, 11]\n # mftw_items / items\n newArray2d[:, 7] = array2d[:, 12]\n # curvy_items / items\n newArray2d[:, 8] = array2d[:, 15]\n # msite_orders / orders\n newArray2d[:, 9] = array2d[:, 17]\n # desktop_orders / orders\n newArray2d[:, 10] = array2d[:, 18]\n # android_orders / orders\n newArray2d[:, 11] = array2d[:, 19]\n # ios_orders / orders\n newArray2d[:, 12] = array2d[:, 20]\n # other_device_orders / orders\n newArray2d[:, 13] = array2d[:, 21]\n # home_orders / orders\n newArray2d[:, 14] = array2d[:, 23]\n # other_collection_orders / orders\n newArray2d[:, 15] = array2d[:, 25]\n # average_discount_onoffer\n newArray2d[:, 16] = array2d[:, 26]\n # average_discount_used\n newArray2d[:, 17] = array2d[:, 27]\n return newArray2d", "def fit(self):\n\n rmse_old = .0\n for epoch in range(self.epochs):\n error_final = .0\n\n for user, item, feedback in self.feedback_triples:\n pu = self.p[user] + self.y_sum_rows(user)\n\n # Calculate error\n eui = feedback - self._predict_svd_plus_plus_score(user, item, pu, False)\n error_final += (eui ** 2.0)\n\n # update bu and bi\n self.bu[user] += self.bias_learn_rate * (eui - self.delta_bias * self.bu[user])\n self.bi[item] += self.bias_learn_rate * (eui - self.delta_bias * self.bi[item])\n\n # Adjust the factors\n norm_eui = eui / self.n_u[user]\n\n i_f = self.q[item]\n\n # Compute factor updates\n delta_u = np.subtract(np.multiply(eui, i_f), np.multiply(self.delta, self.p[user]))\n self.p[user] += np.multiply(self.learn_rate, delta_u)\n\n delta_i = np.subtract(np.multiply(eui, pu), np.multiply(self.delta, i_f))\n self.q[item] += np.multiply(self.learn_rate, delta_i)\n\n # update y (implicit factor)\n common_update = norm_eui * i_f\n\n for j in self.items_id_seen_by_user[user]:\n delta_y = np.subtract(common_update, self.delta * self.y[j])\n self.y[j] += self.learn_rate * delta_y\n\n rmse_new = np.sqrt(error_final / self.train_set[\"number_interactions\"])\n\n if np.fabs(rmse_new - rmse_old) <= self.stop_criteria:\n break\n else:\n rmse_old = rmse_new", "def ndpm(self):\n\n merged = pd.merge(left=self.test, right=self.predict, on=['user', 'item'], how='inner')[\n ['user', 'rating_x', 'rating_y']]\n ndpms = []\n for user in merged.user.unique():\n frame = merged[merged.user == user]\n if frame.shape[0] <= 1:\n continue\n C_plus = self.num_of_ordered_positive(frame, 'rating_x', 'rating_y')\n C_minus = self.num_of_ordered_negative(frame, 'rating_x', 'rating_y')\n C_u = self.num_of_ordered(frame, 'rating_x')\n if C_u == 0:\n continue\n C_s = self.num_of_ordered(frame, 'rating_y')\n C_u0 = C_u - (C_plus + C_minus)\n ndpms.append(1 - (C_minus + 0.5 * C_u0) / C_u)\n\n return sum(ndpms) / len(ndpms)", "def gen_W(users, items, ratings):\n\n # 
initiate graph\n user = users.tolist()\n item = items.tolist()\n rating = ratings.tolist()\n B = nx.Graph()\n B.add_nodes_from(user, bipartite=0)\n B.add_nodes_from(item, bipartite=1)\n\n # create edges\n for i in range(len(user)):\n B.add_edges_from([(user[i], item[i])])\n B[user[i]][item[i]]['weight'] = rating[i]\n\n users_unique = users.unique() \n items_unique = items.unique()\n\n # BiAdjacency matrix - for bipartite network\n W = biadjacency_matrix(B, users_unique,items_unique).toarray()\n\n # sparce form of Biadjacency matrix\n W = spa.csr_matrix(W)\n print('Shape of W: '+str(W.shape))\n\n return W, users_unique, items_unique", "def alternate_ls (u_num, Y, P, C, reg):\n\n\t# get # of items/users and # of latent factors\n\t[i_num, f_num] = Y.shape\n\n\t# output buffer\n\tX = np.zeros((u_num, f_num))\n\n\t# precalculate YtY to improve the performance\n\tYtY = Y.T * Y\n\n\t# iterate over each user/item\n\tfor u in range(u_num):\n\n\t\t# store the diagonal elements of the matrix Cu discussed in the paper in a vector\n\t\tCu = C[u,:]\n\n\t\t# store the coresponding row/column of the preference matrix\n\t\tPu = P[u,:]\n\n\t\t# compute Cu-I\n\t\tCu_I = Cu - 1\n\n\t\t# calculate Yt(Cu-I)Y\n\t\tYtCu_IY = np.zeros((f_num, f_num))\n\t\tCuIY = np.multiply(Y, Cu_I.T) # weight each row of Y with Cu-I\n\t\tfor row in range(f_num):\n\t\t\tfor col in range(f_num):\n\t\t\t\tYtCu_IY[row,col] = Y[:,row].T * CuIY[:,col]\n\t\t\n\t\t# left term : ((YtCuY + regI)^(-1)) = (YtY + Yt(Cu-I)Y + regI)^(-1)\n\t\tleft_inv = YtY + YtCu_IY + reg*np.eye(f_num)\n\t\tleft = np.linalg.inv(left_inv)\n\n\t\t# right term : YtCuPu\n\t\tright = Y.T * np.multiply(Cu.T, Pu.T)\n\n\t\t# compute the latent factor of the user/item\n\t\tx = left * right\n\t\t\n\t\t# store it in a matrix\n\t\tX[u,:] = x.T\n\n\t# return an MxF or NxF matrix\n\treturn np.matrix(X)", "def calc_score(self, user_id, item_id): \n p = np.dot(self.U[user_id], self.V[item_id])\n if self.trunc_score_rule==None:pass\n else: p=self.trunc_score_rule(p)\n \n return p", "def evaluate_ucf50_fusion():\n accs = np.zeros(3)\n ob_suffix = '-max.feat.npy.gz'\n fv_suffix = '_fv.npy.gz'\n ob_root = '/home/syq/research_final/data/features/ob_ucf50_pooled_python/'\n fv_root = '/home/syq/research_final/data/features/fv_ucf50_python/'\n fv_groups, full, sets = utility.split_data(fv_root,\n suffix=fv_suffix,\n useLooCV=False)\n\n ob_groups, _, _ = utility.split_data(ob_root,\n suffix=ob_suffix,\n useLooCV=False)\n weights = [i / 20.0 for i in range(8, 13)]\n acc_to_weights = {}\n\n for weight in weights:\n print \"Weight: %.2f\" % weight\n accs = np.zeros(2)\n for i in xrange(2):\n ts = time.time()\n Dtrain_fv, Dtest_fv, Ytrain, Ytest = utility.load_groups(\n fv_groups, np.setdiff1d(full, sets[i]),\n sets[i], scale=False, verbose=False)\n\n Dtrain_ob, Dtest_ob, Ytrain, Ytest = utility.load_groups(\n ob_groups, np.setdiff1d(full, sets[i]),\n sets[i], scale=False, verbose=False)\n\n fv_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n\n ob_clf = OneVsRestClassifier(estimator=SVC(C=10,\n cache_size=1000,\n kernel='linear',\n probability=True),\n n_jobs=-1)\n\n # Get probabilities for late fusion\n Dtrain_fv = fv_clf.fit(Dtrain_fv, Ytrain).decision_function(Dtrain_fv)\n Dtrain_ob = ob_clf.fit(Dtrain_ob, Ytrain).decision_function(Dtrain_ob)\n Dtest_fv = fv_clf.decision_function(Dtest_fv)\n Dtest_ob = ob_clf.decision_function(Dtest_ob)\n\n # Scale decision values b/w 0 and 1\n Dtrain_fv = preprocessing.normalize(Dtrain_fv)\n Dtrain_ob = 
preprocessing.normalize(Dtrain_ob)\n Dtest_fv = preprocessing.normalize(Dtest_fv)\n Dtest_ob = preprocessing.normalize(Dtest_ob)\n\n # weighted averaging\n scores_train = (Dtrain_fv * weight) + (Dtrain_ob * (1 - weight))\n latefusion_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=-1)\n latefusion_clf.fit(scores_train, Ytrain)\n\n scores_test = (Dtest_fv * weight) + (Dtest_ob * (1 - weight))\n latefusion_acc = latefusion_clf.score(scores_test, Ytest)\n print 'Fold', i, 'late fusion acc', latefusion_acc\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs[i] = latefusion_acc\n\n print \"Mean accuracy: %.3f\" % accs.mean()\n with open(\"fv_ucf50_accs_5fold_gridsearch.txt\", \"w\") as f:\n for weight, accs in acc_to_weights.items():\n f.write(str(weight) + str(accs) + '\\n')\n return acc_to_weights", "def _predict_user_item(self, user, item):\n if not isinstance(user, int):\n user = self._user_to_ndx[user]\n if not isinstance(item, int):\n item = self._item_to_ndx[item]\n\n try:\n rating_mean = self._averages[user]\n except AttributeError:\n raise RuntimeError('Must fit before predicting')\n\n other_users = [other for other in self._users if other != user and\n np.isfinite(self._votes[other][item])]\n weights = np.array([self._weight(user, other)\n for other in other_users])\n deviations = np.array([self._votes[other][item] - self._averages[other]\n for other in other_users])\n\n weight_sum = np.sum(np.absolute(weights))\n if weight_sum < _EPSILON:\n return rating_mean # No similar users, so guess their avg rating\n\n norm_const = 1 / weight_sum\n\n weighted_avg = np.sum(weights * deviations)\n return rating_mean + norm_const * weighted_avg", "def construct_classifier_for_user(user_indexed_reviews, restaurant_indexed_reviews):\t\n # compute all_reviews\n all_reviews = []\n for restaurant in restaurant_indexed_reviews:\n reviews_content = ''\n for user in restaurant_indexed_reviews[restaurant]:\n reviews = restaurant_indexed_reviews[restaurant][user]\n for review in reviews:\n reviews_content += review['text'][0:len(review['text'])-1]\n all_reviews.append(reviews_content)\n\n print 'extract feature...'\n # construct classifier\n X_total, y_total, restaurant_feature = extracttfidf_user(user_indexed_reviews, all_reviews, restaurant_indexed_reviews)\n\n print 'construct classifier...'\n i = 0\n for user in user_indexed_reviews:\n print i\n i += 1\n classifier = MultinomialNB(alpha=.01)\n X_train = X_total[user]\n y_train = y_total[user]\n if X_train != None:\n try:\n classifier.fit(X_train, y_train)\n update_rating(user, restaurant_feature, classifier, user_indexed_reviews, restaurant_indexed_reviews)\n except:\n continue", "def model_frequency(ref, alt, bases_all_reads, f_vector, error_rates):\n # model Mx: NO alt allele\n if ref == alt:\n return np.array([error_rates[ref][eb] for eb in \"ACGT-\"])\n # model Mxy: two alleles\n mat = np.zeros((K, 5))\n for b, base in enumerate(\"ACGT-\"):\n for i, observed_base in enumerate(bases_all_reads):\n mat[i][b] = error_rates[base][observed_base]\n\n m2 = (mat * f_vector)[:, [bases_index[alt], bases_index[ref]]]\n alt_ref_f_vector = (m2 / m2.sum(axis=1)[:, np.newaxis]).sum(axis=0) / K\n\n l = [alt_ref_f_vector[0], ]*5\n l[bases_index[ref]] = alt_ref_f_vector[1]\n updated_f_vector = np.array(l)\n\n return updated_f_vector", "def __init__(self, customer_vendor_full, valid_rating_mean):\r\n super(User_CF, self).__init__()\r\n self.customer_vendor_full = customer_vendor_full\r\n self.customer_vendor_ratings = 
self.select_features()\r\n self.customer_vendor_matrix = self.customer_vendor_ratings.pivot(\r\n index='customer_id', columns='vendor_id', values='mean_rating') # (26779, 100)\r\n self.rating_matrix = self.customer_vendor_matrix.fillna(0).values.astype(np.float32)\r\n self.valid_rating_mean = valid_rating_mean\r\n self.vendor2rating = self.get_vendors_mean()\r\n self.customer_similarity, = self.get_similarity()", "def __init__(self, dim=20, nIter=5, lamb=0.05, alph=40,\n user_features=[\"user\"], item_features=[\"item\"]):\n self.setParams(dim,nIter, lamb, alph)\n self.user_features = {}\n self.item_features = {}\n self.factors = {}\n\n self.user_column_names = user_features\n self.item_column_names = item_features", "def evaluate_model(test_user_item_matrix, user_mat, portfolio_mat):\r\n n = np.count_nonzero(~np.isnan(test_user_item_matrix))\r\n\r\n # keep track of the sum of squares\r\n sse = 0\r\n\r\n for user_id in test_user_item_matrix.index:\r\n for offer_id in test_user_item_matrix.columns.values:\r\n if ~np.isnan(test_user_item_matrix.loc[user_id, offer_id]):\r\n pred = predict(test_user_item_matrix, user_mat, portfolio_mat, user_id, offer_id)\r\n if pred:\r\n diff = test_user_item_matrix.loc[user_id, offer_id] - pred\r\n sse += diff ** 2\r\n return sse / n", "def make_predictions(movies, ratings_train, ratings_test):\n ###TODO\n \n user_result = [] \n \n for index,row in ratings_test.iterrows():\n userid_test = row['userId']\n #print(\"userid_test::\",userid_test) \n movieid_test = row['movieId'] \n #print(\"movieid_test::\",movieid_test) \n x = list(movies[movies.movieId==movieid_test]['features'])[0]\n #print(\"CSR_GOTT+X::\",x)\n #print(\"TYPE of CSR_GOTT_X::\",type(x))\n subset_train = ratings_train[ratings_train.userId == userid_test]\n #print(\"SUB MOVIE SET::\",subset_train)\n #print(\"TYPE of SUB MOVIE SET::\",type(x))\n total_if_zero=0\n rating_if_zero=0\n sum_main_result=0\n sum_cosine=0 \n for index1,row1 in subset_train.iterrows():\n userid_train = row1['userId']\n #print(\"userid_train::\",userid_train) \n if(userid_test == userid_train ):\n #print(\"HII IN IFFF:::\")\n movieid_train = row1['movieId']\n #print(\"movieid_train::\",movieid_train)\n rating_train = row1['rating']\n #print(\"rating_train::\",rating_train)\n total_if_zero = total_if_zero + 1 \n rating_if_zero = rating_if_zero + rating_train\n y = list(movies[movies.movieId==movieid_train]['features'])[0]\n #print(\"CSR_GOTT_Y::\",y)\n #print(\"TYPE of CSR_GOTT_Y::\",type(y))\n result_cos = cosine_sim(x,y)\n sum_main_result += result_cos * rating_train\n sum_cosine += result_cos \n \n if(sum_main_result != 0):\n user_result.append(sum_main_result/sum_cosine)\n #print(\"user_result::\",user_result) \n else:\n user_result.append(rating_if_zero / total_if_zero)\n #print(\"user_result::\",user_result) \n \n return_result_arr = np.array(user_result) \n \n return return_result_arr\n \n pass", "def predict(self, user_id, item_id):\n # DONEreturn prediction for given pair\n return self._user_factors[user_id, : ].dot(self._item_factors[item_id, :])", "def get_user_vector(self, user_id):\r\n if user_id in self.df_ratmat.index:\r\n # user from base dataset\r\n return np.array([self.df_ratmat.loc[user_id]])\r\n # construct a vector out of app user rating data\r\n movies_rated = self.get_valid_user_ratings(user_id)\r\n movie_size = self.df_ratmat.shape[1]\r\n cols = [str(i) for i in range(1, movie_size + 1)]\r\n df = pd.DataFrame(columns=cols)\r\n new_row = {}\r\n for i, r in movies_rated[['item_id', 
'rating']].iterrows():\r\n new_row[str(int(r['item_id']))] = int(r['rating'])\r\n df = df.append(new_row, ignore_index=True)\r\n # mark 0 (=not rated) if not rated by the user\r\n return df.fillna(0)", "def _factorsX(self, inputs):\n return tensor.dot(inputs[0], self.wxf)", "def print_maxes(mat):\r\n u_to_likes = load_or_create(\"/Matrix/UserIdToLikes.matrix\", create_matrix_user_likes)\r\n dict_names = load_or_create('/DICT/MovieIdToName.dict', create_dict_names)\r\n dict_ecc = load_or_create('/DICT/MovieIdToItemEccentricity.dict', create_dict_ecc)\r\n user_to_ecc = load_or_create('/DICT/UserIdToUserEccentricity.dict',createDictUserIdToUserEccentricity)\r\n dict_userid_to_moviesliked = load_or_create('/DICT/UserIdToLikedMovies.dict', create_dict_user_id_to_liked_items)\r\n\r\n dict_userid_to_recommends = dict()\r\n print(\"STARTING ECC CALC\")\r\n recommends = []\r\n for i in range(int(mat.shape[0]*0.5)):\r\n row = mat.getrow(i)\r\n if len(row.nonzero()[0]) != 0:\r\n # print(u_to_likes.getrow(i).nonzero()[1])\r\n if len(u_to_likes.getrow(i).nonzero()[1])<10 and user_to_ecc[i+1]>0:\r\n # print(\"Amount of recommends:\",len(row.nonzero()[0]))\r\n row = row.toarray()[0].tolist()\r\n max_val = max(val for val in row if str(row.index(val) + 1) not in dict_userid_to_moviesliked[i+1])\r\n print('SUM is:',sum(val for val in row if str(row.index(val) + 1) not in dict_userid_to_moviesliked[i+1]))\r\n print('SUM with all is:',sum(val for val in row))\r\n\r\n index_max=row.index(max_val) + 1\r\n\r\n recommends.append(\r\n [max_val, row.index(max_val) + 1, i + 1, [i + 1 for i in u_to_likes.getrow(i).nonzero()[1]],\r\n [row.index(max_val) + 1],user_to_ecc[i+1]])\r\n\r\n recommends = sorted(recommends, key=itemgetter(0))\r\n\r\n for i in recommends[-100:]:\r\n print(\"MAX id:\", i[1])\r\n print(\"MAX val:\", i[0])\r\n print(\"Users ECC:\",i[5])\r\n print(\"for user:\", i[2])\r\n print(\"MOVIES HE ALREADY LIKED\", 50 * \"=\")\r\n item_names_print(i[3], dict_names, dict_ecc)\r\n print(\"Movie Well recommend:\" + 50 * '*')\r\n item_names_print(i[4], dict_names, dict_ecc)\r\n print(50 * \"#\")", "def test_matrix(self, tol):\n\n res_static = qml.QFT.compute_matrix(2)\n res_dynamic = qml.QFT(wires=[0, 1]).matrix()\n res_reordered = qml.QFT(wires=[0, 1]).matrix([1, 0])\n\n expected = np.array(\n [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.0 + 0.5j, -0.5 + 0.0j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.0 - 0.5j, -0.5 + 0.0j, 0.0 + 0.5j],\n ]\n )\n\n assert np.allclose(res_static, expected, atol=tol, rtol=0)\n assert np.allclose(res_dynamic, expected, atol=tol, rtol=0)\n\n expected_permuted = [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.0 + 0.5j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, -0.0 - 0.5j, 0.0 + 0.5j],\n ]\n assert np.allclose(res_reordered, expected_permuted, atol=tol, rtol=0)", "def calculate_total(self, calcu_user_n=20, user_n=20, item_n=10, seed=1):\n self._split_data(seed=seed)\n self._set_top(user_n=user_n, item_n=item_n)\n test_user_list = list(set(self.test['userId'].unique()))\n user_list = [test_user_list[random.randint(0, len(test_user_list)) - 1]\n for i in range(calcu_user_n)]\n hit = 0 # Hit score\n all_recom = 0 # num of all recommendations, calculate the accuracy rate\n like_item = 0 # num of the item the user likes in the test set, calculate the recall rate\n all_recom_set = set()\n all_item = 
set(self.train['movieId'].unique())\n item_popular = Counter(self.train['movieId'].values)\n ret = 0\n n = 0\n print('\\nCalculate all evaluation indicators...')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user, )\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n like_item += len(user_item)\n all_recom += len(recom_item)\n all_recom_set.update(recom_item)\n for rec in set([data[0] for data in recom_data]):\n ret += math.log(1 + item_popular.get(rec))\n n += 1\n print('\\nCalculate over')\n print('Precision is: ', hit / (all_recom * 1.0))\n print('Recall is: ', hit / (like_item * 1.0))\n print('Coverage is: ', len(all_recom_set) / (len(all_item) * 1.0))\n print('Popularity is:', (ret / n * 1.0))", "def prediction(prediction_file_name, clusters_list, svd_use_flag):\n \n coords = misc_functions.getWindowCoords()\n \n test_users = range(coords[0], coords[2] + 1) \n test_items = range(coords[1], coords[3] + 1)\n \n #print \"len(test_users) = \", len(test_users)\n #print \"len(test_items) = \", len(test_items)\n #print \"test_items = \", test_items\n \n # this matrix to be written as result finally\n #misc_functions.step()\n prediction_matrix = zeros((len(test_users), len(test_items)), dtype = float)\n \n training_matrix = scipy.io.mmio.mmread(\"history.mtx\").tocsr()\n \n item_X_meta_matrix = scipy.io.mmio.mmread(\"../../../well_done/items-metas_global.mtx\").toarray()\n \n # getting meta matrices for corresponding using metas\n meta_ctr = 0\n meta_matrices = []\n for meta in METAS_TO_USE:\n if svd_use_flag:\n meta_matrice_file_name = \"users-\" + METAS_TO_USE[meta] + \".svd.mtx\"\n else:\n meta_matrice_file_name = \"users-\" + METAS_TO_USE[meta] + \".mtx\"\n exec(\"meta_matrices.append(scipy.io.mmio.mmread(\\\"\" + meta_matrice_file_name + \"\\\").toarray())\")\n\n #user_counter = 0\n #for user in test_users:\n for cur_cluster in clusters_list:\n \n #print \"cur_cluster[0] = \", cur_cluster[0]\n user = int (cur_cluster[0].split(\"\\t\")[1])\n #print \"user #\", user\n \n #user_metas = {} - changed to list because of problem with dimension\n user_metas = []\n \n values = zeros((len(METAS_TO_USE), len(test_items)), dtype = float)\n meta_ctr = 0\n for meta in METAS_TO_USE:\n \n #print \" meta_matrices = \", meta_matrices\n #print \" meta_matrices[meta_ctr] = \", meta_matrices[meta_ctr]\n user_vector = meta_matrices[meta_ctr][user]\n #print \" user_vector = \", user_vector\n #print \" len(user_metas) = \", len(user_metas)\n #print \" meta_ctr = \", meta_ctr\n #print \"meta = \", meta\n #misc_functions.step()\n \n # normalizing counts of visited metas to use them as weights later\n if max(user_vector) != 0:\n user_metas.append(1.0 * user_vector / max(user_vector))\n else:\n user_metas.append(zeros((len(user_vector), ), dtype = float))\n #print \" user_metas[meta_ctr] = \", user_metas[meta_ctr]\n #print \" user_metas[meta_ctr].shape = \", user_metas[meta_ctr].shape\n \n #for item in test_items:\n for cluster in cur_cluster[1 : ]:\n start_cluster_item = int(cluster.split(\"\\t\")[0])\n stop_cluster_item = int(cluster.split(\"\\t\")[2])\n \n cluster_items = range(start_cluster_item, stop_cluster_item + 1)\n \n for item in cluster_items:\n meta_value = item_X_meta_matrix[item, meta]\n \n # PRICE\n if meta == 8:\n meta_value = priceToPriceCat(meta_value)\n \n # CITY HEURISTIC\n if meta == 11:\n if user_metas[meta_ctr][meta_value - 1] < 
CITY_TRESHOLD:\n values[:, item - coords[1]] *= CITY_COEF\n \"\"\"\n # DAYTIME\n if meta == 17:\n meta_value = dayTime(meta_value)\n \"\"\"\n \n #print \" meta_value = \", meta_value\n #print \" item = \", item\n #step()\n values[meta_ctr][item - coords[1]] = (user_metas[meta_ctr])[meta_value - 1]\n \n \"\"\"HEURISTICS \"\"\"\n \n \n \n \n \n \"\"\"\\\\ HEURISTICS \"\"\"\n\n meta_ctr += 1\n #print \"values[:, 0:10] = \", values[:, 0:10]\n prediction_vector = numpy.sum(META_WEIGHTS * values, axis = 0)\n #print \"prediction_vector[0:10] = \", prediction_vector[0:10]\n #print \"sum(prediction_vector) = \", sum(prediction_vector)\n prediction_matrix[user - coords[0]] = prediction_vector\n \n #step()\n \n# ===== END OF MAIN CYCLE ===== \n\n result_matrix = scipy.sparse.csr_matrix(prediction_matrix)\n scipy.io.mmio.mmwrite(prediction_file_name, result_matrix, field = 'real', precision = 5)", "def test_init_ratings():\n env = FixedRating(num_users=50,\n num_items=50,\n rating_frequency=1.0,\n num_init_ratings=100)\n env.seed(0)\n _, _, ratings = env.reset()\n assert len(ratings) == 100\n for (user_id, item_id), (rating, context) in ratings.items():\n assert context.shape == (0,)\n assert user_id < 50\n assert item_id < 50\n if rating == 5.0:\n assert item_id >= 25\n else:\n assert item_id < 25", "def proba_fm(m_pred,f_pred, dataset):\n p = np.zeros(10)\n if dataset == 'cifar10':\n for i in range(10):\n if i <4:\n if i <2:\n p[i] = (m_pred[0])*(f_pred[i]/np.sum(f_pred[0:2]))\n else:\n p[i] = (m_pred[1])*(f_pred[i]/np.sum(f_pred[2:4]))\n else:\n if i <6:\n p[i] = (m_pred[2])*(f_pred[i]/np.sum(f_pred[4:6]))\n elif i <8:\n p[i] = (m_pred[3])*(f_pred[i]/np.sum(f_pred[6:8]))\n else:\n p[i] = (m_pred[4])*(f_pred[i]/np.sum(f_pred[8:]))\n else :\n for i in range(10):\n if i <5:\n if i <3:\n p[i] = (m_pred[0])*(f_pred[i]/np.sum(f_pred[0:3]))\n else:\n p[i] = (m_pred[1])*(f_pred[i]/np.sum(f_pred[3:5]))\n else:\n if i <8:\n p[i] = (m_pred[2])*(f_pred[i]/np.sum(f_pred[5:8]))\n else:\n p[i] = (m_pred[3])*(f_pred[i]/np.sum(f_pred[8:]))\n return(p)", "def _create_sparse_train_and_test(ratings, n_users, n_items):\n \n # pick a random set of data as testing data, sorted ascending\n test_set_size = len(ratings) / TEST_SET_RATIO\n test_set_idx = np.random.choice(xrange(len(ratings)), size=test_set_size, replace=False)\n test_set_idx = sorted(test_set_idx)\n \n # use the remaining data to create a training set\n ts_ratings = ratings[test_set_idx]\n tr_ratings = np.delete(ratings, test_set_idx, axis=0)\n \n # create training and test matrices as coo_matrix\n u_tr, i_tr, r_tr = zip(*tr_ratings)\n tr_sparse = coo_matrix((r_tr, (u_tr, i_tr)), shape=(n_users, n_items))\n u_ts, i_ts, r_ts = zip(*ts_ratings)\n test_sparse = coo_matrix((r_ts, (u_ts, i_ts)), shape=(n_users, n_items))\n \n return tr_sparse, test_sparse", "def prediction(uid, pair, rating_bd, sim_bd, item_bd):\n iid, real_rating = pair[0], pair[1]\n if iid not in sim_bd.value.keys():\n return ()\n iid_neighbors = [\n (i[0], i[1], rating_bd.value[i[0]]) for i in sim_bd.value[iid]]\n average_iid_rating = item_bd.value[iid][0]\n sim_rating = []\n for info in iid_neighbors:\n niid, nsim, ratings = info\n sim_rating += [\n (iid, nsim, rating[1] - item_bd.value[niid][0], rating[2])\n for rating in ratings if uid in rating[0]]\n if len(sim_rating) != 0:\n sim_ratings = [\n (line[1] * line[2], abs(line[1]), line[3])\n for line in sim_rating]\n predicted_rating_no_decay = average_iid_rating + sum(\n map(lambda line: line[0], sim_ratings)) / sum(\n map(lambda line: 
line[1], sim_ratings))\n predicted_rating_decay = \\\n average_iid_rating + add_decay(sim_ratings)\n else:\n predicted_rating_no_decay = average_iid_rating\n predicted_rating_decay = average_iid_rating\n return iid, real_rating, \\\n self.bound_rating(predicted_rating_no_decay), \\\n self.bound_rating(predicted_rating_decay)", "def select_next(X, U, mu,\n scoremethod='lowhigh',\n missingmethod='none',\n feature_weights=[],\n oldscores=[], oldreproj=[]):\n\n print(\"------------ SELECTING --------------\")\n if len(U) == 0:\n printt(\"Empty DEMUD model: selecting item number %d from data set\" % \\\n (opts['iitem']))\n return opts['iitem'], [], []\n\n if X.shape[1] < 1 or len(U) == 0 or len(mu) == 0:\n printt(\"Error: No data in X and/or U and/or mu.\")\n return None, [], []\n\n if X.shape[0] != U.shape[0] or X.shape[0] != mu.shape[0]:\n printt(\"Mismatch in dimensions; must have X mxn, U mxk, mu mx1.\")\n return None, [], []\n\n # If oldscores is empty, compute the score for each item\n if len(oldscores) == 0:\n (scores, reproj) = score_items(X, U, mu, scoremethod, missingmethod)\n elif len(oldreproj) == 0:\n printt(\"Error: oldscores provided, but not oldreproj.\")\n return None, [], []\n else: # both are valid, so use them here\n (scores, reproj) = (oldscores, oldreproj)\n\n # Select and return index of item with max reconstruction error,\n # plus the updated scores and reproj\n m = scores.argmax()\n #print('mu:',mu)\n #print('selected:', X[:,m])\n #print('selected-mu:', (X-mu)[:,m])\n #print('reproj:', reproj[:,m])\n #print('reproj-mu:', (reproj-mu)[:,m])\n #input()\n\n return m, scores, reproj", "def test_conversion():\r\n f1 = factor([0,1],[2,2],scipy.rand(4))\r\n f2 = factor([1,2],[2,2],scipy.rand(4))\r\n f3 = factor([3],[2],scipy.rand(2))\r\n\r\n F = FactorList([f1,f2,f3])\r\n theta = factors2ExpFam(F)\r\n F2 = expfam2Factors(theta)\r\n ratio = F2.JointDistn().val/ (F.JointDistn().val)\r\n ratio = ratio/ratio[0]\r\n print scipy.allclose(ratio,1)", "def get5x5matrix(self): #modified from nxvasc get3x3matrix()\n try:\n i = na.identity(3)\n \n self.d124 = i.copy()\n self.ds124 = na.zeros(124,na.float64)\n \n for k in range(1,124):\n self.d124 = na.concatenate((self.d124,i))\n# print len(self.d124)\n count = 0\n a = []\n for k in range(-2,3):\n for j in range(-2,3):\n for i in range(-2,3):\n if( i != 0 or j != 0 or k != 0 ):\n self.ds124[count] = math.sqrt(i**2+j**2+k**2)\n count += 1\n a.append(i)\n a.append(j)\n a.append(k)\n# print len(a)\n a = na.reshape(na.array(a),(372,1))\n# print len(self.d124)\n self.d124 = na.concatenate((self.d124,a),axis=1)\n except Exception as error:\n print(\"failed in get5x5matrix(): \", error)", "def fit(self, user_x_product, latent_features_guess=2, learning_rate=0.0002, steps=5000, regularization_penalty=0.02, convergeance_threshold=0.001):\n print 'training model...'\n return self.__factor_matrix(user_x_product, latent_features_guess, learning_rate, steps, regularization_penalty, convergeance_threshold)", "def update_factors(self,u,i,j,update_u=True,update_i=True):\n #print(\"session run\")\n loss_v = self.sess.run(self.train_step , feed_dict={\n self.u: u,\n self.i: i,\n self.j: j})\n\n returnText = \"\"\n\n if self.alg_type == \"TFL\" or self.alg_type == \"TFLWM\":\n sum_lambda = 0\n for k in self.sim_matrix_names:\n sum_lambda += abs(self.sim_lambda[k].eval())\n #print(sum_lambda,self.sim_lambda)\n for k in self.sim_matrix_names:\n if math.isnan(sum_lambda):\n print(\"sim_lambda overflow\")\n tf.assign(self.sim_lambda[k], [self.sim_lambda_zero], 
validate_shape=False).eval()\n returnText = \"sim_lambda overflow\"\n else:\n tf.assign(self.sim_lambda[k], self.sim_lambda[k].eval()/sum_lambda).eval()\n else:\n for k in self.sim_matrix_names:\n val = self.sim_lambda[k].eval()\n if math.isnan(val[0]):\n print(\"sim_lambda overflow\")\n tf.assign(self.sim_lambda[k], [self.sim_lambda_zero], validate_shape=False).eval()\n returnText = \"sim_lambda overflow\"\n if val[0] <= 0.0:\n tf.assign(self.sim_lambda[k], [self.delta], validate_shape=False).eval()\n elif val[0] >= 1.0:\n tf.assign(self.sim_lambda[k], [self.one - self.delta], validate_shape=False).eval()\n\n return returnText", "def calc_adv_U(self):\n num_U = 0\n adv_U = numpy.zeros((3,3), float)\n\n for atm in self:\n ## use the atom's U matrix if it exists, otherwise use the\n ## temperature factor\n\n if atm.U is not None:\n adv_U += atm.U\n num_U += 1\n\n return adv_U / num_U", "def calculate_user_similarity_profile(self, ratings_vector):\r\n num_users, num_movies = self.ratings_matrix.get_shape()\r\n\r\n user_similarities = sp.dok_matrix((1, num_users))\r\n for i in range(num_users):\r\n\r\n user_similarities[0, i] = self.calculate_pairwise_user_similarity(self.ratings_matrix.getrow(i), ratings_vector)\r\n\r\n return user_similarities.tocsr()", "def calc_metrics(metric_scores_list):\n\n N_split, N_miss, N_add, Q_P, Q_R, Q_F, N_gt, N_pred = [], [], [], [], [], [], [], []\n Q_rand, Q_jaccard, Q_aggregated_jaccard, Q_ctc, Q_piou = [], [], [], [], []\n tp, fp, fn = [], [], []\n\n for score in metric_scores_list:\n N_split.append(score['N_split']), N_miss.append(score['N_miss']), N_add.append(score['N_add'])\n Q_P.append(score['Q_P']), Q_R.append(score['Q_R']), Q_F.append(score['Q_F'])\n Q_rand.append(score['Q_rand']), Q_jaccard.append(score['Q_jaccard'])\n Q_aggregated_jaccard.append(score['Q_aggregated_jaccard'])\n if \"Q_ctc\" in score:\n Q_ctc.append(score['Q_ctc']), \n Q_piou.append(score['Q_piou'])\n N_gt.append(score['N_gt']), N_pred.append(score['N_pred'])\n tp.append(score['tp']), fp.append(score['fp']), fn.append(score['fn'])\n\n N_split, N_miss, N_add = np.array(N_split), np.array(N_miss), np.array(N_add)\n N_gt, N_pred = np.array(N_gt), np.array(N_pred)\n tp, fp, fn = np.array(tp), np.array(fp), np.array(fn)\n Q_P_macro, Q_R_macro, Q_F_macro = np.mean(np.array(Q_P)), np.mean(np.array(Q_R)), np.mean(np.array(Q_F))\n Q_P_micro = np.sum(tp) / (np.sum(tp) + np.sum(fp)) if (np.sum(tp) + np.sum(fp)) > 0 else 0\n Q_R_micro = np.sum(tp) / (np.sum(tp) + np.sum(fn)) if (np.sum(tp) + np.sum(fn)) > 0 else 0\n Q_rand_macro, Q_jaccard_macro = np.mean(np.array(Q_rand)), np.mean(np.array(Q_jaccard))\n Q_aggregated_jaccard_macro = np.mean(np.array(Q_aggregated_jaccard))\n Q_ctc_macro, Q_piou_macro = np.mean(np.array(Q_ctc)), np.mean(np.array(Q_piou))\n\n metrics = {\n 'Q_split_micro': float(np.sum(N_split) / np.sum(N_gt)),\n 'Q_split_macro': float(np.mean(N_split / N_gt)),\n 'Q_miss_micro': float(np.sum(N_miss) / np.sum(N_gt)),\n 'Q_miss_macro': float(np.mean(N_miss / N_gt)),\n 'Q_add_micro': float(np.sum(N_add) / np.sum(N_gt)),\n 'Q_add_macro': float(np.mean(N_add / N_gt)),\n 'N_gt': int(np.sum(N_gt)),\n 'N_pred': int(np.sum(N_pred)),\n 'Q_rand_macro': float(Q_rand_macro),\n 'Q_jaccard_macro': float(Q_jaccard_macro),\n 'Q_aggregated_jaccard_macro': float(Q_aggregated_jaccard_macro),\n 'Q_ctc_macro': float(Q_ctc_macro),\n 'Q_piou_macro': float(Q_piou_macro),\n 'Q_P_micro': float(Q_P_micro),\n 'Q_P_macro': float(Q_P_macro),\n 'Q_R_micro': float(Q_R_micro),\n 'Q_R_macro': float(Q_R_macro),\n 
'Q_F_macro': float(Q_F_macro),\n 'Q_F_micro': float(2 * Q_P_micro * Q_R_micro / (Q_P_micro + Q_R_micro)) if (Q_P_micro + Q_R_micro) > 0 else 0\n }\n return metrics", "def multiply_fisher_factor(self, vector: jnp.ndarray) -> jnp.ndarray:\n return utils.scalar_mul(\n self.multiply_fisher_factor_unweighted(vector), jnp.sqrt(self.weight))", "def predict(self, u, i, P=None, Q=None, F=None, w=None, user_bias=None, item_bias=None):\n if P is None:\n P = self.P\n if Q is None:\n Q = self.Q\n if F is None:\n F = self.F\n if w is None:\n w = self.w\n if user_bias is None:\n user_bias = self.user_bias\n if item_bias is None:\n item_bias = self.item_bias\n\n known_user = self._known('user', u)\n known_item = self._known('item', i)\n rui_hat = self.mean\n if known_user:\n rui_hat += user_bias[u]\n if known_item:\n rui_hat += item_bias[i]\n if known_user and known_item:\n F_sum = np.sum(F[self.Fr[u], :], axis=0)\n #F_num = np.sqrt(self.Fr[u].shape[0]) # Try without sqrt\n F_num = self.Fr[u].shape[0]\n if F_num > 0:\n F_sum /= F_num\n rui_hat += (F_sum + w * P[u, :]).dot(Q[:, i])\n # Apply potential non-linearity (activation) g\n rui_hat = self.g(rui_hat)\n return rui_hat", "def build_user_user_similarity_matrix(self, event_data):\n self.compute_user_user_sim_base_on_common_items()\n self.standardize_sim_values()", "def test_lu_factor():\n\t#[A, b] = lu_read('test1.txt')\n\t# it is poor form to read an external file into a test function, as above\n\tA = np.array([\n\t\t[ 2., 3., -4., 2.],\n\t\t[-4., -5., 6., -3.],\n\t\t[ 2., 2., 1., 0.],\n\t\t[-6., -7., 14., -4.]])\t\n\tLU,p = lu_factor(A, pivot=False)\n\tLU_soln = np.array([\n\t\t[ 2, 3,-4, 2],\n\t\t[-2, 1,-2, 1],\n\t\t[ 1,-1, 3,-1],\n\t\t[-3, 2, 2, 2]])\t\n\tassert norm(LU - LU_soln) < 1.e-10\t\n\n\n\t# test 2\n\t[A2, b2] = lu_read('test2.txt')\t\t\t\t\t\t# read a matrix and RHS vector\n\tLU2,p2 = lu_factor(A2) \t\t\t\t\t\t\t\t# change display to False when LU_FACTOR working\n\tLU_soln2 = np.array([\n\t\t [0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 1],\n\t\t [-100., 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 100],\n\t\t [0., -100., 0.01, 0., 0., 0., 0., 0., 0., 0., 10000],\n\t\t [0., 0., -100., 0.01, 0., 0., 0., 0., 0., 0., 1000000],\n\t\t [0., 0., 0., -100., 0.01, 0., 0., 0., 0., 0., 100000000],\n\t\t [0., 0., 0., 0., -100., 0.01, 0., 0., 0., 0., 10000000000],\n\t\t [0., 0., 0., 0., 0., -100., 0.01, 0., 0., 0., 1000000000000],\n\t\t [0., 0., 0., 0., 0., 0., -100., 0.01, 0., 0., 100000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., -100., 0.01, 0., 10000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., -100, 0.01, 1000000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., 0., -100., 100000000000000000000]])\n\tassert norm(LU2 - LU_soln2) < 1.e-10" ]
[ "0.6282623", "0.62621325", "0.60587424", "0.6045789", "0.6040702", "0.6002245", "0.5951851", "0.59179777", "0.59059614", "0.58943605", "0.589419", "0.587945", "0.5858747", "0.58264637", "0.5798391", "0.57919794", "0.574544", "0.5737683", "0.5693354", "0.5668427", "0.56560904", "0.565145", "0.5631024", "0.5595122", "0.55878645", "0.5560108", "0.55080867", "0.5492173", "0.5471623", "0.54714626", "0.5467762", "0.54663336", "0.5445127", "0.5433413", "0.5393641", "0.53686476", "0.53676146", "0.53589565", "0.53349036", "0.53346205", "0.53162575", "0.52985704", "0.52616876", "0.5258938", "0.5252249", "0.52483475", "0.52311176", "0.52225137", "0.5220475", "0.52171636", "0.5198918", "0.51938695", "0.5181593", "0.51795137", "0.517728", "0.51635116", "0.5155167", "0.5149963", "0.51439816", "0.51246136", "0.5115969", "0.5105796", "0.5086189", "0.50842553", "0.5078923", "0.5075725", "0.5066789", "0.5055791", "0.505126", "0.5040315", "0.50370866", "0.5033983", "0.5028434", "0.5021411", "0.5020064", "0.5016048", "0.4992419", "0.49900427", "0.49797973", "0.49795952", "0.49742973", "0.49736246", "0.49720296", "0.49668035", "0.49609035", "0.49598455", "0.49560553", "0.49484748", "0.4937664", "0.49338803", "0.4931834", "0.4930841", "0.49298579", "0.49219868", "0.4919247", "0.49165457", "0.49121004", "0.49035972", "0.49034655", "0.49016014" ]
0.72323316
0
Return list of choices for combo box, each a tuple of edit text and any annotation text
def getEditChoices(self, currentText=''): return [(text, '') for text in self.formatList]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def select_combo_text(cb, text, index=0):\n i = 0\n for n in cb.get_model():\n if n[index] == text:\n break\n i += 1\n cb.set_active(i)", "def application_command_autocomplete_choice_builder(name, value):\n return {\n 'name': name,\n 'value': value,\n }", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def choices(self):\n return tuple(self._choices)", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def fill_combobox_attributes(self):\n\n list_char = [\"\"]\n list_num = [\"\"]\n if self.ui.radioButton_file.isChecked():\n for a in self.attributes:\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n else:\n for a in self.attributes:\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n self.ui.comboBox_num_attributes.blockSignals(True)\n self.ui.comboBox_char_attributes.blockSignals(True)\n self.ui.comboBox_num_attributes.clear()\n self.ui.comboBox_char_attributes.clear()\n self.ui.comboBox_char_attributes.addItems(list_char)\n 
self.ui.comboBox_num_attributes.addItems(list_num)\n self.ui.comboBox_num_attributes.blockSignals(False)\n self.ui.comboBox_char_attributes.blockSignals(False)", "def getComboTerms(tuples):\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def get_choices(cls):\n return cls.values.items()", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def create_combo(self) -> typing.Iterable[Combo]:\n raise NotImplementedError()", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice>\" +\r\n \"tag; got {0} instead\".format(choice.tag)\r\n )\r\n\r\n components = []\r\n choice_text = ''\r\n if choice.text is not None:\r\n choice_text += choice.text\r\n # Initialize our dict for the next content\r\n adder = {\r\n 'type': 'text',\r\n 'contents': choice_text,\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n components.append(adder)\r\n\r\n for elt in choice:\r\n # for elements in the choice e.g. 
<text> <numtolerance_input>\r\n adder = {\r\n 'type': 'text',\r\n 'contents': '',\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n tag_type = elt.tag\r\n # If the current `elt` is a <numtolerance_input> set the\r\n # `adder`type to 'numtolerance_input', and 'contents' to\r\n # the `elt`'s name.\r\n # Treat decoy_inputs and numtolerance_inputs the same in order\r\n # to prevent students from reading the Html and figuring out\r\n # which inputs are valid\r\n if tag_type in ('numtolerance_input', 'decoy_input'):\r\n # We set this to textinput, so that we get a textinput html\r\n # element.\r\n adder['type'] = 'textinput'\r\n adder['contents'] = elt.get('name')\r\n else:\r\n adder['contents'] = elt.text\r\n\r\n # Add any tail text(\"is the mean\" in the example)\r\n adder['tail_text'] = elt.tail if elt.tail else ''\r\n components.append(adder)\r\n\r\n # Add the tuple for the current choice to the list of choices\r\n choices.append((choice.get(\"name\"), components))\r\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def combo_callback(self, eventobj):\n print(self.combo_input.get()) # print name\n print(self.combo_input.current()) # print index", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def _build_dropdown(options):\n return [(x, x) for x in options]", "def set_choices(self, index, choices):\n if len(choices) == 1:\n self._label(index)\n self._widgets[index][\"text\"] = str(choices[0])\n else:\n self._combo(index)\n self._widgets[index][\"values\"] = [str(t) for t in choices]\n width = max(len(str(t)) for t in choices)\n width = max(5, width)\n self._widgets[index][\"width\"] = width", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice> tag; got %s instead\"\r\n % choice.tag)\r\n choices.append((choice.get(\"name\"), stringify_children(choice)))\r\n return choices", "def __init__(self, \n num_fld=1, \n lab_txt=[\"1\"], \n txt_fld=[\"1\"], \n title_txt=\"test\", \n comb_txt=[],\n comb_lab_txt=[], \n comb_num=0, \n root_x=50, \n root_y=50):\n super().__init__()\n self.geometry(f'+{root_x}+{root_y}') #head=y+20px\n self.str_in=[]\n self.title(title_txt)\n if comb_txt:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n self.comb=[]\n self.act=[]\n lab=[0]*num_fld\n lab_comb=[0]*comb_num\n else:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n lab=[0]*num_fld\n self.comb=[]\n self.act=[]\n for i in range(num_fld):\n self.name[i]=tk.StringVar()\n ent[i]=tk.Entry(self,textvariable=self.name[i])\n ent[i].insert(0, txt_fld[i])\n lab[i] = tk.Label(self,width=15, text=lab_txt[i])\n lab[i].pack()\n ent[i].pack()\n for i in range(comb_num):\n lab_comb[i]=tk.Label(self,width=35, 
text=comb_lab_txt[i])\n self.comb.append(ttk.Combobox(self, values=comb_txt))\n lab_comb[i].pack()\n self.comb[i].pack()\n self.comb[i].current(1)\n\n but_ac=tk.Button(self, text=\"Accept\", command=self.ins)\n but_ac.pack()\n self.mainloop", "def input_choices_from_list(choices, text):\n no_courses_text = \"\"\"\n init will only list the courses you are enrolled in\n and there seem to be none.\n Either enrol in a course or add the course id as command line argument.\n \"\"\"\n if choices is None or len(choices) == 0:\n print(no_courses_text)\n raise SystemExit(1)\n\n digits = str(math.ceil(math.log10(len(choices))))\n format_str = '{:' + digits + 'd} {}'\n for n, c in enumerate(choices):\n print(format_str.format(n, c))\n try:\n return [int(c) for c in input(text).split()]\n except EOFError:\n return []", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def combobox(self):\n return self._combo", "def widgets_from_abbreviations(self, seq):\n result = []\n for name, abbrev, default in seq:\n widget = self.widget_from_abbrev(abbrev, default)\n if not (isinstance(widget, ValueWidget) or isinstance(widget, fixed)):\n if widget is None:\n raise ValueError(\"{!r} cannot be transformed to a widget\".format(abbrev))\n else:\n raise TypeError(\"{!r} is not a ValueWidget\".format(widget))\n if not widget.description:\n widget.description = name\n widget._kwarg = name\n result.append(widget)\n return result", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def get_chosen_options(user):\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n ocl = eval(user_application.options_selected)\n chosen_options = []\n for oc in ocl:\n chosen_options.append(Option.objects.get(opt_code=int(oc))) \n return chosen_options", "def build_comboboxes(activities, events):\n global comboboxes\n # For each activity set up a selector for an event\n\n for activity in activities:\n\n # Setup frame for better display in gui\n frame = Frame(main_window)\n frame.configure(background=\"gray30\")\n\n # Label the left column as activity in a model + \"beautify gui\"\n text = \"Activity name (model):\"\n Label(frame, text=text, bg=\"gray30\", fg=\"white\", padx=5).grid(column=0, row=0)\n Label(frame, text=activity, bg=\"gray30\", fg=\"white\").grid(column=0, row=1)\n\n # Set up the combobox for an event\n combo = Combobox(frame)\n combo['values'] = events\n\n # If activity is in events preselect the current one\n if activity in events:\n combo.current(events.index(activity))\n\n # Label the combobox and place label and box in frame\n Label(frame, text=\"Event name (log):\", bg=\"gray30\", fg=\"white\", padx=5).grid(column=1, row=0)\n combo.grid(column=1, row=1)\n\n # If the last activity in the graph is handled then do not write a separator\n if activity != activities[-1]:\n Separator(frame, orient=\"horizontal\").grid(row=2, columnspan=2, sticky=\"ew\", pady=10)\n\n comboboxes[activity] = combo\n # place the frame in the main_window\n frame.grid(column=0)", "def get_generic_choices(model, key, allow_null=False):\n CHOICES = [('', '--------')] if allow_null else []\n for i in model.objects.all().values_list(key, flat=True).distinct():\n CHOICES.append((str(i), str(i)))\n CHOICES.sort()\n\n return CHOICES", "def _combobox_choice(self, _=None):\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = 
int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)", "def widgets(parameter: Parameter):\n widgets = []\n for key in parameter.keys():\n textEdit = QTextEdit()\n textEdit.setText(key)\n widgets.append(textEdit)\n if isinstance(parameter[key], Enum):\n comboBox = MyQtEnumComboBox()\n comboBox.fillValues(type(parameter[key]))\n widgets.append(comboBox)\n elif isinstance(parameter[key], bool):\n comboBox = QComboBox()\n comboBox.addItems((\"False\", \"True\"))\n widgets.append(comboBox)\n else:\n textEdit = QTextEdit()\n textEdit.setText(str(parameter[key]))\n widgets.append(textEdit)\n for widget in widgets:\n widget.setFixedHeight(30)\n return widgets", "def _getBrailleRegionsForComboBox(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForComboBox\", obj)\n\n regions = []\n\n focusedRegionIndex = 0\n label = self._script.getDisplayedLabel(obj)\n if label and (len(label) > 0):\n regions.append(braille.Region(label + \" \"))\n focusedRegionIndex = 1\n\n # Check to see if the text is editable. If so, then we want\n # to show the text attributes (such as selection -- see bug\n # 496846 for more details).\n #\n textObj = None\n for child in obj:\n if child and child.getRole() == pyatspi.ROLE_TEXT:\n textObj = child\n if textObj and textObj.getState().contains(pyatspi.STATE_EDITABLE):\n textRegion = braille.Text(textObj)\n regions.append(textRegion)\n else:\n displayedText = self._script.getDisplayedText(obj)\n if displayedText:\n regions.append(braille.Region(displayedText))\n\n regions.append(braille.Region(\n \" \" + rolenames.getBrailleForRoleName(obj)))\n\n # Things may not have gone as expected above, so we'll do some\n # defensive programming to make sure we don't get an index out\n # of bounds.\n #\n if focusedRegionIndex >= len(regions):\n focusedRegionIndex = 0\n if len(regions) == 0:\n focusedRegion = None\n else:\n focusedRegion = regions[focusedRegionIndex]\n\n # [[[TODO: WDW - perhaps if a text area was created, we should\n # give focus to it.]]]\n #\n return [regions, focusedRegion]", "def form_SelectChoiceCallableOptions(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n def _():\n options = [(1,'a'),(2,'b'),(3,'c')]\n for option in options:\n yield option\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(_)\n return form", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n 
h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict(\n [\n (\n # This line below is modifed.\n item.pk,\n self.display_value(item),\n )\n for item in queryset\n ]\n )", "def test_rendering_combobox(qtbot):\n layer = Image(np.random.rand(8, 8))\n qtctrl = QtImageControls(layer)\n qtbot.addWidget(qtctrl)\n combo = qtctrl.renderComboBox\n opts = {combo.itemText(i) for i in range(combo.count())}\n rendering_options = {\n 'translucent',\n 'additive',\n 'iso',\n 'mip',\n 'minip',\n 'attenuated_mip',\n 'average',\n }\n assert opts == rendering_options\n # programmatically updating rendering mode updates the combobox\n layer.rendering = 'iso'\n assert combo.findText('iso') == combo.currentIndex()", "def get_choicesdata(self):\n selected_value = self.get_cleaned_value()\n choicesdata = []\n for value, label in self.get_choices_cached():\n is_selected = value == selected_value\n url = self.build_set_values_url(values=[value])\n choicesdata.append({\n 'url': url,\n 'label': label,\n 'is_selected': is_selected,\n 'dom_id': '{}_{}'.format(self.get_inputfield_dom_id(), value)\n })\n return choicesdata", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def get_poll_choices(self, games: [Game]) -> [dict]:\n answer_texts = []\n for g in games:\n answer_texts.append(g.name + \" - \" + g.genre)\n answer_texts = sorted(answer_texts, key=str.lower)\n poll_choices = []\n for at in answer_texts:\n poll_choices.append({\"text\": at})\n return poll_choices", "def __str__(self):\n return \"choice_text: \" + self.choice_text", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def get_choices_new_protected():\n ret = []\n ret.append( (1, _(u'Nur Community-Mitglieder dürfen neue Beiträge leisten')) )\n ret.append( (-1, _(u'Offener Zugang')) )\n return ret", "def comboBox(args: list, slot) -> QComboBox:\n comboBox = QComboBox()\n comboBox.addItems(args[0])\n comboBox.currentTextChanged.connect(slot)\n return comboBox", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def make_ddls( self, parent ):\n # ---- 0\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n 
textvariable = \"self.arg_2_var\" )\n\n a_widget.bind( \"<<ComboboxSelected>>\", self.sync_ddl_0 )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_0_widget = a_widget\n\n # ---- 1\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n self.ddl_1_widget = a_widget\n\n # ---- 2\n a_widget = ttk.Combobox( parent,\n width = self.arg_width + 5,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_2_widget = a_widget\n\n # self.ddl_widgets = [ self.ddl_0_widget, self.ddl_1_widget, self.ddl_2_widget, ]\n # print( self.ddl_widgets[ 0 ] == self.ddl_widgets[ 1 ])\n\n #self.load_ddl_0( )", "def __str__(self):\n return self.choice_text", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def comboBoxes(self):\r\n # Cities Combo Button\r\n self.comboCities = QComboBox()\r\n self.comboCities.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboCities.addItems(\r\n ['Girón', 'Piedecuesta', 'Floridablanca', 'Bucaramanga'])\r\n self.grid.addWidget(self.comboCities, 6, 1, 1, 2)\r\n self.comboCities.setCurrentText(\"Bucaramanga\")\r\n # Payment Combo Button\r\n self.comboPayment = QComboBox()\r\n self.comboPayment.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboPayment.addItems(['Efectivo', 'Nequi'])\r\n self.grid.addWidget(self.comboPayment, 7, 1, 1, 2)", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def _find_options(self):\r\n elements = self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def __str__(self):\n return gettext('One of %s') % self._get_choices_str()", "def initDefaultChoices(self):\n return []", "def list_selector_widget(members=None,\n preselect=None,\n entry=False,\n callback=None):\n store, i=generate_list_model(members,\n active_element=preselect)\n\n if entry:\n combobox=gtk.ComboBoxEntry(store, column=0)\n else:\n combobox=gtk.ComboBox(store)\n cell = gtk.CellRendererText()\n combobox.pack_start(cell, expand=True)\n combobox.add_attribute(cell, 'text', 0)\n combobox.add_attribute(cell, 'background', 2)\n\n combobox.set_active(-1)\n if i is None:\n i = store.get_iter_first()\n if i is not None:\n combobox.set_active_iter(i)\n\n if entry:\n def get_current_element(combo):\n try:\n return 
combo.get_model().get_value(combo.get_active_iter(), 1)\n except (TypeError, AttributeError):\n return unicode(combo.child.get_text())\n def set_current_element(combo, t):\n combo.child.set_text(t)\n else:\n def get_current_element(combo):\n if combo.get_active_iter() is not None:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n else:\n return None\n def set_current_element(combo, el):\n # Find the index of the element\n l=[ t[0] for t in enumerate(combo.get_model()) if t[1][1] == el ]\n if l:\n # The element is present.\n combo.set_active(l[0])\n else:\n combo.set_active_iter(combo.get_model().append( (unicode(el), el, None) ))\n\n # Bind the method to the combobox object\n combobox.get_current_element = get_current_element.__get__(combobox)\n combobox.set_current_element = set_current_element.__get__(combobox)\n\n if callback is not None:\n combobox.connect('changed', callback)\n\n return combobox", "def occurrence_choices():\n OCCURRENCE_CHOICES = [('one_off', 'One Off'), ('weekly', 'Weekly'), ('fortnightly', 'Fortnightly')]\n occurrence = forms.ChoiceField(choices=OCCURRENCE_CHOICES, widget=forms.RadioSelect())\n return occurrence", "def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices", "def DrawComboBox(*args, **kwargs):\n return _gdi_.RendererNative_DrawComboBox(*args, **kwargs)", "def fill_combobox(self):\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 1 ORDER BY last_name ASC\"\n self.CB_employee.addItem(\"\")\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 0 ORDER BY last_name ASC\"\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))", "def _get_choices ( self, context, path = '' ):\n choices = []\n gdc = context.get_data_context\n for name in context.data_contexts:\n next_path = path_for( path, name )\n choices.append( TemplateChoice( choice_value = next_path ) )\n choices.extend( self._get_choices( gdc( name ), next_path ) )\n \n return choices", "def get_context(self, name, value, attrs=None, choices=()):\n context = super(TriStateCheckboxSelectMultiple, self).get_context(\n name, value, attrs\n )\n\n choices = dict(it.chain(self.choices, choices))\n if value is None:\n value = dict.fromkeys(choices, False)\n else:\n value = dict(dict.fromkeys(choices, False).items() +\n value.items())\n\n context['values'] = [\n (choice, label, value[choice])\n for choice, label in choices.iteritems()\n ]\n\n return context", "def get_text_type():\n opt = ['text', 'email', 'password']\n inp = option_menu(opt, 'Select text type:')\n\n # mark text type with option\n OPTIONS['text-type'] = opt[inp]\n\n # add option to collected list\n add_to_collected('text type', opt[inp])\n\n return", "def set_dropdown_b_options(value):\n options_c = []\n if value=='C':\n options_c = [{'label': '1', 'value': '1'},\n {'label': '2', 'value': '2'}]\n if value == 'D':\n options_c = [{'label': '3', 'value': '3'},\n {'label': '4', 'value': '4'}]\n if value=='E':\n options_c = [{'label': '5', 'value': '5'},\n {'label': '6', 'value': '6'}]\n if value == 
'F':\n options_c = [{'label': '7', 'value': '7'},\n {'label': '8', 'value': '8'}]\n return options_c", "def objects_to_choices(queryset):\n res = []\n for elm in queryset:\n res.append((elm.pk, unicode(elm)))\n return res", "def choice(text, choices, **kwargs):\n return click.prompt(click.style('> {}'.format(text), fg='blue', bold=True),\n type=click.Choice(choices),\n **kwargs)", "def on_comboBox_enceinte_activated(self, index):\n nom_enceinte = self.comboBox_enceinte.currentText()\n marque = [x[2] for x in self.enceintes if x[1] == nom_enceinte][0]\n n_serie = [x[4] for x in self.enceintes if x[1] == nom_enceinte][0]\n model =[x[3] for x in self.enceintes if x[1] == nom_enceinte][0]\n \n \n self.lineEdit_marque.setText(marque)\n self.lineEdit_n_serie.setText(n_serie)\n self.lineEdit_model.setText(model)", "def display_choose(self, text, choices):\n cur_index = 0\n key = None\n while key != 'KEY_NEWLINE':\n if key == 'KEY_UP':\n cur_index = max(cur_index - 1, 0)\n elif key == 'KEY_DOWN':\n cur_index = min(cur_index + 1, len(choices) - 1)\n self.stdscr.erase()\n for line in text:\n self.stdscr.addstr(f'{PADCHAR}{line}\\n')\n for index, value in enumerate(choices):\n self.stdscr.addstr('\\n')\n self.stdscr.addstr(PADCHAR)\n self.stdscr.addstr(value, color_pair(7 if index == cur_index else 1))\n self.stdscr.addstr(f'\\n\\n{PADCHAR}') \n key = self.get_key() \n return cur_index", "def list_selector(title=None,\n text=None,\n members=None,\n controller=None,\n preselect=None,\n entry=False):\n combobox = list_selector_widget(members=members,\n preselect=preselect,\n entry=entry)\n\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_OK, gtk.RESPONSE_OK,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n d.vbox.add(l)\n\n d.vbox.add(combobox)\n combobox.show_all()\n\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n res=d.run()\n retval=None\n if res == gtk.RESPONSE_OK:\n retval=combobox.get_current_element()\n d.destroy()\n return retval", "def get_classes(self, code):\n \n select = v.Combobox(\n _metadata={'name':code}, \n items=self.items, \n v_model=None, \n dense=True,\n hide_details=True\n )\n \n select.observe(partial(self.store, code), 'v_model')\n \n return select", "def getOptionsNames(self) -> List[unicode]:\n ...", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def the_option_named(text: str) -> \"SelectByText\":\n return SelectByText(text)", "def prepareAutoComplete(editor, text, charPos, lineStartCharPos,\r\n wikiDocument, settings):\r\n return []", "def autocomplete_possibilities():\n try:\n # get data sent by client\n typed_input = request.args.get('q')\n print(' ')\n print('\\n------ getting autocomplete_possibilities ------')\n print(f\"recived: input:{typed_input}\")\n\n # call the google API\n results = gmaps.places_autocomplete(typed_input)\n data = [\n {'value': r['place_id'], 'text': r['description']}\n for r in results\n ]\n\n # Pass data to the front end\n print(f'returning: {data}')\n return jsonify(data)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)", "def _validate_selects(text, response):\n answer_options = re.split(settings.MULTISELECT_DELIMITER_RE, str(text))\n choices = map(lambda choice: choice.lower(), response.event.choices)\n logger.debug('Question (%s) answer choices are: %s, given 
answers: %s' % (datatype, choices, answer_options))\n new_answers = copy(answer_options)\n for idx, opt in enumerate(answer_options):\n logger.debug('Trying to format (m)select answer: \"%s\"' % opt)\n try: \n #in the case that we accept numbers to indicate option selection\n opt_int = int(opt)\n if not (1 <= opt_int <= len(choices)): \n return text, 'Answer %s must be between 1 and %s' % (opt_int, len(choices))\n else:\n new_answers[idx] = str(opt_int)\n\n except ValueError: \n # in the case where we accept the actual text of the question\n logger.debug('Caught value error, trying to parse answer string choice of: %s' % choices)\n if opt.lower() not in choices:\n return text, 'Answer must be one of the choices'\n else:\n new_answers[idx] = str(choices.index(opt.lower()) + 1)\n return ' '.join(new_answers), None", "def choice(choices=[], message=\"Pick something.\", title=None):\n return dialog(\n \"choice\",\n choices=choices,\n message=message,\n title=title,\n )", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def choices(self, cl):\n # TODO: Determine if non-static choices would be cleaner here.\n # Honestly, I tried a more generic version and it was even harder to\n # follow than this version.\n yield {\n 'selected': not (self.lookup_val_gte or self.lookup_val_lt),\n 'query_string': cl.get_query_string({}, [self.lookup_kwarg_gte,\n self.lookup_kwarg_lt]),\n 'display': 'All'\n }\n\n goal = settings.FACEBOOK_CLICK_GOAL\n yield {\n 'selected': self.lookup_val_gte and not self.lookup_val_lt,\n 'query_string': cl.get_query_string({self.lookup_kwarg_gte: goal},\n [self.lookup_kwarg_lt]),\n 'display': 'Yes'\n }\n yield {\n 'selected': self.lookup_val_lt and not self.lookup_val_gte,\n 'query_string': cl.get_query_string({self.lookup_kwarg_lt: goal},\n [self.lookup_kwarg_gte]),\n 'display': 'No'\n }", "def build_options(slot, snacks):\n \n if slot == 'Fast':\n return [\n {'text': 'Pizza', 'value': 'Pizza'},\n {'text': 'Fries', 'value': 'Fries'},\n {'text': 'Franky', 'value': 'Franky'},\n {'text': 'Burger', 'value': 'Burger'},\n {'text': 'Sandwich', 'value': 'Sandwich'}\n \n \n ]\n elif slot == 'drink':\n return [\n {'text': 'Coca-Cola', 'value': 'Coca-cola'},\n {'text': 'Appy', 'value': 'Appy'},\n \n {'text': 'Beer', 'value': 'Beer'},\n {'text': 'Frooti', 'value': 'Frooti'},\n {'text': 'Pepsi', 'value': 'Pepsi'}\n \n ]", "def sortedChoices(self, inText):\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)", "def _getChoices(self, acronym):\n # get matches from acronymDB\n matches = []\n if(acronym in self.acronymDB):\n matches += self.acronymDB[acronym]\n if(acronym[-1] == \"s\" and acronym[:-1] in self.acronymDB):\n matches += self.acronymDB[acronym]\n\n # create training data\n X_train, y_train = [], []\n for definition, articleID, ignored_var in matches:\n text = self.articleDB[articleID]\n X_train.append(\n ExpansionChoice(article_id=articleID, article_text=text))\n y_train.append(definition)\n\n # create y labels to group similar acronyms\n y_labels, labelToExpansion = self._processChoices(y_train)\n\n return X_train, y_labels, labelToExpansion", "def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData", "def set_dropdown_b_options(value):\n options_b = []\n if value=='A':\n options_b = 
[{'label': 'C', 'value': 'C'},\n {'label': 'D', 'value': 'D'}]\n if value == 'B':\n options_b = [{'label': 'E', 'value': 'E'},\n {'label': 'F', 'value': 'F'}]\n return options_b", "def choices(self, choices: Iterable[Tuple[str, str]]):\n try:\n iter(choices)\n except TypeError:\n raise TypeError(\"'choices' isn't a valid iterable\")\n\n apply_choices = []\n for i, (choice_id, choice_label) in enumerate(choices):\n apply_choices.append((str(choice_id), str(choice_label)))\n\n if len(apply_choices) < 2:\n raise ValueError(\"you need to specify at least two choices\")\n\n self._choices = apply_choices\n self.specific.refresh()\n self._selected = 0", "def before_choose_candidate_listener(self, session, task):\n choices = [PromptChoice('d', 'eDit', self.importer_edit)]\n if task.candidates:\n choices.append(PromptChoice('c', 'edit Candidates',\n self.importer_edit_candidate))\n\n return choices", "def _get_completion(self, original_text, remaining_text, options):\n if self.should_hide_completions(original_text=original_text,\n remaining_text=remaining_text,\n allowed_suffixes=(\" \", \".\")):\n return []\n\n return [(option, len(remaining_text)) for option in options\n if option.startswith(remaining_text) and not option.startswith(\"_\")]" ]
[ "0.64435107", "0.64435107", "0.6406375", "0.6369448", "0.6282898", "0.61602914", "0.61550856", "0.6096663", "0.6079173", "0.6074226", "0.599768", "0.5979722", "0.5946701", "0.59085536", "0.58665997", "0.5852769", "0.5851758", "0.5840527", "0.58387506", "0.5816007", "0.5803215", "0.5797734", "0.57951343", "0.57936877", "0.57807535", "0.5749749", "0.57457596", "0.5732043", "0.57093215", "0.57050306", "0.5695638", "0.5680274", "0.5638338", "0.5617769", "0.56013155", "0.5580123", "0.5573768", "0.55674773", "0.55656695", "0.5561425", "0.5539777", "0.5525614", "0.55243665", "0.55211055", "0.5516054", "0.55135965", "0.5486497", "0.54864573", "0.5484098", "0.5467698", "0.5450487", "0.5444694", "0.5435837", "0.5432833", "0.542561", "0.54099566", "0.5406829", "0.5394251", "0.53907686", "0.5388395", "0.53733003", "0.5353227", "0.5352402", "0.53441244", "0.5335833", "0.5330664", "0.5320152", "0.5317789", "0.53159815", "0.5291184", "0.52660507", "0.5261751", "0.52587485", "0.5247112", "0.52468276", "0.5246636", "0.52386904", "0.523807", "0.52264065", "0.52225775", "0.521434", "0.52137464", "0.5197637", "0.5192666", "0.51882684", "0.5188186", "0.5170487", "0.516354", "0.5163382", "0.5161854", "0.5159188", "0.5158065", "0.51575136", "0.51539713", "0.5146208", "0.5145707", "0.5143474", "0.5142414", "0.51338685", "0.51279086" ]
0.6981332
0
Return list of choices for combo box, each a tuple of edit text and any annotation text
def getEditChoices(self, currentText=''): currentChoices, valid = self.sortedChoices(currentText) nonChoices = [text for text in self.formatList if text not in currentChoices] results = [] for choice in nonChoices: # menu entries to add a choice allChoices = currentChoices + [choice] allChoices = [text for text in self.formatList if text in allChoices] results.append((self.editSep.join(allChoices), '(%s %s)' % (_('add'), choice))) if currentChoices: results.append((None, None)) # separator for choice in currentChoices: # menu entries to remove a choice allChoices = currentChoices[:] allChoices.remove(choice) allChoices = [text for text in self.formatList if text in allChoices] results.append((self.editSep.join(allChoices), '(%s %s)' % (_('remove'), choice))) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def select_combo_text(cb, text, index=0):\n i = 0\n for n in cb.get_model():\n if n[index] == text:\n break\n i += 1\n cb.set_active(i)", "def application_command_autocomplete_choice_builder(name, value):\n return {\n 'name': name,\n 'value': value,\n }", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def choices(self):\n return tuple(self._choices)", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def fill_combobox_attributes(self):\n\n list_char = [\"\"]\n list_num = [\"\"]\n if self.ui.radioButton_file.isChecked():\n for a in self.attributes:\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n else:\n for a in self.attributes:\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n self.ui.comboBox_num_attributes.blockSignals(True)\n self.ui.comboBox_char_attributes.blockSignals(True)\n self.ui.comboBox_num_attributes.clear()\n self.ui.comboBox_char_attributes.clear()\n self.ui.comboBox_char_attributes.addItems(list_char)\n self.ui.comboBox_num_attributes.addItems(list_num)\n self.ui.comboBox_num_attributes.blockSignals(False)\n self.ui.comboBox_char_attributes.blockSignals(False)", "def getComboTerms(tuples):\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def 
get_choices(cls):\n return cls.values.items()", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def create_combo(self) -> typing.Iterable[Combo]:\n raise NotImplementedError()", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice>\" +\r\n \"tag; got {0} instead\".format(choice.tag)\r\n )\r\n\r\n components = []\r\n choice_text = ''\r\n if choice.text is not None:\r\n choice_text += choice.text\r\n # Initialize our dict for the next content\r\n adder = {\r\n 'type': 'text',\r\n 'contents': choice_text,\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n components.append(adder)\r\n\r\n for elt in choice:\r\n # for elements in the choice e.g. <text> <numtolerance_input>\r\n adder = {\r\n 'type': 'text',\r\n 'contents': '',\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n tag_type = elt.tag\r\n # If the current `elt` is a <numtolerance_input> set the\r\n # `adder`type to 'numtolerance_input', and 'contents' to\r\n # the `elt`'s name.\r\n # Treat decoy_inputs and numtolerance_inputs the same in order\r\n # to prevent students from reading the Html and figuring out\r\n # which inputs are valid\r\n if tag_type in ('numtolerance_input', 'decoy_input'):\r\n # We set this to textinput, so that we get a textinput html\r\n # element.\r\n adder['type'] = 'textinput'\r\n adder['contents'] = elt.get('name')\r\n else:\r\n adder['contents'] = elt.text\r\n\r\n # Add any tail text(\"is the mean\" in the example)\r\n adder['tail_text'] = elt.tail if elt.tail else ''\r\n components.append(adder)\r\n\r\n # Add the tuple for the current choice to the list of choices\r\n choices.append((choice.get(\"name\"), components))\r\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def combo_callback(self, eventobj):\n print(self.combo_input.get()) # print name\n print(self.combo_input.current()) # print index", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def _build_dropdown(options):\n return [(x, x) for x in options]", "def set_choices(self, index, choices):\n if len(choices) == 1:\n self._label(index)\n self._widgets[index][\"text\"] = str(choices[0])\n else:\n self._combo(index)\n self._widgets[index][\"values\"] = 
[str(t) for t in choices]\n width = max(len(str(t)) for t in choices)\n width = max(5, width)\n self._widgets[index][\"width\"] = width", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice> tag; got %s instead\"\r\n % choice.tag)\r\n choices.append((choice.get(\"name\"), stringify_children(choice)))\r\n return choices", "def __init__(self, \n num_fld=1, \n lab_txt=[\"1\"], \n txt_fld=[\"1\"], \n title_txt=\"test\", \n comb_txt=[],\n comb_lab_txt=[], \n comb_num=0, \n root_x=50, \n root_y=50):\n super().__init__()\n self.geometry(f'+{root_x}+{root_y}') #head=y+20px\n self.str_in=[]\n self.title(title_txt)\n if comb_txt:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n self.comb=[]\n self.act=[]\n lab=[0]*num_fld\n lab_comb=[0]*comb_num\n else:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n lab=[0]*num_fld\n self.comb=[]\n self.act=[]\n for i in range(num_fld):\n self.name[i]=tk.StringVar()\n ent[i]=tk.Entry(self,textvariable=self.name[i])\n ent[i].insert(0, txt_fld[i])\n lab[i] = tk.Label(self,width=15, text=lab_txt[i])\n lab[i].pack()\n ent[i].pack()\n for i in range(comb_num):\n lab_comb[i]=tk.Label(self,width=35, text=comb_lab_txt[i])\n self.comb.append(ttk.Combobox(self, values=comb_txt))\n lab_comb[i].pack()\n self.comb[i].pack()\n self.comb[i].current(1)\n\n but_ac=tk.Button(self, text=\"Accept\", command=self.ins)\n but_ac.pack()\n self.mainloop", "def input_choices_from_list(choices, text):\n no_courses_text = \"\"\"\n init will only list the courses you are enrolled in\n and there seem to be none.\n Either enrol in a course or add the course id as command line argument.\n \"\"\"\n if choices is None or len(choices) == 0:\n print(no_courses_text)\n raise SystemExit(1)\n\n digits = str(math.ceil(math.log10(len(choices))))\n format_str = '{:' + digits + 'd} {}'\n for n, c in enumerate(choices):\n print(format_str.format(n, c))\n try:\n return [int(c) for c in input(text).split()]\n except EOFError:\n return []", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def combobox(self):\n return self._combo", "def widgets_from_abbreviations(self, seq):\n result = []\n for name, abbrev, default in seq:\n widget = self.widget_from_abbrev(abbrev, default)\n if not (isinstance(widget, ValueWidget) or isinstance(widget, fixed)):\n if widget is None:\n raise ValueError(\"{!r} cannot be transformed to a widget\".format(abbrev))\n else:\n raise TypeError(\"{!r} is not a ValueWidget\".format(widget))\n if not widget.description:\n widget.description = name\n widget._kwarg = name\n result.append(widget)\n return result", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def get_chosen_options(user):\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n ocl = eval(user_application.options_selected)\n chosen_options = []\n for oc in ocl:\n chosen_options.append(Option.objects.get(opt_code=int(oc))) \n return chosen_options", "def build_comboboxes(activities, events):\n global comboboxes\n # For each activity set up a selector for an event\n\n for activity in activities:\n\n # Setup frame for better display in gui\n frame = Frame(main_window)\n frame.configure(background=\"gray30\")\n\n # Label the left column as 
activity in a model + \"beautify gui\"\n text = \"Activity name (model):\"\n Label(frame, text=text, bg=\"gray30\", fg=\"white\", padx=5).grid(column=0, row=0)\n Label(frame, text=activity, bg=\"gray30\", fg=\"white\").grid(column=0, row=1)\n\n # Set up the combobox for an event\n combo = Combobox(frame)\n combo['values'] = events\n\n # If activity is in events preselect the current one\n if activity in events:\n combo.current(events.index(activity))\n\n # Label the combobox and place label and box in frame\n Label(frame, text=\"Event name (log):\", bg=\"gray30\", fg=\"white\", padx=5).grid(column=1, row=0)\n combo.grid(column=1, row=1)\n\n # If the last activity in the graph is handled then do not write a separator\n if activity != activities[-1]:\n Separator(frame, orient=\"horizontal\").grid(row=2, columnspan=2, sticky=\"ew\", pady=10)\n\n comboboxes[activity] = combo\n # place the frame in the main_window\n frame.grid(column=0)", "def get_generic_choices(model, key, allow_null=False):\n CHOICES = [('', '--------')] if allow_null else []\n for i in model.objects.all().values_list(key, flat=True).distinct():\n CHOICES.append((str(i), str(i)))\n CHOICES.sort()\n\n return CHOICES", "def _combobox_choice(self, _=None):\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)", "def widgets(parameter: Parameter):\n widgets = []\n for key in parameter.keys():\n textEdit = QTextEdit()\n textEdit.setText(key)\n widgets.append(textEdit)\n if isinstance(parameter[key], Enum):\n comboBox = MyQtEnumComboBox()\n comboBox.fillValues(type(parameter[key]))\n widgets.append(comboBox)\n elif isinstance(parameter[key], bool):\n comboBox = QComboBox()\n comboBox.addItems((\"False\", \"True\"))\n widgets.append(comboBox)\n else:\n textEdit = QTextEdit()\n textEdit.setText(str(parameter[key]))\n widgets.append(textEdit)\n for widget in widgets:\n widget.setFixedHeight(30)\n return widgets", "def _getBrailleRegionsForComboBox(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForComboBox\", obj)\n\n regions = []\n\n focusedRegionIndex = 0\n label = self._script.getDisplayedLabel(obj)\n if label and (len(label) > 0):\n regions.append(braille.Region(label + \" \"))\n focusedRegionIndex = 1\n\n # Check to see if the text is editable. 
If so, then we want\n # to show the text attributes (such as selection -- see bug\n # 496846 for more details).\n #\n textObj = None\n for child in obj:\n if child and child.getRole() == pyatspi.ROLE_TEXT:\n textObj = child\n if textObj and textObj.getState().contains(pyatspi.STATE_EDITABLE):\n textRegion = braille.Text(textObj)\n regions.append(textRegion)\n else:\n displayedText = self._script.getDisplayedText(obj)\n if displayedText:\n regions.append(braille.Region(displayedText))\n\n regions.append(braille.Region(\n \" \" + rolenames.getBrailleForRoleName(obj)))\n\n # Things may not have gone as expected above, so we'll do some\n # defensive programming to make sure we don't get an index out\n # of bounds.\n #\n if focusedRegionIndex >= len(regions):\n focusedRegionIndex = 0\n if len(regions) == 0:\n focusedRegion = None\n else:\n focusedRegion = regions[focusedRegionIndex]\n\n # [[[TODO: WDW - perhaps if a text area was created, we should\n # give focus to it.]]]\n #\n return [regions, focusedRegion]", "def form_SelectChoiceCallableOptions(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n def _():\n options = [(1,'a'),(2,'b'),(3,'c')]\n for option in options:\n yield option\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(_)\n return form", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict(\n [\n (\n # This line below is modifed.\n item.pk,\n self.display_value(item),\n )\n for item in queryset\n ]\n )", "def test_rendering_combobox(qtbot):\n layer = 
Image(np.random.rand(8, 8))\n qtctrl = QtImageControls(layer)\n qtbot.addWidget(qtctrl)\n combo = qtctrl.renderComboBox\n opts = {combo.itemText(i) for i in range(combo.count())}\n rendering_options = {\n 'translucent',\n 'additive',\n 'iso',\n 'mip',\n 'minip',\n 'attenuated_mip',\n 'average',\n }\n assert opts == rendering_options\n # programmatically updating rendering mode updates the combobox\n layer.rendering = 'iso'\n assert combo.findText('iso') == combo.currentIndex()", "def get_choicesdata(self):\n selected_value = self.get_cleaned_value()\n choicesdata = []\n for value, label in self.get_choices_cached():\n is_selected = value == selected_value\n url = self.build_set_values_url(values=[value])\n choicesdata.append({\n 'url': url,\n 'label': label,\n 'is_selected': is_selected,\n 'dom_id': '{}_{}'.format(self.get_inputfield_dom_id(), value)\n })\n return choicesdata", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def get_poll_choices(self, games: [Game]) -> [dict]:\n answer_texts = []\n for g in games:\n answer_texts.append(g.name + \" - \" + g.genre)\n answer_texts = sorted(answer_texts, key=str.lower)\n poll_choices = []\n for at in answer_texts:\n poll_choices.append({\"text\": at})\n return poll_choices", "def __str__(self):\n return \"choice_text: \" + self.choice_text", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def get_choices_new_protected():\n ret = []\n ret.append( (1, _(u'Nur Community-Mitglieder dürfen neue Beiträge leisten')) )\n ret.append( (-1, _(u'Offener Zugang')) )\n return ret", "def comboBox(args: list, slot) -> QComboBox:\n comboBox = QComboBox()\n comboBox.addItems(args[0])\n comboBox.currentTextChanged.connect(slot)\n return comboBox", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def make_ddls( self, parent ):\n # ---- 0\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n a_widget.bind( \"<<ComboboxSelected>>\", self.sync_ddl_0 )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_0_widget = a_widget\n\n # ---- 1\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n self.ddl_1_widget = a_widget\n\n # ---- 2\n a_widget = ttk.Combobox( parent,\n width = self.arg_width + 5,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_2_widget = a_widget\n\n # self.ddl_widgets = [ self.ddl_0_widget, self.ddl_1_widget, self.ddl_2_widget, ]\n # print( self.ddl_widgets[ 0 ] == self.ddl_widgets[ 1 ])\n\n #self.load_ddl_0( )", "def __str__(self):\n return self.choice_text", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def comboBoxes(self):\r\n # Cities Combo Button\r\n self.comboCities = QComboBox()\r\n 
self.comboCities.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboCities.addItems(\r\n ['Girón', 'Piedecuesta', 'Floridablanca', 'Bucaramanga'])\r\n self.grid.addWidget(self.comboCities, 6, 1, 1, 2)\r\n self.comboCities.setCurrentText(\"Bucaramanga\")\r\n # Payment Combo Button\r\n self.comboPayment = QComboBox()\r\n self.comboPayment.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboPayment.addItems(['Efectivo', 'Nequi'])\r\n self.grid.addWidget(self.comboPayment, 7, 1, 1, 2)", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def _find_options(self):\r\n elements = self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def __str__(self):\n return gettext('One of %s') % self._get_choices_str()", "def initDefaultChoices(self):\n return []", "def list_selector_widget(members=None,\n preselect=None,\n entry=False,\n callback=None):\n store, i=generate_list_model(members,\n active_element=preselect)\n\n if entry:\n combobox=gtk.ComboBoxEntry(store, column=0)\n else:\n combobox=gtk.ComboBox(store)\n cell = gtk.CellRendererText()\n combobox.pack_start(cell, expand=True)\n combobox.add_attribute(cell, 'text', 0)\n combobox.add_attribute(cell, 'background', 2)\n\n combobox.set_active(-1)\n if i is None:\n i = store.get_iter_first()\n if i is not None:\n combobox.set_active_iter(i)\n\n if entry:\n def get_current_element(combo):\n try:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n except (TypeError, AttributeError):\n return unicode(combo.child.get_text())\n def set_current_element(combo, t):\n combo.child.set_text(t)\n else:\n def get_current_element(combo):\n if combo.get_active_iter() is not None:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n else:\n return None\n def set_current_element(combo, el):\n # Find the index of the element\n l=[ t[0] for t in enumerate(combo.get_model()) if t[1][1] == el ]\n if l:\n # The element is present.\n combo.set_active(l[0])\n else:\n combo.set_active_iter(combo.get_model().append( (unicode(el), el, None) ))\n\n # Bind the method to the combobox object\n combobox.get_current_element = get_current_element.__get__(combobox)\n combobox.set_current_element = set_current_element.__get__(combobox)\n\n if callback is not None:\n combobox.connect('changed', callback)\n\n return combobox", "def occurrence_choices():\n OCCURRENCE_CHOICES = [('one_off', 'One Off'), ('weekly', 'Weekly'), ('fortnightly', 'Fortnightly')]\n occurrence = forms.ChoiceField(choices=OCCURRENCE_CHOICES, 
widget=forms.RadioSelect())\n return occurrence", "def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices", "def DrawComboBox(*args, **kwargs):\n return _gdi_.RendererNative_DrawComboBox(*args, **kwargs)", "def fill_combobox(self):\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 1 ORDER BY last_name ASC\"\n self.CB_employee.addItem(\"\")\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 0 ORDER BY last_name ASC\"\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))", "def _get_choices ( self, context, path = '' ):\n choices = []\n gdc = context.get_data_context\n for name in context.data_contexts:\n next_path = path_for( path, name )\n choices.append( TemplateChoice( choice_value = next_path ) )\n choices.extend( self._get_choices( gdc( name ), next_path ) )\n \n return choices", "def get_context(self, name, value, attrs=None, choices=()):\n context = super(TriStateCheckboxSelectMultiple, self).get_context(\n name, value, attrs\n )\n\n choices = dict(it.chain(self.choices, choices))\n if value is None:\n value = dict.fromkeys(choices, False)\n else:\n value = dict(dict.fromkeys(choices, False).items() +\n value.items())\n\n context['values'] = [\n (choice, label, value[choice])\n for choice, label in choices.iteritems()\n ]\n\n return context", "def get_text_type():\n opt = ['text', 'email', 'password']\n inp = option_menu(opt, 'Select text type:')\n\n # mark text type with option\n OPTIONS['text-type'] = opt[inp]\n\n # add option to collected list\n add_to_collected('text type', opt[inp])\n\n return", "def set_dropdown_b_options(value):\n options_c = []\n if value=='C':\n options_c = [{'label': '1', 'value': '1'},\n {'label': '2', 'value': '2'}]\n if value == 'D':\n options_c = [{'label': '3', 'value': '3'},\n {'label': '4', 'value': '4'}]\n if value=='E':\n options_c = [{'label': '5', 'value': '5'},\n {'label': '6', 'value': '6'}]\n if value == 'F':\n options_c = [{'label': '7', 'value': '7'},\n {'label': '8', 'value': '8'}]\n return options_c", "def objects_to_choices(queryset):\n res = []\n for elm in queryset:\n res.append((elm.pk, unicode(elm)))\n return res", "def choice(text, choices, **kwargs):\n return click.prompt(click.style('> {}'.format(text), fg='blue', bold=True),\n type=click.Choice(choices),\n **kwargs)", "def on_comboBox_enceinte_activated(self, index):\n nom_enceinte = self.comboBox_enceinte.currentText()\n marque = [x[2] for x in self.enceintes if x[1] == nom_enceinte][0]\n n_serie = [x[4] for x in self.enceintes if x[1] == nom_enceinte][0]\n model =[x[3] for x in self.enceintes if x[1] == nom_enceinte][0]\n \n \n self.lineEdit_marque.setText(marque)\n self.lineEdit_n_serie.setText(n_serie)\n self.lineEdit_model.setText(model)", "def display_choose(self, text, choices):\n cur_index = 0\n key = None\n while key != 'KEY_NEWLINE':\n if key == 'KEY_UP':\n cur_index = max(cur_index - 1, 0)\n elif key == 'KEY_DOWN':\n cur_index = min(cur_index + 1, len(choices) - 1)\n self.stdscr.erase()\n for line in text:\n 
self.stdscr.addstr(f'{PADCHAR}{line}\\n')\n for index, value in enumerate(choices):\n self.stdscr.addstr('\\n')\n self.stdscr.addstr(PADCHAR)\n self.stdscr.addstr(value, color_pair(7 if index == cur_index else 1))\n self.stdscr.addstr(f'\\n\\n{PADCHAR}') \n key = self.get_key() \n return cur_index", "def list_selector(title=None,\n text=None,\n members=None,\n controller=None,\n preselect=None,\n entry=False):\n combobox = list_selector_widget(members=members,\n preselect=preselect,\n entry=entry)\n\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_OK, gtk.RESPONSE_OK,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n d.vbox.add(l)\n\n d.vbox.add(combobox)\n combobox.show_all()\n\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n res=d.run()\n retval=None\n if res == gtk.RESPONSE_OK:\n retval=combobox.get_current_element()\n d.destroy()\n return retval", "def get_classes(self, code):\n \n select = v.Combobox(\n _metadata={'name':code}, \n items=self.items, \n v_model=None, \n dense=True,\n hide_details=True\n )\n \n select.observe(partial(self.store, code), 'v_model')\n \n return select", "def getOptionsNames(self) -> List[unicode]:\n ...", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def the_option_named(text: str) -> \"SelectByText\":\n return SelectByText(text)", "def prepareAutoComplete(editor, text, charPos, lineStartCharPos,\r\n wikiDocument, settings):\r\n return []", "def autocomplete_possibilities():\n try:\n # get data sent by client\n typed_input = request.args.get('q')\n print(' ')\n print('\\n------ getting autocomplete_possibilities ------')\n print(f\"recived: input:{typed_input}\")\n\n # call the google API\n results = gmaps.places_autocomplete(typed_input)\n data = [\n {'value': r['place_id'], 'text': r['description']}\n for r in results\n ]\n\n # Pass data to the front end\n print(f'returning: {data}')\n return jsonify(data)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)", "def _validate_selects(text, response):\n answer_options = re.split(settings.MULTISELECT_DELIMITER_RE, str(text))\n choices = map(lambda choice: choice.lower(), response.event.choices)\n logger.debug('Question (%s) answer choices are: %s, given answers: %s' % (datatype, choices, answer_options))\n new_answers = copy(answer_options)\n for idx, opt in enumerate(answer_options):\n logger.debug('Trying to format (m)select answer: \"%s\"' % opt)\n try: \n #in the case that we accept numbers to indicate option selection\n opt_int = int(opt)\n if not (1 <= opt_int <= len(choices)): \n return text, 'Answer %s must be between 1 and %s' % (opt_int, len(choices))\n else:\n new_answers[idx] = str(opt_int)\n\n except ValueError: \n # in the case where we accept the actual text of the question\n logger.debug('Caught value error, trying to parse answer string choice of: %s' % choices)\n if opt.lower() not in choices:\n return text, 'Answer must be one of the choices'\n else:\n new_answers[idx] = str(choices.index(opt.lower()) + 1)\n return ' '.join(new_answers), None", "def choice(choices=[], message=\"Pick something.\", title=None):\n return dialog(\n \"choice\",\n choices=choices,\n message=message,\n title=title,\n )", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def 
choices(self, cl):\n # TODO: Determine if non-static choices would be cleaner here.\n # Honestly, I tried a more generic version and it was even harder to\n # follow than this version.\n yield {\n 'selected': not (self.lookup_val_gte or self.lookup_val_lt),\n 'query_string': cl.get_query_string({}, [self.lookup_kwarg_gte,\n self.lookup_kwarg_lt]),\n 'display': 'All'\n }\n\n goal = settings.FACEBOOK_CLICK_GOAL\n yield {\n 'selected': self.lookup_val_gte and not self.lookup_val_lt,\n 'query_string': cl.get_query_string({self.lookup_kwarg_gte: goal},\n [self.lookup_kwarg_lt]),\n 'display': 'Yes'\n }\n yield {\n 'selected': self.lookup_val_lt and not self.lookup_val_gte,\n 'query_string': cl.get_query_string({self.lookup_kwarg_lt: goal},\n [self.lookup_kwarg_gte]),\n 'display': 'No'\n }", "def build_options(slot, snacks):\n \n if slot == 'Fast':\n return [\n {'text': 'Pizza', 'value': 'Pizza'},\n {'text': 'Fries', 'value': 'Fries'},\n {'text': 'Franky', 'value': 'Franky'},\n {'text': 'Burger', 'value': 'Burger'},\n {'text': 'Sandwich', 'value': 'Sandwich'}\n \n \n ]\n elif slot == 'drink':\n return [\n {'text': 'Coca-Cola', 'value': 'Coca-cola'},\n {'text': 'Appy', 'value': 'Appy'},\n \n {'text': 'Beer', 'value': 'Beer'},\n {'text': 'Frooti', 'value': 'Frooti'},\n {'text': 'Pepsi', 'value': 'Pepsi'}\n \n ]", "def sortedChoices(self, inText):\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)", "def _getChoices(self, acronym):\n # get matches from acronymDB\n matches = []\n if(acronym in self.acronymDB):\n matches += self.acronymDB[acronym]\n if(acronym[-1] == \"s\" and acronym[:-1] in self.acronymDB):\n matches += self.acronymDB[acronym]\n\n # create training data\n X_train, y_train = [], []\n for definition, articleID, ignored_var in matches:\n text = self.articleDB[articleID]\n X_train.append(\n ExpansionChoice(article_id=articleID, article_text=text))\n y_train.append(definition)\n\n # create y labels to group similar acronyms\n y_labels, labelToExpansion = self._processChoices(y_train)\n\n return X_train, y_labels, labelToExpansion", "def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData", "def set_dropdown_b_options(value):\n options_b = []\n if value=='A':\n options_b = [{'label': 'C', 'value': 'C'},\n {'label': 'D', 'value': 'D'}]\n if value == 'B':\n options_b = [{'label': 'E', 'value': 'E'},\n {'label': 'F', 'value': 'F'}]\n return options_b", "def choices(self, choices: Iterable[Tuple[str, str]]):\n try:\n iter(choices)\n except TypeError:\n raise TypeError(\"'choices' isn't a valid iterable\")\n\n apply_choices = []\n for i, (choice_id, choice_label) in enumerate(choices):\n apply_choices.append((str(choice_id), str(choice_label)))\n\n if len(apply_choices) < 2:\n raise ValueError(\"you need to specify at least two choices\")\n\n self._choices = apply_choices\n self.specific.refresh()\n self._selected = 0", "def before_choose_candidate_listener(self, session, task):\n choices = [PromptChoice('d', 'eDit', self.importer_edit)]\n if task.candidates:\n choices.append(PromptChoice('c', 'edit Candidates',\n self.importer_edit_candidate))\n\n return choices", "def _get_completion(self, original_text, remaining_text, options):\n if self.should_hide_completions(original_text=original_text,\n remaining_text=remaining_text,\n allowed_suffixes=(\" \", 
\".\")):\n return []\n\n return [(option, len(remaining_text)) for option in options\n if option.startswith(remaining_text) and not option.startswith(\"_\")]" ]
[ "0.6981332", "0.64435107", "0.64435107", "0.6406375", "0.6282898", "0.61602914", "0.61550856", "0.6096663", "0.6079173", "0.6074226", "0.599768", "0.5979722", "0.5946701", "0.59085536", "0.58665997", "0.5852769", "0.5851758", "0.5840527", "0.58387506", "0.5816007", "0.5803215", "0.5797734", "0.57951343", "0.57936877", "0.57807535", "0.5749749", "0.57457596", "0.5732043", "0.57093215", "0.57050306", "0.5695638", "0.5680274", "0.5638338", "0.5617769", "0.56013155", "0.5580123", "0.5573768", "0.55674773", "0.55656695", "0.5561425", "0.5539777", "0.5525614", "0.55243665", "0.55211055", "0.5516054", "0.55135965", "0.5486497", "0.54864573", "0.5484098", "0.5467698", "0.5450487", "0.5444694", "0.5435837", "0.5432833", "0.542561", "0.54099566", "0.5406829", "0.5394251", "0.53907686", "0.5388395", "0.53733003", "0.5353227", "0.5352402", "0.53441244", "0.5335833", "0.5330664", "0.5320152", "0.5317789", "0.53159815", "0.5291184", "0.52660507", "0.5261751", "0.52587485", "0.5247112", "0.52468276", "0.5246636", "0.52386904", "0.523807", "0.52264065", "0.52225775", "0.521434", "0.52137464", "0.5197637", "0.5192666", "0.51882684", "0.5188186", "0.5170487", "0.516354", "0.5163382", "0.5161854", "0.5159188", "0.5158065", "0.51575136", "0.51539713", "0.5146208", "0.5145707", "0.5143474", "0.5142414", "0.51338685", "0.51279086" ]
0.6369448
4
Return list of choices for combo box, each a tuple of edit text and any annotation text
def getEditChoices(self, currentText=''): format = globalref.options.strData('EditDateFormat', True) today = GenDate().dateStr(format) yesterday = (GenDate() - 1).dateStr(format) tomorrow = (GenDate() + 1).dateStr(format) return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _('yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def select_combo_text(cb, text, index=0):\n i = 0\n for n in cb.get_model():\n if n[index] == text:\n break\n i += 1\n cb.set_active(i)", "def application_command_autocomplete_choice_builder(name, value):\n return {\n 'name': name,\n 'value': value,\n }", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def choices(self):\n return tuple(self._choices)", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def fill_combobox_attributes(self):\n\n list_char = [\"\"]\n list_num = [\"\"]\n if self.ui.radioButton_file.isChecked():\n for a in self.attributes:\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n else:\n for a in self.attributes:\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n self.ui.comboBox_num_attributes.blockSignals(True)\n self.ui.comboBox_char_attributes.blockSignals(True)\n self.ui.comboBox_num_attributes.clear()\n self.ui.comboBox_char_attributes.clear()\n self.ui.comboBox_char_attributes.addItems(list_char)\n self.ui.comboBox_num_attributes.addItems(list_num)\n self.ui.comboBox_num_attributes.blockSignals(False)\n self.ui.comboBox_char_attributes.blockSignals(False)", "def getComboTerms(tuples):\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], 
str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def get_choices(cls):\n return cls.values.items()", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def create_combo(self) -> typing.Iterable[Combo]:\n raise NotImplementedError()", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice>\" +\r\n \"tag; got {0} instead\".format(choice.tag)\r\n )\r\n\r\n components = []\r\n choice_text = ''\r\n if choice.text is not None:\r\n choice_text += choice.text\r\n # Initialize our dict for the next content\r\n adder = {\r\n 'type': 'text',\r\n 'contents': choice_text,\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n components.append(adder)\r\n\r\n for elt in choice:\r\n # for elements in the choice e.g. 
<text> <numtolerance_input>\r\n adder = {\r\n 'type': 'text',\r\n 'contents': '',\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n tag_type = elt.tag\r\n # If the current `elt` is a <numtolerance_input> set the\r\n # `adder`type to 'numtolerance_input', and 'contents' to\r\n # the `elt`'s name.\r\n # Treat decoy_inputs and numtolerance_inputs the same in order\r\n # to prevent students from reading the Html and figuring out\r\n # which inputs are valid\r\n if tag_type in ('numtolerance_input', 'decoy_input'):\r\n # We set this to textinput, so that we get a textinput html\r\n # element.\r\n adder['type'] = 'textinput'\r\n adder['contents'] = elt.get('name')\r\n else:\r\n adder['contents'] = elt.text\r\n\r\n # Add any tail text(\"is the mean\" in the example)\r\n adder['tail_text'] = elt.tail if elt.tail else ''\r\n components.append(adder)\r\n\r\n # Add the tuple for the current choice to the list of choices\r\n choices.append((choice.get(\"name\"), components))\r\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def combo_callback(self, eventobj):\n print(self.combo_input.get()) # print name\n print(self.combo_input.current()) # print index", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def _build_dropdown(options):\n return [(x, x) for x in options]", "def set_choices(self, index, choices):\n if len(choices) == 1:\n self._label(index)\n self._widgets[index][\"text\"] = str(choices[0])\n else:\n self._combo(index)\n self._widgets[index][\"values\"] = [str(t) for t in choices]\n width = max(len(str(t)) for t in choices)\n width = max(5, width)\n self._widgets[index][\"width\"] = width", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice> tag; got %s instead\"\r\n % choice.tag)\r\n choices.append((choice.get(\"name\"), stringify_children(choice)))\r\n return choices", "def __init__(self, \n num_fld=1, \n lab_txt=[\"1\"], \n txt_fld=[\"1\"], \n title_txt=\"test\", \n comb_txt=[],\n comb_lab_txt=[], \n comb_num=0, \n root_x=50, \n root_y=50):\n super().__init__()\n self.geometry(f'+{root_x}+{root_y}') #head=y+20px\n self.str_in=[]\n self.title(title_txt)\n if comb_txt:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n self.comb=[]\n self.act=[]\n lab=[0]*num_fld\n lab_comb=[0]*comb_num\n else:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n lab=[0]*num_fld\n self.comb=[]\n self.act=[]\n for i in range(num_fld):\n self.name[i]=tk.StringVar()\n ent[i]=tk.Entry(self,textvariable=self.name[i])\n ent[i].insert(0, txt_fld[i])\n lab[i] = tk.Label(self,width=15, text=lab_txt[i])\n lab[i].pack()\n ent[i].pack()\n for i in range(comb_num):\n lab_comb[i]=tk.Label(self,width=35, 
text=comb_lab_txt[i])\n self.comb.append(ttk.Combobox(self, values=comb_txt))\n lab_comb[i].pack()\n self.comb[i].pack()\n self.comb[i].current(1)\n\n but_ac=tk.Button(self, text=\"Accept\", command=self.ins)\n but_ac.pack()\n self.mainloop", "def input_choices_from_list(choices, text):\n no_courses_text = \"\"\"\n init will only list the courses you are enrolled in\n and there seem to be none.\n Either enrol in a course or add the course id as command line argument.\n \"\"\"\n if choices is None or len(choices) == 0:\n print(no_courses_text)\n raise SystemExit(1)\n\n digits = str(math.ceil(math.log10(len(choices))))\n format_str = '{:' + digits + 'd} {}'\n for n, c in enumerate(choices):\n print(format_str.format(n, c))\n try:\n return [int(c) for c in input(text).split()]\n except EOFError:\n return []", "def combobox(self):\n return self._combo", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def widgets_from_abbreviations(self, seq):\n result = []\n for name, abbrev, default in seq:\n widget = self.widget_from_abbrev(abbrev, default)\n if not (isinstance(widget, ValueWidget) or isinstance(widget, fixed)):\n if widget is None:\n raise ValueError(\"{!r} cannot be transformed to a widget\".format(abbrev))\n else:\n raise TypeError(\"{!r} is not a ValueWidget\".format(widget))\n if not widget.description:\n widget.description = name\n widget._kwarg = name\n result.append(widget)\n return result", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def build_comboboxes(activities, events):\n global comboboxes\n # For each activity set up a selector for an event\n\n for activity in activities:\n\n # Setup frame for better display in gui\n frame = Frame(main_window)\n frame.configure(background=\"gray30\")\n\n # Label the left column as activity in a model + \"beautify gui\"\n text = \"Activity name (model):\"\n Label(frame, text=text, bg=\"gray30\", fg=\"white\", padx=5).grid(column=0, row=0)\n Label(frame, text=activity, bg=\"gray30\", fg=\"white\").grid(column=0, row=1)\n\n # Set up the combobox for an event\n combo = Combobox(frame)\n combo['values'] = events\n\n # If activity is in events preselect the current one\n if activity in events:\n combo.current(events.index(activity))\n\n # Label the combobox and place label and box in frame\n Label(frame, text=\"Event name (log):\", bg=\"gray30\", fg=\"white\", padx=5).grid(column=1, row=0)\n combo.grid(column=1, row=1)\n\n # If the last activity in the graph is handled then do not write a separator\n if activity != activities[-1]:\n Separator(frame, orient=\"horizontal\").grid(row=2, columnspan=2, sticky=\"ew\", pady=10)\n\n comboboxes[activity] = combo\n # place the frame in the main_window\n frame.grid(column=0)", "def get_chosen_options(user):\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n ocl = eval(user_application.options_selected)\n chosen_options = []\n for oc in ocl:\n chosen_options.append(Option.objects.get(opt_code=int(oc))) \n return chosen_options", "def get_generic_choices(model, key, allow_null=False):\n CHOICES = [('', '--------')] if allow_null else []\n for i in model.objects.all().values_list(key, flat=True).distinct():\n CHOICES.append((str(i), str(i)))\n CHOICES.sort()\n\n return CHOICES", "def _combobox_choice(self, _=None):\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = 
int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)", "def widgets(parameter: Parameter):\n widgets = []\n for key in parameter.keys():\n textEdit = QTextEdit()\n textEdit.setText(key)\n widgets.append(textEdit)\n if isinstance(parameter[key], Enum):\n comboBox = MyQtEnumComboBox()\n comboBox.fillValues(type(parameter[key]))\n widgets.append(comboBox)\n elif isinstance(parameter[key], bool):\n comboBox = QComboBox()\n comboBox.addItems((\"False\", \"True\"))\n widgets.append(comboBox)\n else:\n textEdit = QTextEdit()\n textEdit.setText(str(parameter[key]))\n widgets.append(textEdit)\n for widget in widgets:\n widget.setFixedHeight(30)\n return widgets", "def _getBrailleRegionsForComboBox(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForComboBox\", obj)\n\n regions = []\n\n focusedRegionIndex = 0\n label = self._script.getDisplayedLabel(obj)\n if label and (len(label) > 0):\n regions.append(braille.Region(label + \" \"))\n focusedRegionIndex = 1\n\n # Check to see if the text is editable. If so, then we want\n # to show the text attributes (such as selection -- see bug\n # 496846 for more details).\n #\n textObj = None\n for child in obj:\n if child and child.getRole() == pyatspi.ROLE_TEXT:\n textObj = child\n if textObj and textObj.getState().contains(pyatspi.STATE_EDITABLE):\n textRegion = braille.Text(textObj)\n regions.append(textRegion)\n else:\n displayedText = self._script.getDisplayedText(obj)\n if displayedText:\n regions.append(braille.Region(displayedText))\n\n regions.append(braille.Region(\n \" \" + rolenames.getBrailleForRoleName(obj)))\n\n # Things may not have gone as expected above, so we'll do some\n # defensive programming to make sure we don't get an index out\n # of bounds.\n #\n if focusedRegionIndex >= len(regions):\n focusedRegionIndex = 0\n if len(regions) == 0:\n focusedRegion = None\n else:\n focusedRegion = regions[focusedRegionIndex]\n\n # [[[TODO: WDW - perhaps if a text area was created, we should\n # give focus to it.]]]\n #\n return [regions, focusedRegion]", "def form_SelectChoiceCallableOptions(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n def _():\n options = [(1,'a'),(2,'b'),(3,'c')]\n for option in options:\n yield option\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(_)\n return form", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n 
h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict(\n [\n (\n # This line below is modifed.\n item.pk,\n self.display_value(item),\n )\n for item in queryset\n ]\n )", "def test_rendering_combobox(qtbot):\n layer = Image(np.random.rand(8, 8))\n qtctrl = QtImageControls(layer)\n qtbot.addWidget(qtctrl)\n combo = qtctrl.renderComboBox\n opts = {combo.itemText(i) for i in range(combo.count())}\n rendering_options = {\n 'translucent',\n 'additive',\n 'iso',\n 'mip',\n 'minip',\n 'attenuated_mip',\n 'average',\n }\n assert opts == rendering_options\n # programmatically updating rendering mode updates the combobox\n layer.rendering = 'iso'\n assert combo.findText('iso') == combo.currentIndex()", "def get_choicesdata(self):\n selected_value = self.get_cleaned_value()\n choicesdata = []\n for value, label in self.get_choices_cached():\n is_selected = value == selected_value\n url = self.build_set_values_url(values=[value])\n choicesdata.append({\n 'url': url,\n 'label': label,\n 'is_selected': is_selected,\n 'dom_id': '{}_{}'.format(self.get_inputfield_dom_id(), value)\n })\n return choicesdata", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def get_poll_choices(self, games: [Game]) -> [dict]:\n answer_texts = []\n for g in games:\n answer_texts.append(g.name + \" - \" + g.genre)\n answer_texts = sorted(answer_texts, key=str.lower)\n poll_choices = []\n for at in answer_texts:\n poll_choices.append({\"text\": at})\n return poll_choices", "def __str__(self):\n return \"choice_text: \" + self.choice_text", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def get_choices_new_protected():\n ret = []\n ret.append( (1, _(u'Nur Community-Mitglieder dürfen neue Beiträge leisten')) )\n ret.append( (-1, _(u'Offener Zugang')) )\n return ret", "def comboBox(args: list, slot) -> QComboBox:\n comboBox = QComboBox()\n comboBox.addItems(args[0])\n comboBox.currentTextChanged.connect(slot)\n return comboBox", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def make_ddls( self, parent ):\n # ---- 0\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n 
textvariable = \"self.arg_2_var\" )\n\n a_widget.bind( \"<<ComboboxSelected>>\", self.sync_ddl_0 )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_0_widget = a_widget\n\n # ---- 1\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n self.ddl_1_widget = a_widget\n\n # ---- 2\n a_widget = ttk.Combobox( parent,\n width = self.arg_width + 5,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_2_widget = a_widget\n\n # self.ddl_widgets = [ self.ddl_0_widget, self.ddl_1_widget, self.ddl_2_widget, ]\n # print( self.ddl_widgets[ 0 ] == self.ddl_widgets[ 1 ])\n\n #self.load_ddl_0( )", "def __str__(self):\n return self.choice_text", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def comboBoxes(self):\r\n # Cities Combo Button\r\n self.comboCities = QComboBox()\r\n self.comboCities.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboCities.addItems(\r\n ['Girón', 'Piedecuesta', 'Floridablanca', 'Bucaramanga'])\r\n self.grid.addWidget(self.comboCities, 6, 1, 1, 2)\r\n self.comboCities.setCurrentText(\"Bucaramanga\")\r\n # Payment Combo Button\r\n self.comboPayment = QComboBox()\r\n self.comboPayment.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboPayment.addItems(['Efectivo', 'Nequi'])\r\n self.grid.addWidget(self.comboPayment, 7, 1, 1, 2)", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def _find_options(self):\r\n elements = self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def __str__(self):\n return gettext('One of %s') % self._get_choices_str()", "def list_selector_widget(members=None,\n preselect=None,\n entry=False,\n callback=None):\n store, i=generate_list_model(members,\n active_element=preselect)\n\n if entry:\n combobox=gtk.ComboBoxEntry(store, column=0)\n else:\n combobox=gtk.ComboBox(store)\n cell = gtk.CellRendererText()\n combobox.pack_start(cell, expand=True)\n combobox.add_attribute(cell, 'text', 0)\n combobox.add_attribute(cell, 'background', 2)\n\n combobox.set_active(-1)\n if i is None:\n i = store.get_iter_first()\n if i is not None:\n combobox.set_active_iter(i)\n\n if entry:\n def get_current_element(combo):\n try:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n except 
(TypeError, AttributeError):\n return unicode(combo.child.get_text())\n def set_current_element(combo, t):\n combo.child.set_text(t)\n else:\n def get_current_element(combo):\n if combo.get_active_iter() is not None:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n else:\n return None\n def set_current_element(combo, el):\n # Find the index of the element\n l=[ t[0] for t in enumerate(combo.get_model()) if t[1][1] == el ]\n if l:\n # The element is present.\n combo.set_active(l[0])\n else:\n combo.set_active_iter(combo.get_model().append( (unicode(el), el, None) ))\n\n # Bind the method to the combobox object\n combobox.get_current_element = get_current_element.__get__(combobox)\n combobox.set_current_element = set_current_element.__get__(combobox)\n\n if callback is not None:\n combobox.connect('changed', callback)\n\n return combobox", "def initDefaultChoices(self):\n return []", "def occurrence_choices():\n OCCURRENCE_CHOICES = [('one_off', 'One Off'), ('weekly', 'Weekly'), ('fortnightly', 'Fortnightly')]\n occurrence = forms.ChoiceField(choices=OCCURRENCE_CHOICES, widget=forms.RadioSelect())\n return occurrence", "def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices", "def DrawComboBox(*args, **kwargs):\n return _gdi_.RendererNative_DrawComboBox(*args, **kwargs)", "def fill_combobox(self):\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 1 ORDER BY last_name ASC\"\n self.CB_employee.addItem(\"\")\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 0 ORDER BY last_name ASC\"\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))", "def get_text_type():\n opt = ['text', 'email', 'password']\n inp = option_menu(opt, 'Select text type:')\n\n # mark text type with option\n OPTIONS['text-type'] = opt[inp]\n\n # add option to collected list\n add_to_collected('text type', opt[inp])\n\n return", "def get_context(self, name, value, attrs=None, choices=()):\n context = super(TriStateCheckboxSelectMultiple, self).get_context(\n name, value, attrs\n )\n\n choices = dict(it.chain(self.choices, choices))\n if value is None:\n value = dict.fromkeys(choices, False)\n else:\n value = dict(dict.fromkeys(choices, False).items() +\n value.items())\n\n context['values'] = [\n (choice, label, value[choice])\n for choice, label in choices.iteritems()\n ]\n\n return context", "def _get_choices ( self, context, path = '' ):\n choices = []\n gdc = context.get_data_context\n for name in context.data_contexts:\n next_path = path_for( path, name )\n choices.append( TemplateChoice( choice_value = next_path ) )\n choices.extend( self._get_choices( gdc( name ), next_path ) )\n \n return choices", "def set_dropdown_b_options(value):\n options_c = []\n if value=='C':\n options_c = [{'label': '1', 'value': '1'},\n {'label': '2', 'value': '2'}]\n if value == 'D':\n options_c = [{'label': '3', 'value': '3'},\n {'label': '4', 'value': '4'}]\n if value=='E':\n options_c = [{'label': '5', 'value': '5'},\n {'label': '6', 'value': '6'}]\n if value == 'F':\n options_c = 
[{'label': '7', 'value': '7'},\n {'label': '8', 'value': '8'}]\n return options_c", "def objects_to_choices(queryset):\n res = []\n for elm in queryset:\n res.append((elm.pk, unicode(elm)))\n return res", "def choice(text, choices, **kwargs):\n return click.prompt(click.style('> {}'.format(text), fg='blue', bold=True),\n type=click.Choice(choices),\n **kwargs)", "def on_comboBox_enceinte_activated(self, index):\n nom_enceinte = self.comboBox_enceinte.currentText()\n marque = [x[2] for x in self.enceintes if x[1] == nom_enceinte][0]\n n_serie = [x[4] for x in self.enceintes if x[1] == nom_enceinte][0]\n model =[x[3] for x in self.enceintes if x[1] == nom_enceinte][0]\n \n \n self.lineEdit_marque.setText(marque)\n self.lineEdit_n_serie.setText(n_serie)\n self.lineEdit_model.setText(model)", "def list_selector(title=None,\n text=None,\n members=None,\n controller=None,\n preselect=None,\n entry=False):\n combobox = list_selector_widget(members=members,\n preselect=preselect,\n entry=entry)\n\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_OK, gtk.RESPONSE_OK,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n d.vbox.add(l)\n\n d.vbox.add(combobox)\n combobox.show_all()\n\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n res=d.run()\n retval=None\n if res == gtk.RESPONSE_OK:\n retval=combobox.get_current_element()\n d.destroy()\n return retval", "def display_choose(self, text, choices):\n cur_index = 0\n key = None\n while key != 'KEY_NEWLINE':\n if key == 'KEY_UP':\n cur_index = max(cur_index - 1, 0)\n elif key == 'KEY_DOWN':\n cur_index = min(cur_index + 1, len(choices) - 1)\n self.stdscr.erase()\n for line in text:\n self.stdscr.addstr(f'{PADCHAR}{line}\\n')\n for index, value in enumerate(choices):\n self.stdscr.addstr('\\n')\n self.stdscr.addstr(PADCHAR)\n self.stdscr.addstr(value, color_pair(7 if index == cur_index else 1))\n self.stdscr.addstr(f'\\n\\n{PADCHAR}') \n key = self.get_key() \n return cur_index", "def get_classes(self, code):\n \n select = v.Combobox(\n _metadata={'name':code}, \n items=self.items, \n v_model=None, \n dense=True,\n hide_details=True\n )\n \n select.observe(partial(self.store, code), 'v_model')\n \n return select", "def getOptionsNames(self) -> List[unicode]:\n ...", "def the_option_named(text: str) -> \"SelectByText\":\n return SelectByText(text)", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def prepareAutoComplete(editor, text, charPos, lineStartCharPos,\r\n wikiDocument, settings):\r\n return []", "def _validate_selects(text, response):\n answer_options = re.split(settings.MULTISELECT_DELIMITER_RE, str(text))\n choices = map(lambda choice: choice.lower(), response.event.choices)\n logger.debug('Question (%s) answer choices are: %s, given answers: %s' % (datatype, choices, answer_options))\n new_answers = copy(answer_options)\n for idx, opt in enumerate(answer_options):\n logger.debug('Trying to format (m)select answer: \"%s\"' % opt)\n try: \n #in the case that we accept numbers to indicate option selection\n opt_int = int(opt)\n if not (1 <= opt_int <= len(choices)): \n return text, 'Answer %s must be between 1 and %s' % (opt_int, len(choices))\n else:\n new_answers[idx] = str(opt_int)\n\n except ValueError: \n # in the case where we accept the actual text of the question\n logger.debug('Caught value 
error, trying to parse answer string choice of: %s' % choices)\n if opt.lower() not in choices:\n return text, 'Answer must be one of the choices'\n else:\n new_answers[idx] = str(choices.index(opt.lower()) + 1)\n return ' '.join(new_answers), None", "def choice(choices=[], message=\"Pick something.\", title=None):\n return dialog(\n \"choice\",\n choices=choices,\n message=message,\n title=title,\n )", "def autocomplete_possibilities():\n try:\n # get data sent by client\n typed_input = request.args.get('q')\n print(' ')\n print('\\n------ getting autocomplete_possibilities ------')\n print(f\"recived: input:{typed_input}\")\n\n # call the google API\n results = gmaps.places_autocomplete(typed_input)\n data = [\n {'value': r['place_id'], 'text': r['description']}\n for r in results\n ]\n\n # Pass data to the front end\n print(f'returning: {data}')\n return jsonify(data)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def choices(self, cl):\n # TODO: Determine if non-static choices would be cleaner here.\n # Honestly, I tried a more generic version and it was even harder to\n # follow than this version.\n yield {\n 'selected': not (self.lookup_val_gte or self.lookup_val_lt),\n 'query_string': cl.get_query_string({}, [self.lookup_kwarg_gte,\n self.lookup_kwarg_lt]),\n 'display': 'All'\n }\n\n goal = settings.FACEBOOK_CLICK_GOAL\n yield {\n 'selected': self.lookup_val_gte and not self.lookup_val_lt,\n 'query_string': cl.get_query_string({self.lookup_kwarg_gte: goal},\n [self.lookup_kwarg_lt]),\n 'display': 'Yes'\n }\n yield {\n 'selected': self.lookup_val_lt and not self.lookup_val_gte,\n 'query_string': cl.get_query_string({self.lookup_kwarg_lt: goal},\n [self.lookup_kwarg_gte]),\n 'display': 'No'\n }", "def build_options(slot, snacks):\n \n if slot == 'Fast':\n return [\n {'text': 'Pizza', 'value': 'Pizza'},\n {'text': 'Fries', 'value': 'Fries'},\n {'text': 'Franky', 'value': 'Franky'},\n {'text': 'Burger', 'value': 'Burger'},\n {'text': 'Sandwich', 'value': 'Sandwich'}\n \n \n ]\n elif slot == 'drink':\n return [\n {'text': 'Coca-Cola', 'value': 'Coca-cola'},\n {'text': 'Appy', 'value': 'Appy'},\n \n {'text': 'Beer', 'value': 'Beer'},\n {'text': 'Frooti', 'value': 'Frooti'},\n {'text': 'Pepsi', 'value': 'Pepsi'}\n \n ]", "def sortedChoices(self, inText):\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)", "def _getChoices(self, acronym):\n # get matches from acronymDB\n matches = []\n if(acronym in self.acronymDB):\n matches += self.acronymDB[acronym]\n if(acronym[-1] == \"s\" and acronym[:-1] in self.acronymDB):\n matches += self.acronymDB[acronym]\n\n # create training data\n X_train, y_train = [], []\n for definition, articleID, ignored_var in matches:\n text = self.articleDB[articleID]\n X_train.append(\n ExpansionChoice(article_id=articleID, article_text=text))\n y_train.append(definition)\n\n # create y labels to group similar acronyms\n y_labels, labelToExpansion = self._processChoices(y_train)\n\n return X_train, y_labels, labelToExpansion", "def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData", "def set_dropdown_b_options(value):\n options_b = []\n if value=='A':\n options_b = [{'label': 'C', 
'value': 'C'},\n {'label': 'D', 'value': 'D'}]\n if value == 'B':\n options_b = [{'label': 'E', 'value': 'E'},\n {'label': 'F', 'value': 'F'}]\n return options_b", "def choices(self, choices: Iterable[Tuple[str, str]]):\n try:\n iter(choices)\n except TypeError:\n raise TypeError(\"'choices' isn't a valid iterable\")\n\n apply_choices = []\n for i, (choice_id, choice_label) in enumerate(choices):\n apply_choices.append((str(choice_id), str(choice_label)))\n\n if len(apply_choices) < 2:\n raise ValueError(\"you need to specify at least two choices\")\n\n self._choices = apply_choices\n self.specific.refresh()\n self._selected = 0", "def before_choose_candidate_listener(self, session, task):\n choices = [PromptChoice('d', 'eDit', self.importer_edit)]\n if task.candidates:\n choices.append(PromptChoice('c', 'edit Candidates',\n self.importer_edit_candidate))\n\n return choices", "def _get_completion(self, original_text, remaining_text, options):\n if self.should_hide_completions(original_text=original_text,\n remaining_text=remaining_text,\n allowed_suffixes=(\" \", \".\")):\n return []\n\n return [(option, len(remaining_text)) for option in options\n if option.startswith(remaining_text) and not option.startswith(\"_\")]" ]
[ "0.6981832", "0.64423245", "0.64423245", "0.6404376", "0.63695693", "0.62808853", "0.615837", "0.61527145", "0.60969144", "0.60806596", "0.6074747", "0.59950155", "0.59769976", "0.59458613", "0.59082", "0.5853147", "0.5850785", "0.58412063", "0.58367765", "0.58151263", "0.58029526", "0.57948583", "0.5792428", "0.5791611", "0.57795537", "0.575053", "0.57465565", "0.5732782", "0.57096493", "0.57060355", "0.56966597", "0.5679937", "0.563918", "0.5615509", "0.5599425", "0.55810773", "0.557214", "0.55665624", "0.5565737", "0.55615264", "0.5538559", "0.55243224", "0.552361", "0.5519529", "0.5518102", "0.55137795", "0.5486652", "0.5486258", "0.5484074", "0.5468984", "0.54495764", "0.5447661", "0.5433353", "0.54303634", "0.5424338", "0.5411812", "0.5406592", "0.5394407", "0.5391574", "0.53862125", "0.53735393", "0.53548867", "0.5352198", "0.5345724", "0.5333625", "0.53295", "0.53213644", "0.5318512", "0.53181034", "0.5290123", "0.526435", "0.5263117", "0.5259166", "0.5246472", "0.5245967", "0.5245039", "0.5238816", "0.52362823", "0.52285147", "0.5223594", "0.5216064", "0.5216059", "0.5197635", "0.51916355", "0.5189832", "0.51882505", "0.5170863", "0.5163809", "0.5163533", "0.51623505", "0.51605934", "0.5157795", "0.5157584", "0.5154029", "0.5144694", "0.5143628", "0.5143539", "0.5141602", "0.5133782", "0.5127406" ]
0.5867416
15
Note this initialization command will start spawning traffic and select the specified human demonstrators for imitation learning
def initialize_element(self): init_command = { "StartLearning": True, "AgentID": 1854 } msg = json.dumps(init_command).encode('unicode_escape') self.socket_control.send(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def starting_tests(self):\n# disable menus during testing, because their message loop seems to interfere\n# with the natlink message loop which waits for recognitionMimic to\n# finish\n self.testing = 1", "def initMana():\n run(\"chariot-me -i\")", "def starting_tests(self):\n# disable menus during testing, because their message loop seems to interfere\n# with the natlink message loop which waits for recognitionMimic to\n# finish\n self.enable_menus(0)\n self.testing = 1\n self.parent.starting_tests()", "def main(argv):\n\n IP_ADDRESS = \"10.0.1.16\"\n\n robot = MistyRobot(IP_ADDRESS)\n\n print \"HELP: %s\\r\\n\" % pprint(robot.GetHelp())\n print \"DEVICE INFORMATION: %s\\r\\n\" % pprint(robot.GetDeviceInformation())\n print \"BATTERY LEVEL: %s\\r\\n\" % pprint(robot.GetBatteryLevel())\n print \"AUDIO CLIPS: %s\\r\\n\" % pprint(robot.GetListOfAudioClips())\n print \"AUDIO FILES: %s\\r\\n\" % pprint(robot.GetListOfAudioFiles())\n print \"VIDEO CLIPS: %s\\r\\n\" % pprint(robot.GetListOfVideoClips())\n\n print \"SENSORS: %s\\r\\n\" % pprint(robot.GetStringSensorValues())\n\n robot.LocomotionTrack(leftTrackSpeed=3, rightTrackSpeed=3)\n robot.Stop(delay=4)\n\n # This API call doesn't seem to work properly or consistently,\n # only moves head down, regardless of values\n #robot.MoveHead(pitch=-5, roll=0, yaw=0, velocity=4)\n #robot.MoveHead(pitch=5, roll=0, yaw=0, velocity=4, delay=3)\n\n # This API call doesn't seem to work\n robot.DriveTime(linearVelocity=3, angularVelocity=5, timeMS=5000, degrees=0)\n\n # This API call doesn't seem to work\n robot.Drive(linearVelocity=3, angularVelocity=5)\n robot.Stop(delay=4)\n\n robot.StartFaceTraining(faceId=\"person1\")\n robot.CancelFaceTraining(delay=5)\n\n print \"LEARNED FACES: %s\\r\\n\" % pprint(robot.GetLearnedFaces())\n\n robot.ClearLearnedFaces()\n\n print \"LEARNED FACES AFTER CLEAR: %s\\r\\n\" % pprint(robot.GetLearnedFaces())\n\n robot.SetMood(\"sad\")\n robot.SetMood(\"angry\", delay=3)\n robot.SetMood(\"groggy\", delay=3)\n robot.SetMood(\"confused\", delay=3)\n robot.SetMood(\"content\", delay=3)\n robot.SetMood(\"concerned\", delay=3)\n robot.SetMood(\"unamused\", delay=3)\n robot.SetMood(\"happy\", delay=3)\n robot.SetMood(\"love\", delay=3)", "def setUp(self):\n super().setUp()\n self.devices = _DEVICE_STRATEGY()\n command_line = [\"pool\", \"create\", self._POOLNAME] + self.devices\n RUNNER(command_line)", "def init(self, sevabot):\n self.sevabot = sevabot\n self.standard_xml = \"sevabot/alice/std-startup.xml\"\n\n self.commands = {\n \"!alice start\": self.start,\n \"!alice stop\" : self.stop\n }", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setup_start_agents = False", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def 
discovery():\n launch_training_on_all_splits(experiment='discovery', splits=DISCOVERY_SPLIT, base_model='pretrained', dropout=0.7987, learning_rate=0.00009659)", "def setup_interactive(shared):\n parser = setup_args()\n parser.add_argument('--port', type=int, default=PORT, help='Port to listen on.')\n SHARED['opt'] = parser.parse_args(print_args=False)\n\n SHARED['opt']['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent'\n\n # Create model and assign it to the specified task\n agent = create_agent(SHARED.get('opt'), requireModelExists=True)\n SHARED['agent'] = agent\n # SHARED['world'] = create_task(SHARED.get('opt'), SHARED['agent'])\n\n # show args after loading model\n parser.opt = agent.opt\n parser.print_args()\n return agent.opt", "def setup(self):\n self.machine = Machine(['a', 'b', 'c', '_'])", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def __init__(self):\n super().__init__(sys.argv)\n self.s1 = serverControl()\n self.c1 = clientControl(\"Markus\")\n self.c2 = clientControl(\"Hannes\")", "def init_mission(agent_host, port=0, agent_type='Unknown', mission_type='Unknown', mission_seed=0,\n movement_type='Continuous'):\n\n # -- Set up the mission via XML definition --#\n mission_xml, msize, reward_goal, reward_intermediate, n_intermediate_rewards, reward_timeout, reward_sendcommand, timeout = GetMissionInstance(\n mission_type, mission_seed, agent_type)\n my_mission = MalmoPython.MissionSpec(mission_xml, True)\n my_mission.forceWorldReset()\n\n # -- Enforce the specific restriction for the assessed exercise --#\n # -- If you want a super agent, define one for you self --#\n my_mission.setModeToCreative()\n if agent_type.lower() == 'random':\n n = msize\n my_mission.observeGrid(-n, -1, -n, n, -1, n, 'grid')\n my_mission.requestVideoWithDepth(320, 240)\n elif agent_type.lower() == 'simple':\n n = msize\n my_mission.observeGrid(-n, -1, -n, n, -1, n, 'grid');\n my_mission.requestVideo(320, 240)\n elif agent_type.lower() == 'realistic':\n n = 1 # n=1 means local info only !\n my_mission.observeGrid(-n, -1, -n, n, -1, n, 'grid');\n my_mission.requestVideoWithDepth(320, 240)\n my_mission.observeFullInventory()\n elif agent_type.lower() == 'helper':\n n = 100\n my_mission.observeGrid(-n, -1, -n, n, -1, n, 'grid');\n my_mission.requestVideoWithDepth(320, 240)\n else:\n # -- Define a custom agent and add the sensors you need --#\n n = 100\n my_mission.observeGrid(-n, -1, -n, n, 1, n, 'grid');\n my_mission.requestVideoWithDepth(320, 240)\n\n # -- Add support for the specific movement type requested (and given the constraints of the assignment) --#\n # -- See e.g. 
http://microsoft.github.io/malmo/0.17.0/Schemas/MissionHandlers.html --#\n if movement_type.lower() == 'absolute':\n my_mission.allowAllAbsoluteMovementCommands()\n elif movement_type.lower() == 'continuous':\n my_mission.allowContinuousMovementCommand('move')\n my_mission.allowContinuousMovementCommand('strafe')\n my_mission.allowContinuousMovementCommand('pitch')\n my_mission.allowContinuousMovementCommand('turn')\n my_mission.allowContinuousMovementCommand('crouch')\n elif movement_type.lower() == 'discrete':\n my_mission.allowDiscreteMovementCommand('turn')\n my_mission.allowDiscreteMovementCommand('move')\n my_mission.allowDiscreteMovementCommand('movenorth')\n my_mission.allowDiscreteMovementCommand('moveeast')\n my_mission.allowDiscreteMovementCommand('movesouth')\n my_mission.allowDiscreteMovementCommand('movewest')\n my_mission.allowDiscreteMovementCommand('look')\n\n # -- Get the resulting xml (and return in order to check that conditions match the report) --#\n final_xml = my_mission.getAsXML(True)\n\n # Set up a recording for later inspection\n my_mission_record = MalmoPython.MissionRecordSpec('tmp' + \".tgz\")\n my_mission_record.recordRewards()\n my_mission_record.recordMP4(24, 400000)\n\n # -- Attempt to start a mission --#\n max_retries = 5\n for retry in range(max_retries):\n try:\n agent_host.startMission(my_mission, my_mission_record)\n break\n except RuntimeError as e:\n if retry == max_retries - 1:\n print(\"Error starting mission:\", e)\n exit(1)\n else:\n time.sleep(2)\n\n # -- Loop until mission starts: --#\n print(\"Waiting for the mission to start \")\n state_t = agent_host.getWorldState()\n while not state_t.has_mission_begun:\n sys.stdout.write(\".\")\n time.sleep(0.1)\n state_t = agent_host.getWorldState()\n for error in state_t.errors:\n print(\"Error:\", error.text)\n\n print\n print(\"Mission started (xml returned)... 
\")\n return final_xml, reward_goal, reward_intermediate, n_intermediate_rewards, reward_timeout, reward_sendcommand, timeout", "def __init__(self, agent_id=\"default\", experiment_id=\"default\"):\n self.runtime = runtime()\n self.agent_id = agent_id\n self.experiment_id = experiment_id", "def initialize_robot():\n\n proxy_motion = naoqi.ALProxy(\"ALMotion\", IP_ROBOT, PORT_ROBOT)\n proxy_motion.wakeUp()\n\n proxy_autonomous_life = naoqi.ALProxy(\"ALAutonomousLife\", IP_ROBOT, PORT_ROBOT)\n proxy_autonomous_life.setState(\"disabled\")\n\n proxy_motion = naoqi.ALProxy(\"ALMotion\", IP_ROBOT, PORT_ROBOT)\n proxy_motion.wakeUp()", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_steppers'] = True", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.00000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def __init__(self, numOfGames, muteOutput, randomAI, AIforHuman):\n self.numOfGames = numOfGames\n self.muteOutput = muteOutput\n self.maxTimeOut = 10000\n\n self.AIforHuman = AIforHuman\n self.gameRules = GameRules()\n self.AIPlayer = TicTacToeAgent()\n\n if randomAI:\n self.AIPlayer = randomAgent()\n else:\n self.AIPlayer = TicTacToeAgent()\n if AIforHuman:\n self.HumanAgent = randomAgent()\n else:\n self.HumanAgent = TicTacToeAgent1()", "def main():\n print(\"runner\")\n runner = Runner()\n stop_on_idle = True\n probes = []\n for url in urls:\n probe_cls = random.choice((HttpProbe, ThreadProbe, ShellProbe))\n runner.probes.append(probe_cls(url))\n\n runner.run()", "def setUp(self):\n lang = self._sim_lang\n self._simulator = self._find_resource(\n f\"drake/examples/hardware_sim/hardware_sim_{lang}\")\n self._example_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/example_scenarios.yaml\")\n self._test_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/test/test_scenarios.yaml\")\n self._default_extra = {\n # For our smoke test, exit fairly quickly.\n \"simulation_duration\": 0.0625,\n }", "def __init__(self, numOfGames, muteOutput, randomAI, AIforHuman):\n self.numOfGames = numOfGames\n self.muteOutput = muteOutput\n self.maxTimeOut = 30 \n\n self.AIforHuman = AIforHuman\n self.gameRules = GameRules()\n self.AIPlayer = 
TicTacToeAgent()\n\n if randomAI:\n self.AIPlayer = randomAgent()\n else:\n self.AIPlayer = TicTacToeAgent()\n if AIforHuman:\n self.HumanAgent = randomAgent()\n else:\n self.HumanAgent = keyboardAgent()", "def run(num_trials):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.1, display=True) \n # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=num_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n a.performace_report(num_trials)", "def multi_agent_example():\n env = holodeck.make(\"CyberPunkCity-FollowSight\")\n\n cmd0 = np.array([0, 0, -2, 10])\n cmd1 = np.array([0, 0, 0])\n for i in range(10):\n env.reset()\n env.tick()\n env.act(\"uav0\", cmd0)\n env.act(\"nav0\", cmd1)\n for _ in range(1000):\n states = env.tick()\n pixels = states[\"uav0\"][\"RGBCamera\"]", "def experiment_init(self):\n pass", "def __init__(self, network: Network):\n if LOG[\"ExperimentAI\"]:\n print(\"[ExperimentAI] Initializing AI\")\n self.network = network", "def init():\n\n @click.group(cls=cli.make_commands(__name__))\n def run():\n \"\"\"Cross-cell supervision tools.\"\"\"\n cli.init_logger('daemon.conf')\n\n return run", "def main():\n\n\t# Run setup\n\ts = Setup()\n\tcontrolRoom, studio, newscaster = s.beginSetup()\n\n\t# Start cameras\n\tcontrolRoom.startCameras()\n\tprint 'Cameras started'\n\tcontrolRoom.setCameraSize()\n\n\tif len(controlRoom.studio.cameras) > 1:\n\t\tprint 'Everything up and running...'\n\n\t\t# Fetch a camera that best matches the headpose angle\n\t\tcamera = controlRoom.getClosestCamera()\n\t\twhile True:\n\t\t\t# If advance camera selection algo indicates true, fetch camera closest to headpose\n\t\t\tif controlRoom.cameraSelectionADV():\n\t\t\t\tcamera = controlRoom.getClosestCamera()\n\t\t\tprint 'Active camera: ' + str(camera.cameraID)\n\t\t\t\n\t\t\t# Capture frame or in simulation mode, light up led\n\t\t\tcamera.capture()\n\n\telif len(controlRoom.studio.cameras) == 1:\n\t\twhile True:\n\t\t\tcontrolRoom.studio.cameras[0].capture()\n\t\t\ttime.sleep(2)\n\telse:\n\t\tprint 'No cameras found! 
Something seems to be wrong...'\n\n\t# Shutdown all cameras and kill all windows\n\tcontrolRoom.shutdownCameras()", "def parley(self):\n wait_times = constants.TUTORIAL_WAIT_TIMES\n self.introduce_chat_interface()\n self.wait_for_response(\n message='Please type a greeting message to continue.',\n delay_time=wait_times['chat-interface'],\n )\n self.introduce_persona()\n self.wait_for_response(\n message=constants.APPRENTICE_PERSONA_ROLE_INSTRUCTION,\n delay_time=wait_times['persona'],\n )\n self.introduce_partner_entity()\n self.wait_for_response(\n message=constants.APPRENTICE_CHITCHAT_INSTRUCTION,\n delay_time=wait_times['persona'],\n )\n self.introduce_partner_knowledge()\n self.wait_for_response(\n message=constants.APPRENTICE_PERSONA_MSG_INSTRUCTION,\n delay_time=wait_times['knowledge'],\n )\n self.go_for_start()\n self.episodeDone = True", "def agent_init(self):\n pass", "def main(_):\n description = xm.ExperimentDescription(\n 'HIS - trial=%d' % FLAGS.trial, tags=['his'])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)", "def initialize_home_hub(argv):\n parse_cmd_line_opts(argv)\n init_logging()\n init_error_reporting()\n \n # Verify we have a valid home id\n if HOME_ID is None:\n print('Home ID is invalid or missing. Please provide an integer following the -i flag')\n exit()\n\n # Begin Home Hub Specific Setup\n logger.info('Starting the Home Hub main program for Home: %s', HOME_ID)\n\n # Get the email and password for this HH's user from the env vars\n powernet_user_email = os.getenv('POWERNET_USER_EMAIL', None)\n powernet_user_password = os.getenv('POWERNET_USER_PASSWORD', None)\n \n if powernet_user_email is None:\n logger.info('Missing the required login email address')\n logger.info('Please set the POWERNET_USER_EMAIL environment variable and try again')\n exit()\n \n if powernet_user_password is None:\n logger.info('Missing the required login password')\n logger.info('Please set the POWERNET_USER_PASSWORD environment variable and try again')\n exit()\n \n # attempt to authenticate against our API\n form_payload = {'email': powernet_user_email, 'password': powernet_user_password}\n response = requests.post('https://pwrnet-158117.appspot.com/api/v1/powernet_user/auth/', data=form_payload)\n auth_token = response.json()['token']\n\n # Initializing variables for queue and threads\n rpi = HardwareInterface(house_id=HOME_ID, gpio_map=None, auth_token=auth_token)\n buffer_size = 8\n q_ai = Queue(buffer_size)\n\n # Initialize threads\n producer_ai_thread = Thread(name='Producer', target=rpi.producer_ai, args=(q_ai,))\n producer_ai_thread.start()\n\n consumer_ai_thread = Thread(name='Consumer', target=rpi.consumer_ai, args=(q_ai,))\n consumer_ai_thread.start()\n\n devices_thread = Thread(name='Device', target=rpi.devices_th)\n devices_thread.start()\n\n load_control_thread = Thread(name=\"LoadControl\", target=rpi.local_controller_th)\n load_control_thread.start()", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.start_agents_once = False\n self.start_servers_once = False\n self.setup_start_agents = False\n self.setup_start_servers = False", "def __init__(self, a):\n # sanity check\n assert(isinstance(a, agent.Agent))\n \n # save the agent reference\n self.a = a\n \n self.a.tc_h1(\"CONNECT TESTS\")\n \n # create the host instance\n self.host = host.Host(a, STA)\n # create the access point instances\n self.ap1 = accesspoint.AccessPoint(a, AP1, channel=5, period=100, ssid=\"louis\")\n self.ap2 = 
accesspoint.AccessPoint(a, AP2, channel=11, period=100, ssid=\"louis\")\n\n # reset the host (resetting the MIBs)\n self.host.reset(True)\n\n # set the host MAC address\n self.host.dbg_macaddr()", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def main():\n utils.vip_main(actuator_agent, identity='platform.d.actuator')", "def onSpawn(self):\n self.spawned = True\n self._interactor.initialiseDevices()", "def _starting_up():\n global bus, skill_manager, event_scheduler\n\n bus.on('intent_failure', FallbackSkill.make_intent_failure_handler(bus))\n\n # Create the Intent manager, which converts utterances to intents\n # This is the heart of the voice invoked skill system\n service = IntentService(bus)\n try:\n PadatiousService(bus, service)\n except Exception as e:\n LOG.exception('Failed to create padatious handlers '\n '({})'.format(repr(e)))\n event_scheduler = EventScheduler(bus)\n\n # Create a thread that monitors the loaded skills, looking for updates\n try:\n skill_manager = SkillManager(bus)\n except MsmException:\n # skill manager couldn't be created, wait for network connection and\n # retry\n LOG.info('Msm is uninitialized and requires network connection',\n 'to fetch skill information\\n'\n 'Waiting for network connection...')\n while not connected():\n time.sleep(30)\n skill_manager = SkillManager(bus)\n\n skill_manager.daemon = True\n # Wait until priority skills have been loaded before checking\n # network connection\n skill_manager.load_priority()\n skill_manager.start()\n check_connection()", "def main():\n\n run_manual_session()\n # run_automated_session()", "def __init__(self, scenario):\n client.__init__(self, scenario)\n # TODO: Your initialization, if any (not likely). Oh, and remove the next line.\n raise Exception( \"DO NOT instantiate the skeleton implementation\" )", "def setup(client):\n client.add_cog(ProcessDisplay(client))", "def main():\n driver = Driver()\n driver.start()", "def launch(**kwargs):\n\n logger, loghost, logport, clients, guis, params = unpack_launcher(**kwargs)\n config = load_config(kwargs['config'], logger=logger)\n\n\n ao_client = find_client(logger, clients, 'nidaqmx')\n ai_client = find_client(logger, clients, 'nidaqmx_ai')\n\n # Instantiate Monitor script\n laser_stabilizer = LaserStabilizer(\n config=kwargs['config'],\n ao_client=ao_client,\n ai_client=ai_client\n )\n\n update_service = Service()\n update_service.assign_module(module=laser_stabilizer)\n update_service.assign_logger(logger=logger)\n update_server, update_port = create_server(update_service, logger, host=get_ip())\n logger.update_data(data={'port': update_port})\n laser_stabilizer.gui.set_network_info(port=update_port)\n update_server.start()\n\n # Run continuously\n # Note that the actual operation inside run() can be paused using the update server\n while True:\n\n laser_stabilizer.run()", "def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def minimal_interactive_cli_bootstrap(client):\n # Fetch available TAN mechanisms by the bank, if we don't know it already. 
If the client was created with cached data,\n # the function is already set.\n if not client.get_current_tan_mechanism():\n client.fetch_tan_mechanisms()\n mechanisms = list(client.get_tan_mechanisms().items())\n if len(mechanisms) > 1:\n print(\"Multiple tan mechanisms available. Which one do you prefer?\")\n for i, m in enumerate(mechanisms):\n print(i, \"Function {p.security_function}: {p.name}\".format(p=m[1]))\n choice = input(\"Choice: \").strip()\n client.set_tan_mechanism(mechanisms[int(choice)][0])\n\n if client.is_tan_media_required() and not client.selected_tan_medium:\n print(\"We need the name of the TAN medium, let's fetch them from the bank\")\n m = client.get_tan_media()\n if len(m[1]) == 1:\n client.set_tan_medium(m[1][0])\n else:\n print(\"Multiple tan media available. Which one do you prefer?\")\n for i, mm in enumerate(m[1]):\n print(i,\n \"Medium {p.tan_medium_name}: Phone no. {p.mobile_number_masked}, Last used {p.last_use}\".format(\n p=mm))\n choice = input(\"Choice: \").strip()\n client.set_tan_medium(m[1][int(choice)])", "async def start(self, roles, dialogs):\n self.__init__(self.active_role, (self.active_help, self.external_help))", "def init():\r\n\t# add grabber tools based on proxy tools\r\n\tfor proxyWrapper in vizconnect.getToolsWithMode('Proxy'):\r\n\t\tgrabberTool = tools.grabber.HandGrabber(usingPhysics=False,\r\n\t\t\t\t\t\t\t\t\t\t\t\tusingSprings=False,\r\n\t\t\t\t\t\t\t\t\t\t\t\tplacementMode=tools.placer.MODE_DROP_DOWN)\r\n\t\t\r\n\t\tname = 'grabber_tool_based_on_'+proxyWrapper.getName()\r\n\t\tgrabberWrapper = vizconnect.addTool(raw=grabberTool,\r\n\t\t\t\t\t\t\t\t\t\t\tname=name,\r\n\t\t\t\t\t\t\t\t\t\t\tmake='Virtual',\r\n\t\t\t\t\t\t\t\t\t\t\tmodel='Grabber')\r\n\t\t# parent the grabber wrapper to the proxy's parent\r\n\t\tgrabberWrapper.setParent(proxyWrapper)\r\n\t\t\r\n\t\tgrabberTool.setItems(grabbableItems)\r\n\t\r\n\tviz.callback(viz.getEventID('RESET_THE_LOFT_LAYOUT'), lambda e: resetMovedObjects())", "def main():\n args = _parse_arguments()\n\n util.log_init(\"sitl_A%s_%s.txt\" % (args.id, util.get_latest_log(\"latest_sitl.txt\")), util.log_level[args.level])\n\n shared.AGENT_ID = 'A%s' % args.id\n shared.AGENT_COUNT = args.n\n shared.CURRENT_ALGORITHM = args.algorithm\n shared.AGENT_CHARACTER = args.character\n shared.des_alt = args.alt\n \n util.log_info(\"AGENT_ID = %s\" % shared.AGENT_ID)\n util.log_info(\"Algorithm: %s\" % shared.CURRENT_ALGORITHM)\n util.log_info(\"Agent type: %s\" % shared.AGENT_CHARACTER)\n\n print \"Start simulator (SITL)\"\n sitl = SITL(args.pix) # initialize SITL with firmware path\n\n if shared.AGENT_ID in start_loc:\n sitl_args = ['--home=%s' % start_loc[shared.AGENT_ID]]\n else:\n sitl_args = ['--home=%s' % start_loc['FFF']]\n \n # Pre-recorded coordinates.\n #sitl_args = ['-I0', '--model', 'quad', '--home=31.301201,121.498192,9,353']\t\n sitl.launch(sitl_args, await_ready=True, restart=True)\n\n # Connect to the vehicle. (Spawn an instance of Vehicle named \"vehicle\")\n # connection port is coded in the file name of the firmware like \"ac3.4.5_port5760\"\n # use regular expression to search the string and extract port number\n port = re.search(r'port\\d{4}', args.pix)\n port = re.search(r'\\d{4}', port.group()).group()\n\n print \"Connecting to copter on: TCP: 127.0.0.1:%s\" % port\n copter = nav.connect('tcp:127.0.0.1:%s' % port, wait_ready=True, rate=20)\n util.log_info(\"Copter connected. 
Firmware: %s\" % copter.version)\n \n if not args.xbee: # simulate XBee using ZeroMQ\n [pub, sub] = comm.zmq_init(comm_port_list[shared.AGENT_ID], comm_port_list)\n subscriber_thread = comm.Subscriber(shared.AGENT_ID, sub)\n subscriber_thread.start()\n xbee = pub # make xbee the publisher\n util.log_info(\"ZeroMQ initialzied.\") \n \n else: # use actual xbee ports\n ser = serial.Serial(args.xbee, 57600)\n xbee = comm.xbee_init(ser)\n util.log_info(\"Xbee initialzed.\")\n\n info = \"IFO,%s connected with firmware %s\" % (shared.AGENT_ID, copter.version)\n comm.xbee_broadcast(xbee, info)\n\n _add_listeners(copter)\n\n takeoff_thread = nav.Takeoff(copter, xbee, shared.des_alt, 3)\n purge_thread = comm.Purge(shared.neighbors)\n broadcast_thread = comm.Broadcast(shared.AGENT_ID, copter, xbee)\n flocking_thread = _choose_algorithm(copter, xbee, shared.neighbors)\n\n takeoff_thread.start()\n takeoff_thread.join() # wait until takeoff procedure completed\n\n if shared.status['airborne']: # only execute the threads when airborne\n util.log_info(\"Copter is airborne, starting threads.\")\n broadcast_thread.start()\n purge_thread.start()\n flocking_thread.start()\n\n # main loop\n while True:\n try: time.sleep(.2)\n except KeyboardInterrupt: break\n \n if shared.status['airborne']:\n # echo exiting status\n if shared.status['exiting']:\n info = \"IFO,%s %s-ing.\" % (shared.AGENT_ID,shared.status['command'])\n comm.xbee_broadcast(xbee, info)\n util.log_info(info)\n\n # if an rtl or land command is received, kill flocking and set the `exiting` flag\n elif shared.status['command'] == 'RTL' or shared.status['command'] == 'LAND':\n shared.status['thread_flag'] |= shared.FLOCKING_FLAG\n nav.set_mode(copter, shared.status['command'])\n shared.status['exiting'] = True\n\n if not flocking_thread.is_alive(): # break the loop if finished\n break\n\n nav.wait_for_disarm(copter) # wait for disarm\n comm.xbee_broadcast(xbee, 'IFO,%s terminated.' 
% shared.AGENT_ID)\n\n # clean up\n purge_thread.stop()\n while purge_thread.is_alive(): \n util.log_info('Waiting for purge to shutdown') \n purge_thread.join(3)\n util.log_info('Purge killed.')\n\n broadcast_thread.stop()\n while broadcast_thread.is_alive(): \n util.log_info('Waiting for broadcast to shutdown') \n broadcast_thread.join(3)\n util.log_info('Broadcast killed.')\n\n copter.close()\n util.log_info(\"Copter shutdown.\")\n\n if args.xbee:\n xbee.halt()\n ser.close()\n util.log_info(\"Xbee and serial closed.\")\n else:\n subscriber_thread.stop()\n while subscriber_thread.is_alive(): \n util.log_info('Waiting for Subscriber to shutdown') \n subscriber_thread.join(3)\n util.log_info('Subscriber killed.')\n\n sitl.stop()\n util.log_info(\"SITL shutdown.\")", "def initialize(self):\n if self.real:\n self.agent.connect(self)\n else:\n self.connect() # Connect python client to VREP\n self.agent.connect(self)", "def setup(cls):\n cls.runner = CliRunner()\n cls.agent_name = \"myagent\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n # copy the 'packages' directory in the parent of the agent folder.\n shutil.copytree(Path(CUR_PATH, \"..\", \"packages\"), Path(cls.t, \"packages\"))\n\n os.chdir(cls.t)\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"init\", \"--author\", AUTHOR],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"create\", \"--local\", cls.agent_name],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n os.chdir(cls.agent_name)\n # add connection first time", "def setup(bot):\n bot.add_cog(MyAnimeList())", "def launch_devices(self):\n self.data[0], temp = alghoritm.temperature(self.data[0], self.set_thermostat, 0) # get value\n HC35_3S.launch(self.data_path, self.samples_size, temp) # set it via device\n\n self.data[1], humidi = alghoritm.humidity(self.data[1], self.set_humidifier, 0)\n humidifier.launch(self.data_path, self.samples_size, humidi)\n\n self.data[2], moistu = alghoritm.moisture(self.data[2], self.set_sprinklers, 0)\n HUNTER.launch(self.data_path, self.samples_size, moistu)\n\n self.data[3], o2 = alghoritm.o2(self.data[3], self.set_ventilation, 0)\n ventilation.launch_o2(self.data_path, self.samples_size, o2)\n\n self.data[4], co2 = alghoritm.co2(self.data[4], self.set_ventilation, 0)\n ventilation.launch_co2(self.data_path, self.samples_size, co2)", "def parley(self):\n wait_times = constants.TUTORIAL_WAIT_TIMES\n self.introduce_chat_interface()\n self.wait_for_response(\n message='Please type a greeting message to continue.',\n delay_time=wait_times['chat-interface'],\n )\n self.introduce_knowledgeable_entity()\n self.wait_for_response(\n message=constants.ONBOARDING_ACKNOWLEDGE_UNDERSTOOD,\n delay_time=wait_times['chat-interface'],\n )\n self.introduce_search()\n self.wait_for_response(\n message=constants.ONBOARDING_ACKNOWLEDGE_UNDERSTOOD,\n delay_time=wait_times['knowledge'],\n )\n self.try_search()\n self.wait_for_response_with_search()\n self.introduce_persona()\n self.wait_for_response_with_search()\n self.go_for_start()\n self.episodeDone = True", "def start_run(self, context: RobotRunnerContext) -> None:\n rospy.init_node(\"robot_runner\", disable_signals=True)\n self.ina219_profiler = INA219ProfilerClient()\n self.cpu_mem_profiler = ResourceProfilerClient()", "def setup(bot: Bot) -> None:\n bot.add_cog(Latency(bot))", "def setUp(self):\n # Direct connection used to match the property values\n self.sockobj = socket(AF_INET, SOCK_STREAM)\n 
self.sockobj.settimeout(socket_timeout)\n # Connect to the selected server\n self.sockobj.connect(server) \n self.pyclient = PySimpleClient()\n self.cmd_num = 0\n for servo_type in app_nr.values():\n self.__dict__[servo_type] = self.pyclient.getComponent(\"MINORSERVO/\" + servo_type)", "def load_scenario(self):\n logger.info(\"Loading scenario...\")\n for manager in self.config[\"fleets\"]:\n name = manager[\"name\"]\n password = manager[\"password\"] if \"password\" in manager else faker_factory.password()\n fleet_type = manager[\"fleet_type\"]\n strategy = manager.get(\"strategy\")\n icon = manager.get(\"icon\")\n agent = self.create_fleetmanager_agent(name, password, fleet_type=fleet_type, strategy=strategy)\n\n self.set_icon(agent, icon, default=fleet_type)\n\n while len(self.manager_agents) < self.config.num_managers:\n time.sleep(0.1)\n\n for transport in self.config[\"transports\"]:\n name = transport[\"name\"]\n password = transport[\"password\"] if \"password\" in transport else faker_factory.password()\n position = transport[\"position\"]\n fleetmanager = transport[\"fleet\"]\n fleet_type = transport[\"fleet_type\"]\n speed = transport.get(\"speed\")\n trust = transport.get(\"trust\")\n rates = transport.get(\"rates\")\n velocity_factor = transport.get(\"velocity_factor\")\n fuel = transport.get(\"fuel\")\n autonomy = transport.get(\"autonomy\")\n current_autonomy = transport.get(\"current_autonomy\")\n strategy = transport.get(\"strategy\")\n icon = transport.get(\"icon\")\n agent = self.create_transport_agent(name, password, position=position, speed=speed, fleet_type=fleet_type,\n fleetmanager=fleetmanager, strategy=strategy, autonomy=autonomy,\n current_autonomy=current_autonomy, trust=trust, rates=rates, velocity_factor=velocity_factor)\n\n if icon:\n self.set_icon(agent, icon, default=fleet_type)\n\n for customer in self.config[\"customers\"]:\n name = customer[\"name\"]\n password = customer[\"password\"] if \"password\" in customer else faker_factory.password()\n fleet_type = customer[\"fleet_type\"]\n position = customer[\"position\"]\n target = customer[\"destination\"]\n strategy = customer.get(\"strategy\")\n icon = customer.get(\"icon\")\n agent = self.create_customer_agent(name, password, fleet_type, position=position, target=target,\n strategy=strategy)\n\n self.set_icon(agent, icon, default=\"customer\")\n\n for station in self.config[\"stations\"]:\n password = station[\"password\"] if \"password\" in station else faker_factory.password()\n strategy = station.get(\"strategy\")\n icon = station.get(\"icon\")\n agent = self.create_station_agent(station[\"name\"], password, position=station[\"position\"],\n power=station[\"power\"], places=station[\"places\"], strategy=strategy)\n self.set_icon(agent, icon, default=\"electric_station\")", "def initialize(self):\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.initialize, just copy the code over\n\n # --------- END YOUR CODE ----------\n pass", "def __init__(self, configs, simulator, wait_time=3):\n self.configs = configs\n self.sim = simulator.sim\n self.gripper = VREP_Gripper()\n self.open()", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Always start the servers for each test variant\n self.start_agents_once = False\n self.start_servers_once = False\n\n # Whether to skip tearDown\n self.skip_teardown = False", "def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # 
create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def setUp(self):\n bed_patient = PatientsGenerator(0, 1, 0, 'a')\n self.admitgen = AdmissionsGenerator(bed_patient, [-1])", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def test_init_experiment(self):\n optimizer = \"RandomSearch\"\n name = \"test_init_experiment\"\n param_defs = {\n \"x\": MinMaxNumericParamDef(0, 1),\n \"name\": NominalParamDef([\"A\", \"B\", \"C\"])\n }\n minimization = True\n\n LAss = PrettyLabAssistant()\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)\n\n exp_ass = LAss.exp_assistants[name]\n\n assert_equal(exp_ass.optimizer, optimizer)\n assert_is_none(exp_ass.optimizer_arguments, None)\n assert_equal(exp_ass.experiment.minimization_problem, minimization)\n with assert_raises(ValueError):\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n print 'alpha, gamma:', a.alpha, a.gamma\n print 'penalties:', a.total_penalties\n print 'total rewards:', a.total_rewards", "def test_create_tang_1(self):\n command_line = (\n self._MENU\n + [self._POOLNAME]\n + self._DEVICES\n + [\"--clevis=tang\", \"--trust-url\", \"--tang-url=http\"]\n )\n TEST_RUNNER(command_line)", "def _init_agents(self):\n self.agents = [Agent(e=0.1, a=0.1, row=self.row, col=self.col) for i in range(2)]", "def __init__(self,\n environment_spec: specs.EnvironmentSpec,\n ):\n # Create the actor\n actor = delta_hedge_actor.DeltaHedgeActor(environment_spec.actions)\n learner = fake_learner.FakeLeaner()\n\n super().__init__(\n actor=actor,\n learner=learner,\n min_observations=100,\n observations_per_step=1e9)", "def initialize(self):\n self.ha_url = self.args.get(\"ha_url\", None)\n self.use_current_brightness = self.args.get(\"use_current_brightness\", False)\n self.condition = self.args.get(\"condition\")\n self.lights = self.args[\"lights\"]\n self.listen_state(self.change_lights_color, 
self.args[\"media_player\"], attribute = self.args.get(\"photo_attribute\", \"entity_picture\"))", "def main():\n import optparse\n import sys\n parser = optparse.OptionParser()\n parser.add_option('-c', '--controller', default='NewController',\n help='Controller class to instantiate.')\n parser.add_option('-o', '--outputs', type='int', default=5,\n help='Number of outputs to use on the hardware.')\n options, _arguments = parser.parse_args()\n try:\n Demo(options.controller, options.outputs)\n except controller.ConnectionError:\n sys.exit('ABORT: Could not find a suitable device.')", "def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255", "def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255", "def editor_multi_agent_example():\n agent_definitions = [\n AgentDefinition(\"uav0\", agents.UavAgent, [sensors.RGBCamera, sensors.LocationSensor]),\n AgentDefinition(\"uav1\", agents.UavAgent, [sensors.LocationSensor, sensors.VelocitySensor])\n ]\n env = HolodeckEnvironment(agent_definitions, start_world=False)\n\n cmd0 = np.array([0, 0, -2, 10])\n cmd1 = np.array([0, 0, 5, 10])\n\n for i in range(10):\n env.reset()\n env.act(\"uav0\", cmd0)\n env.act(\"uav1\", cmd1)\n for _ in range(1000):\n states = env.tick()", "def setUp(self):\n # Start the servers and agents\n super().setUp()\n\n # Get the parameters\n self.ior_flags = self.params.get(\n \"ior_flags\", \"/run/ior/*\")\n self.test_file = self.ior_cmd.test_file.value", "def test_defaults():\n model = torch.nn.Module()\n dataset = torch.utils.data.Dataset()\n dataloader = torch.utils.data.DataLoader(dataset)\n loaders = OrderedDict()\n loaders[\"train\"] = dataloader\n\n test_callbacks = OrderedDict(\n [\n (\"_timer\", TimerCallback),\n (\"_metrics\", MetricManagerCallback),\n (\"_validation\", ValidationManagerCallback),\n (\"_saver\", CheckpointCallback),\n (\"_console\", ConsoleLogger),\n (\"_tensorboard\", TensorboardLogger),\n (\"_exception\", ExceptionCallback),\n ]\n )\n\n exp = SupervisedExperiment(model=model, loaders=loaders)\n _test_callbacks(test_callbacks, exp)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create learning agent\n # a = e.create_agent(RandomAgent) # create random agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01)\n # reduce update_delay to speed up simulation\n sys.stdout = open(\"./output.txt\", \"w\")\n tic = time()\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n toc = time()\n sys.stdout = sys.__stdout__\n\n print \"Totoal time used: {}.\".format(toc - tic)\n parse(\"./output.txt\")", "def test_mcts_agent(self):\n logging.info(\"Starting test_mcts_agent\")\n dirname = os.path.dirname(__file__)\n filename = os.path.join(dirname, \"../configs/factory_floor_simple.yaml\")\n parameters = getParameters(filename)\n env = FactoryFloor(parameters)\n obs = env.reset()\n\n mctsAgents = []\n\n randomagent = 'aiagents.single.RandomAgent.RandomAgent'\n for robotId in 
env.action_space.spaces.keys():\n mctsparams = {'treeAgent':{'class': randomagent, 'id':robotId, 'parameters':{} },\n 'rolloutAgent':{'class': randomagent, 'id':robotId, 'parameters':{} }} \n mctsparams['simulator'] = dict(parameters)\n mctsparams['simulator']['fullname'] = \"aienvs.FactoryFloor.FactoryFloor.FactoryFloor\"\n \n mctsAgents.append(MctsAgent(robotId, env.action_space, env.observation_space , mctsparams))\n\n complexAgent = BasicComplexAgent(mctsAgents, env.action_space, env.observation_space)\n\n episode = Episode(complexAgent, env, obs, render=True)\n episode.run()", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--identifier\", required=True,\n help=\"A short name/identifier for your experiment, e.g. 'ex42b'.\")\n args = parser.parse_args()\n\n train(args)", "def _initialize_runners_startup(self):\n if self.command_group.is_cmd0_runner():\n self._initialize_runner(self.command_group.cmd0)\n if self.command_group.is_cmd1_runner():\n self._initialize_runner(self.command_group.cmd1)\n if self.command_group.is_cmd2_runner():\n self._initialize_runner(self.command_group.cmd2)", "def setUp(self):\n self.anime_link = \"https://animepertutti.com/sword-art-online-alicization-war-of-underworld-sub-ita-streaming-download-z\" #noqa", "def start_with_console():\n print_welcome()\n option = input(\"Choose a number [1/2/3]: \")\n cexc.check_start_exceptions(option)\n if option == \"1\":\n picture = create_white_picture_with_inputs()\n elif option == \"2\":\n picture = load_picture_with_inputs()\n elif option == \"3\":\n picture = create_probability_picture_with_inputs()\n steps = get_steps(input(\"Give a number of steps to do (max=30000): \"))\n print_big_number_announcement(steps)\n Simulator(steps, picture).simulate()", "def setup_class(cls):\n super(TestCliTransferFetchAINetwork, cls).setup_class()\n cls.agent_name2 = \"agent_\" + \"\".join(\n random.choices(string.ascii_lowercase, k=5) # nosec\n )\n cls.create_agents(cls.agent_name2)\n\n cls.gen_key(cls.agent_name)\n cls.gen_key(cls.agent_name2)", "def setUp(self):\n self.s = Simulation()\n self.s['Retina']=GeneratorSheet(nominal_density=4.0)\n self.s['V1']= CFSheet(nominal_density=4.0)\n self.s['V2'] = CFSheet(nominal_density=4.0)\n\n self.s.connect('Retina','V1',delay=0.5,connection_type=CFProjection,\n name='RtoV1',learning_fn=CFPLF_Hebbian())\n\n self.s.connect('Retina','V2',delay=0.5,connection_type=CFProjection,\n name='RtoV2',learning_fn=CFPLF_Hebbian())", "def setUp(self):\n self.delegate = AlwaysHitDelegate(\"\")\n self.environment = BattleEnvironment()", "def setup(bot: Bot) -> None:\n bot.add_cog(VoiceGate(bot))", "def pre_launch(mission):\n started_since = mission.ut() - mission.current_step[\"start_ut\"]\n if started_since > 5:\n mission.next()\n elif mission.current_step[\"first_call\"]:\n vessel = mission.conn.space_center.active_vessel\n ap = vessel.auto_pilot\n\n ap.engage()\n ap.target_pitch_and_heading(90, 90)\n vessel.control.throttle = 1\n vessel.control.sas = False\n vessel.control.rcs = mission.parameters.get('use_rcs', False)", "async def setup(bot):\n await bot.add_cog(People(bot))", "def start(self):\n # iPhone\n #driver = webdriver.Remote(browser_name=\"iphone\", command_executor='http://172.24.101.36:3001/hub')\n # Android\n #driver = webdriver.Remote(browser_name=\"android\", command_executor='http://127.0.0.1:8080/hub')\n # Google Chrome \n #driver = webdriver.Chrome()\n # Firefox \n #FirefoxProfile fp = new FirefoxProfile();\n #fp.setPreference(\"webdriver.load.strategy\", 
\"unstable\");\n #WebDriver driver = new FirefoxDriver(fp);\n \n #driver = webdriver.Firefox(firefox_profile=self.disableImages())\n driver = webdriver.Firefox()\n \n self.driver = driver", "def start(self):\n # iPhone\n #driver = webdriver.Remote(browser_name=\"iphone\", command_executor='http://172.24.101.36:3001/hub')\n # Android\n #driver = webdriver.Remote(browser_name=\"android\", command_executor='http://127.0.0.1:8080/hub')\n # Google Chrome \n #driver = webdriver.Chrome()\n # Firefox \n #FirefoxProfile fp = new FirefoxProfile();\n #fp.setPreference(\"webdriver.load.strategy\", \"unstable\");\n #WebDriver driver = new FirefoxDriver(fp);\n \n #driver = webdriver.Firefox(firefox_profile=self.disableImages())\n driver = webdriver.Firefox()\n \n self.driver = driver", "def setUp(self):\n os.environ[\"PADDLE_TRAINERS_NUM\"] = \"2\"\n os.environ[\n \"PADDLE_PSERVERS_IP_PORT_LIST\"\n ] = \"127.0.0.1:36001,127.0.0.2:36001\"", "def initialise_sim(self):\n pass", "def generate(simulator, p, starts, goals, environment, r):\n num_gen_humans = min(len(starts), len(goals))\n print(\"Generating auto humans:\", num_gen_humans)\n from agents.humans.human_configs import HumanConfigs\n for i in range(num_gen_humans):\n start_config = generate_config_from_pos_3(starts[i])\n goal_config = generate_config_from_pos_3(goals[i])\n start_goal_configs = HumanConfigs(start_config, goal_config)\n human_i_name = \"auto_%04d\" % i\n # Generates a random human from the environment\n new_human_i = Human.generate_human_with_configs(\n start_goal_configs,\n generate_appearance=p.render_3D,\n name=human_i_name\n )\n # update renderer and get human traversible if it exists\n if p.render_3D:\n r.add_human(new_human_i)\n environment[\"human_traversible\"] = \\\n np.array(r.get_human_traversible())\n\n # Input human fields into simulator\n simulator.add_agent(new_human_i)", "def init_process(mech):\n gases[mech] = ct.Solution(mech)\n gases[mech].transport_model = 'Multi'", "def main():\n if input(\"Mode 0=raw / 1=wrapper: ? [0]\") == \"1\":\n # Wrapped mode\n try:\n number = os.environ[\"ROBOT_ID\"]\n except KeyError:\n number = int(input(\"Enter robot number (1-5):\"))\n robot = PiBot.PiBot(robot_nr=number, directory=\"../\")\n suite = get_suite(robot, number)\n gripper = input(\"Include gripper tests (0=no, 1=yes)? [1]\")\n if gripper != \"0\":\n suite.add(\"Clear gripper space... testing gripper up-down\",\n \"gripper up-down\", robot.set_grabber_height,\n [60, 10], [5, 5], [], [], 1)\n suite.add(\"Clear gripper space... testing gripper open-close\",\n \"gripper open-close\", robot.close_grabber,\n [80, 5], [5, 5], [], [], 1)\n else:\n # Raw mode\n robot = commRaspMain.PiBot()\n while not all(map(lambda fn: fn(), [robot._motors_enable,\n robot._encoders_enable,\n robot._servo_enable])):\n time.sleep(0.05)\n robot._tof_init()\n robot._gyro_start()\n robot._adc_conf(3)\n number = int(input(\"Enter robot number (1-5):\"))\n suite = get_suite(robot, number)\n\n robot._motorL_set(0)\n robot._motorR_set(0)\n\n suite.execute()", "def swait_setup_random_number(swait, **kw):\n swait.reset()\n swait.scan.put(\"Passive\")\n swait.calc.put(\"RNDM\")\n swait.scan.put(\".1 second\")\n swait.desc.put(\"uniform random numbers\")", "def maya_start_up():\n import maya.utils as mu\n mu.executeDeferred(\"import mliber;reload(mliber);mliber.show_in_maya()\")", "def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")" ]
[ "0.63875884", "0.61016667", "0.60508204", "0.60147184", "0.5999624", "0.5965473", "0.59024185", "0.587887", "0.58662325", "0.5809647", "0.58030933", "0.5800339", "0.5754827", "0.574139", "0.5726314", "0.5693041", "0.56924486", "0.5661824", "0.5648796", "0.5642869", "0.5640304", "0.56317896", "0.56188244", "0.5617767", "0.561329", "0.55837846", "0.55784434", "0.5563329", "0.5561879", "0.55580723", "0.5550675", "0.5546213", "0.55422115", "0.5540608", "0.5537232", "0.5533412", "0.5530924", "0.5514835", "0.55099905", "0.5509989", "0.5509385", "0.5506967", "0.5500754", "0.55005676", "0.5496803", "0.5491379", "0.5471824", "0.5468343", "0.5450747", "0.544664", "0.5439867", "0.54340297", "0.54304373", "0.54292995", "0.54171884", "0.5415282", "0.54124653", "0.53878367", "0.53864515", "0.5381608", "0.5380916", "0.5371356", "0.5370087", "0.53618115", "0.53581744", "0.5353703", "0.534307", "0.53407735", "0.53384405", "0.5335234", "0.5334325", "0.5330372", "0.5323556", "0.5322977", "0.5322977", "0.53112054", "0.5307562", "0.5300312", "0.5290742", "0.5287836", "0.5286958", "0.52771133", "0.52755904", "0.5273739", "0.52717316", "0.5269291", "0.5255589", "0.52404785", "0.5233554", "0.5225836", "0.5224202", "0.5224202", "0.5221116", "0.52200943", "0.5220085", "0.5217254", "0.52085954", "0.52057254", "0.5204776", "0.5200377" ]
0.5699686
15
Ensure that a folder exists and create it if it doesn't, including any parent folders, as necessary.
def create_folder(target_folder):
    try:
        os.makedirs(target_folder)
    except OSError as e:
        pass
    return os.path.exists(target_folder)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensure_folder(*arg):\n if len(arg) == 0:\n raise Exception(\"No input to ensure_folder\")\n path = get_dir(Path(*arg))\n path.mkdir(parents=True, exist_ok=True)", "def create_folder_if_needed(path):\n if os.path.exists(path):\n print(\"{} dir exists\".format(path))\n else:\n print(\"{} dir does not exist. Creating dir.\".format(path))\n os.mkdir(path)", "def ensure_folder_exists(folder_path: str) -> None:\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)", "def create_folder(path):\n folder_missing = not os.path.exists(path)\n\n if folder_missing:\n # Using makedirs since the path hierarchy might not fully exist.\n try:\n os.makedirs(path)\n except OSError as e:\n if (e.errno, e.strerror) == (17, 'File exists'):\n print(e)\n else:\n raise\n\n print('Created folder {0}'.format(path))\n\n return folder_missing", "def create_folder(path):\n if not exists(path):\n os.makedirs(path)", "def _create_folder(file_path):\r\n file_base = os.path.dirname(file_path)\r\n if not os.path.exists(file_base):\r\n try:\r\n os.makedirs(file_base)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise", "def _create_folder_if_not_exist(filename):\n os.makedirs(os.path.dirname(filename), exist_ok=True)", "def create_folder(folder):\n import errno\n try:\n os.makedirs(folder)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e", "def CreateFolderIfNotExisting(folder_path, communicator):\n if not os.path.isdir(folder_path) and communicator.MyPID() == 0:\n os.makedirs(folder_path)\n communicator.Barrier()", "def check_make(folder_check):\n if not os.path.isdir(folder_check):\n os.mkdir(folder_check)", "def create_folder(path: str):\n try:\n Path(path).mkdir(parents=True, exist_ok=True)\n return True\n except:\n print(\"An error occured.\")", "def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_folder(folder):\n if not os.path.exists(folder):\n os.makedirs(folder)", "def create_folder(folder):\n if not os.path.exists(folder):\n os.makedirs(folder)\n logging.debug(\"Folder %s Created!\" % folder)\n else:\n logging.debug(\"Folder %s Exists!\" % folder)", "def folder_guard(folder_path):\n if not os.path.isdir(folder_path):\n print('INFO:folder_guard(): Creating folder: ' + folder_path + '...')\n os.mkdir(folder_path)", "def create_folder(folder):\n flag = True\n if not os.path.exists(folder):\n try:\n os.makedirs(folder)\n initlog('Folder path:%s created by me; ' % folder) \n except Exception, e:\n initlog('failed to create Folder path; %s' % str(e))\n flag = False\n return flag", "def create_folder(folder_name):\n\n try:\n os.makedirs(folder_name)\n except FileExistsError:\n pass", "def create_dir_if_necessary(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def ensure_dirs(cls, folder_path):\n try:\n cls.mkdirs(folder_path)\n except exceptions.PlotlyRequestError as e:\n if \"already exists\" in e.message:\n pass\n else:\n raise e", "def create_test_folder_if_does_not_exist(path):\n print('')\n if os.path.exists(path):\n print(' Skip creation of existing folder: {}'.format(path))\n else:\n print(' Create non-existing test folder: {}'.format(path))\n os.makedirs(path, mode=0o775)", "def ensure_dir(path):\n parent = os.path.dirname(path)\n if not os.path.exists(parent):\n os.makedirs(parent)", "def _check_directory(my_folder):\n if not os.path.exists(my_folder):\n os.makedirs(my_folder)", "def createFolder(folder):\n folder_ = os.path.join(os.getcwd(),folder)\n if 
not(os.path.isdir(folder_)):\n os.mkdir(folder_)", "def folder_guard(folder_path):\n \n if not os.path.isdir(folder_path):\n print('INFO:folder_guard(): Creating folder: ' + folder_path + '...')\n os.mkdir(folder_path)", "def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already", "def ensure_dir_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def ensure_folders_if(path, condition=True):\n if not os.path.exists(path) and condition:\n os.makedirs(path)", "def create_folder(folder_path: str) -> None:\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)", "def create_folder(path):\n try:\n os.listdir(path)\n except:\n os.makedirs(path)\n else:\n shutil.rmtree(path)\n os.makedirs(path)\n return path", "def create_folder(name_folder: str):\n try:\n # Create a new direcctory\n os.mkdir(name_folder)\n except FileExistsError:\n # If the direcctory already exits print.\n print(f\"The directory {name_folder} already exists.\")", "def check_folder(filepath):\n if not os.path.exists(filepath):\n os.mkdir(filepath)\n return filepath", "def create_folder(path: str):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(folder_name: str) -> None:\n if exist(folder_name):\n print(\"The folder is already exist\")\n return \n\n os.mkdir(folder_name)", "def prepare_folder(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def _folderCheck(self, folder):\n logger.debug(\"Func: _folderCheck\")\n\n if not os.path.isdir(os.path.normpath(folder)):\n os.makedirs(os.path.normpath(folder))", "def create_folder(location: str):\n try:\n os.mkdir(location)\n except FileExistsError:\n pass", "def EnsureDirExists(path):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError:\n pass", "def mkdir_if_not_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def maybe_makedirs(path_to_create):\n try: \n os.makedirs(path_to_create)\n except OSError:\n if not os.path.isdir(path_to_create):\n raise", "def check_folder(directory):\n global path_checked\n if not os.path.exists(directory):\n os.makedirs(directory)\n else:\n path_checked = True", "def ensure_path(full_path):\n full_path = Path(full_path)\n if not full_path.exists():\n full_path.mkdir(parents=True, exist_ok=True)", "def create_folder_path(folder_path):\n try:\n if os.path.exists(folder_path):\n shutil.rmtree(folder_path)\n os.makedirs(folder_path)\n except Exception:\n raise Error('Create {folder_path} exception'.format(folder_path))", "def create_folder(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)", "def create_folder(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)", "def ensure_dir(dir_path):\n try:\n os.mkdir(dir_path)\n except FileExistsError:\n pass", "def _ensure_dir(dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)", "def create_directory(parent_path, new_folder):\n newdir = os.path.join(parent_path, new_folder)\n if os.path.isdir(newdir):\n return False\n else:\n os.mkdir(newdir)\n return True", "def ensure_dir(path):\n\n \n try:\n os.makedirs(path)\n except (EnvironmentError) as e:\n if not(e.errno == errno.EEXIST and \n e.filename == path):\n raise\n return", "def ensure_directory_exists(path):\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n 
os.makedirs(directory, exist_ok=True)\n return", "def _mkdir_if_not_exist(path):\n if not(os.path.isdir(path)):\n os.mkdir(path)\n else:\n _logger.info('Skipping existing directory %s' % path)", "def create_folder(folder):\n\n try:\n os.mkdir(folder, 0740)\n except OSError:\n return False\n else:\n return True", "def ensure_directory(path):\n\tdir_path = os.path.dirname(path)\n\tif os.path.exists(dir_path):\n\t\treturn\n\tensure_directory(dir_path)\n\ttry:\n\t\tos.mkdir(dir_path)\n\texcept OSError as e:\n\t\t# Ignore if EEXISTS. This is needed to avoid a race if two getters run at once.\n\t\tif e.errno != errno.EEXIST:\n\t\t\traise", "def ensure_dir_exists(path: Union[str,Path]) -> None:\n# path = str(path)\n assert not os.path.isfile(path)\n os.makedirs(path, exist_ok=True)\n assert os.path.isdir(path)", "def _ensure_dir(directory):\r\n try:\r\n os.makedirs(directory)\r\n except OSError as exc:\r\n if exc.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise", "def ensure_dir_exists(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)", "def ensure_dirpath_exists(path: Path) -> Path:\n assert path\n out_path: Path = path\n\n if not out_path.exists():\n out_path.mkdir(parents=True, exist_ok=True)\n\n return out_path", "def ensure_directory(explorer, parent_id, dirname):\n cache_key = (parent_id, dirname)\n if cache_key in DIR_CACHE:\n return DIR_CACHE[cache_key]\n\n for folder in explorer.list_folder(parent_id):\n if folder['name'] == dirname:\n folder_id = folder['id']\n break\n else:\n print(\"Creating folder {!r} in parent {}\".format(dirname, parent_id))\n folder_id = explorer.create_folder(dirname, parent_id)\n DIR_CACHE[cache_key] = folder_id\n return folder_id", "def create_folder(self, unformatted_path):\n os.makedirs(self.format_path(unformatted_path), exist_ok=True)", "def maybe_makedirs(path_to_create):\n try:\n os.makedirs(path_to_create)\n except OSError:\n if not os.path.isdir(path_to_create):\n raise", "def make_dir_if_needed(path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "def create_folder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Error: The folder wasn\\'t able to be created: ' + directory)", "def _ensure_directory(self, dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)", "def create_folder(name):\n try:\n dirname = os.path.dirname(__file__)\n filename = os.path.join(dirname, name)\n os.makedirs(filename)\n return 0\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n return -1", "def _check_path(path):\n os.system(\"if [ ! 
-d \" + path + \" ]; then mkdir -p \" + path + \"; fi\")", "def create_folder(folders_to_create=[]):\n for f in folders_to_create:\n if not os.path.exists(f):\n os.makedirs(f)", "def check_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_directory(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)", "def create_dir():\n if check_dir_exist():\n return False\n else:\n os.makedirs(path_structure)\n return True", "def make_sure_path_exists(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def make_sure_path_exists(path):\n try: os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST: raise", "def _ensure_dir_exists(self, directory):\n directory = directory.strip()\n if not Path(directory).exists():\n os.mkdir(directory)", "def createFolder(directory) -> None:\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except Exception as error:\n print(f\"Error: createFolder({directory}) -> {error}\")", "def ensure_dir(d):\n\n if not os.path.exists(d):\n os.makedirs(d, exist_ok=True)\n\n return", "def _mkdir_if_not_exist(path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(path):\n logger.warning(\n 'be happy if some process has already created {}'.format(\n path))\n else:\n raise OSError('Failed to mkdir {}'.format(path))", "def create_dir_if_doesnt_exist(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return", "def make_dir_if_needed(dir) :\n\tif not exists(dir) :\n\t\tos.makedirs(dir)", "def exist_ok_mkdir (path, mode=0777):\n try:\n os.mkdir (path, mode)\n except OSError:\n if not os.path.isdir (path):\n raise", "def ensure_dir(f):\n\td=os.path.dirname(f)\n\tif not os.path.exists(d):\n\t\tos.makedirs(d)", "def ensure_dir(root, path):\n full_path = root\n for seg in path.split(os.sep):\n full_path += os.sep + seg\n if os.path.exists(full_path):\n if not os.path.isdir(full_path):\n raise ValueError(\"'{}' is not a directory\".format(full_path))\n else:\n os.makedirs(full_path)", "def mkdir(path):\n\tif not Path(path).exists():\n\t\tPath(path).mkdir(parents=True, exist_ok=True)", "def assure_path_exists(self, path):\n\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)", "def ensuredir(path):\n # Copied from sphinx.util.osutil.ensuredir(): BSD licensed code, so it's OK\n # to add to this project.\n EEXIST = getattr(errno, 'EEXIST', 0)\n try:\n os.makedirs(path)\n except OSError as err:\n # 0 for Jython/Win32\n if err.errno not in [0, EEXIST]:\n raise", "def _check_or_create_dir(directory):\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)", "def maybe_make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def ensure_dir( dirName ):\r\n if not os.path.exists( dirName ):\r\n os.makedirs( dirName )", "def ensure_dir(dir_):\n try:\n os.mkdir(dir_)\n except OSError:\n assert os.path.isdir(dir_)", "def ensure_dirs_exist(path):\n os.makedirs(path, exist_ok=True)", "def mkdir_if_notexists(path):\n try:\n os.mkdir(path)\n except FileExistsError:\n pass", "def create_folders(folder_name):\n\n if os.path.exists(downloads_path + '\\\\' + folder_name):\n pass\n else:\n os.makedirs(folder_name)\n print(f'Folder: {folder_name} has been created in {downloads_path}')", "def checking_path():\n path = Path(\"phonebook\")\n try:\n path.mkdir(parents=True, exist_ok=False)\n except 
FileExistsError:\n pass\n else:\n pass", "def mkdir_if_missing(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def create_directory_if_not_exists(directory_path):\n os.makedirs(directory_path, exist_ok=True)", "def ifnotexistmkdir(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n return Path(directory)", "def check_path(dir_path):\n if not os.path.exists(dir_path):\n os.mkdir(dir_path, 0755)", "def mkdir(folder_path, exist_ok=True):\n\n # Generate dir\n os.makedirs(folder_path, exist_ok=exist_ok)\n\n return True", "def ensure_path(directory):\n if not path.exists(directory):\n os.makedirs(directory)\n # end if\n return directory", "def createFolder(folderFullPath):\n os.makedirs(folderFullPath, exist_ok=True)", "def exist_ok_makedirs (path, mode=0777):\n if not os.path.isdir (path):\n head, tail = os.path.split (path)\n if not tail:\n head, tail = os.path.split (head)\n if head and tail:\n exist_ok_makedirs (head, mode)\n exist_ok_mkdir (path, mode)", "def ensure_dirs_exists(self):\n os.makedirs(os.path.join(self.location, \"batches\"), exist_ok=True)\n os.makedirs(os.path.join(self.location, \"results\"), exist_ok=True)" ]
[ "0.82459253", "0.8094383", "0.8059282", "0.788112", "0.7779735", "0.7752992", "0.77248335", "0.77168036", "0.77027094", "0.76686084", "0.7639529", "0.76316696", "0.76276046", "0.7627525", "0.7594649", "0.7567482", "0.7552197", "0.75503033", "0.7529557", "0.7526078", "0.75254", "0.7512916", "0.7490926", "0.7458066", "0.7444984", "0.7444063", "0.7442587", "0.7422026", "0.741305", "0.7402993", "0.73841494", "0.73776454", "0.7368256", "0.7350468", "0.7343404", "0.73284996", "0.73280597", "0.73096055", "0.7301028", "0.7300524", "0.7294661", "0.7283377", "0.7275137", "0.7275137", "0.72726554", "0.7258377", "0.72576815", "0.7249299", "0.7245505", "0.72423774", "0.72401345", "0.7233169", "0.7230621", "0.7229388", "0.72276115", "0.721832", "0.7214569", "0.7208516", "0.7191891", "0.7187161", "0.7185111", "0.71689636", "0.7161454", "0.715934", "0.71543145", "0.7151872", "0.7141743", "0.7138145", "0.7137444", "0.7129204", "0.71253014", "0.7108062", "0.7104926", "0.70908177", "0.7080884", "0.70764124", "0.70741844", "0.7073596", "0.70734847", "0.7070537", "0.7059517", "0.70580304", "0.7049929", "0.70492524", "0.70486563", "0.7041085", "0.7037446", "0.70371246", "0.7037098", "0.7030301", "0.7027226", "0.7018647", "0.7014373", "0.70121783", "0.70119876", "0.6997651", "0.69945335", "0.6990592", "0.69841814" ]
0.7339701
36
Sends a POST request every second to the monitoring server indicating that the process is still running.
def post_heartbeat(host, name, auth=None):
    data = {'name': name, 'status': 'ok'}
    try:
        response = post('{host}/monitoring/heartbeat'.format(host=host), data, auth)
    except urllib2.URLError:
        print("Failed to send heartbeat.", file=sys.stderr)
    else:
        if response.strip() != 'ok':
            print('POST got response {response}'.format(response=response), file=sys.stderr)
    timer = threading.Timer(1.0, post_heartbeat, args=(host, name, auth))
    # set the thread as a daemon to exit the program cleanly
    # when the main thread finishes
    timer.daemon = True
    timer.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def periodic_timer(self):\n while self.running:\n self.sendStatusQuery()\n time.sleep(REPORT_INTERVAL)", "def keep_alive():\r\n app = Flask(\"\")\r\n @app.route(\"/\")\r\n def home():\r\n return \"Your bot is now alive!\"\r\n\r\n def run():\r\n app.run(host=\"0.0.0.0\", port=8080)\r\n\r\n\r\n server = Thread(target=run)\r\n server.start()", "def background_catch_up():\n while True:\n time.sleep(interval)\n s = 'http://{0}:{1}'.format(args.host, port)\n req = urllib2.Request(s)\n try:\n response = urllib2.urlopen(req)\n response.read()\n except Exception as e:\n pass", "def your_process(seconds):\r\n global STATUS\r\n sleep(seconds)\r\n STATUS = True", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def keepalive(self) -> None:", "def perform_action(self):\n logger.info(\"Now sending a keepalive to the primary\")\n self.connection_handler.send_message(\"I am still alive, client: {num}\".format(num=self.uuid))\n time.sleep(5)", "def console_server(request):\n return run_server(interval='1')", "def ping_moonrat():\n threading.Timer(3600, ping_moonrat).start()\n text = \"Moonrat is still active\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel='G9P7X8Q0H',\n text=text,\n )", "def inform_alive(self):\n\n response = requests.get(\n \"https://brokenprotocol.xyz/Device/Heartbeat\",\n headers={\"auth\": self.token}\n )\n response.raise_for_status()", "def restart():\n info = request.get_json() or {}\n delay_secs = int(info.get('delay', 0))\n\n t = threading.Timer(delay_secs, update_trigger_file)\n t.start()\n\n return jsonify('Success')", "def _keep_alive(self):\n self.send_keep_alive(self.server.startup_time_delta)\n return TaskSignal.AGAIN", "def keepAliveReceived(self):", "def run( self ):\n while True:\n try:\n time.sleep( 5 )\n self._monitorProcess()\n except Exception, e:\n self.logger.exception( \"Error starting monitor process\" )", "def post(host):\n redis.setex('dispatcher',host,60)\n timer = threading.Timer(20.0, post, args=[host])\n timer.daemon = True\n timer.start()", "def startHeartbeat(self):\n self.post.__sendHeartbeat()", "def background_thread():\n count = 0\n while True:\n socketio.sleep(10)\n count += 1\n socketio.emit('my_response',\n {'data': 'Server generated event', 'count': count},\n namespace='/test')", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n 
self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def _keep_alive(self):\n while self._is_running:\n secs_since_last_cmd = time.time() - self._last_cmd_time\n if not self._is_action_active and secs_since_last_cmd > self._keep_alive_secs:\n self._transport.send(activity_cmd(\" \"))\n self._last_cmd_time = time.time()\n self._log.debug(\"Keeping alive\")\n self._keep_alive_event.wait(timeout=self._keep_alive_secs)", "async def monitor(self, session: ClientSession, params: MonitoringData) -> None:\n while True:\n result = await self.check_website(session, params)\n self._producer.send(result)\n await asyncio.sleep(params.interval)", "def background_thread():\n count = 0\n while True:\n time.sleep(10)\n count += 1\n socketio.emit('my response',\n {'data': 'Server generated event', 'count': count},\n namespace='/test')", "def watch(self, url):\n self.__url = url\n self.downtime_info = None\n self.__timer.start()", "def run(self):\n while True:\n print(\"I'm running in the background\")\n time.sleep(self.interval)", "def background_thread():\n count = 0\n while True:\n socketio.sleep(1)\n count += 1\n t = time.strftime('%M:%S', time.localtime())\n cpus = [1,2,3,4] #\n print('sending')\n socketio.emit('server_response',\n {'data': [t, cpus[0],cpus[1],cpus[2],cpus[3]], 'count': count})", "def _keep_alive(self):\n self.send_keep_alive()\n return TaskSignal.AGAIN", "def slot_keepalive_timer(self, _sender, _data):\r\n if self.connected:\r\n #self.debug(\"### sending keepalive\")\r\n self._try_send_raw(\"2::\")", "def thread_function(client):\n threading.Timer(30.0, thread_function).start()\n client.publish(\"serverCommand/keepalive\", \"0\")\n print(\"Message Sent. 
(keepalive)\")", "def continue_server():\n update_server_status({'ready': True})", "def run(self):\n while self.running:\n self.handle_request()", "def monitorOne(self,website):\n checkInterval = website.checkInterval\n time.sleep(checkInterval)\n while self\\\n .queueTermination.empty():\n startSubProcess = time.time()\n # todo define timeout for requests\n try :\n req = requests.get(website.url, timeout=checkInterval)\n reqCode = req.status_code\n reqTime = req.elapsed\n # Generic to handle all kind of http exceptions\n # Possible enhancement\n except Exception:\n continue\n # unix epoch time good for comparison\n currentTime = time.time()\n website.log[currentTime] = {'code': reqCode, 'responseTime': reqTime}\n # 2 mins\n twoMinsDic = self.getTimeframedData(website, 120, currentTime=currentTime)\n self.queueTwoMin.put(twoMinsDic)\n # 10 mins\n tenMinsDic = self.getTimeframedData(website, 600, currentTime=currentTime)\n self.queueTenMin.put(tenMinsDic)\n # 1 hour\n hourDic = self.getTimeframedData(website, 3600, currentTime=currentTime)\n self.queueHour.put(hourDic)\n\n endSubProcess = time.time()\n # Wait for the next check\n try:\n time.sleep(checkInterval-(endSubProcess-startSubProcess))\n except ValueError:\n pass\n\n # Terminate all processes\n self._terminateAll()\n return", "async def handle_live(self, websocket):\n while True:\n payload = self.generate_payload()\n await websocket.send(json.dumps(payload))\n await asyncio.sleep(self.update_frequency_milliseconds / 1000)", "def monitor(self):\n while not self.terminated:\n try:\n if (time.time() - self.updated_time) < 5:\n messages = self.messages.copy()\n # procs = np.min([ len(messages), 9 ]) + 1\n # pool = ThreadPool(procs)\n # pool.map(self.process, messages)\n # pool.close()\n # pool.join()\n for message in messages:\n self.process(message)\n elif self.ws:\n self.updated_time += 10\n self.ws.close()\n except Exception as e:\n self.on_error(None, \"Monitoring Error: {}\".format(e))\n continue\n finally:\n time.sleep(0.1)", "def admin_server(request):\n return run_server(interval='10000')", "def periodicCall(self):\n self.gui.processIncoming()\n if not self.running:\n import sys\n sys.exit(1)\n self.master.after(UPDATE_DELAY, self.periodicCall)", "def Listen(self):\n while True:\n time.sleep(1)", "def poll_http(interval, port_no):\n import requests\n\n url_string = 'http://0.0.0.0:{}/refresh'.format(port_no)\n\n print('DBG: poll_http({}): got to thread @ {}'.format(\n interval, time.strftime(\"%I:%M:%S\")))\n print('url_string = {}'.format(repr(url_string)))\n\n while True:\n time.sleep(interval)\n print('DBG: thread woke up @ {}'.format(time.strftime(\"%I:%M:%S\")))\n r = requests.get(url_string)\n print('DBG: Requests.text = {}'.format(repr(r.text)))", "def daemon_job(interval):\n time.sleep(3) # Wait for api server to start first\n while True:\n try:\n crawl()\n process_notification()\n except Exception:\n traceback.print_exc()\n time.sleep(interval)", "def service( self ):\n\n self.alive = time.time()", "def watch_process(self):\n psutil.wait_procs([psutil.Process(self._proc.pid)],\n callback=self.start)", "async def monitor():\n global counter\n while True:\n time.sleep(1)\n print(counter, 'reqs/sec')\n counter = 0", "def on_keep_alive(self):\n log.debug(\"Received keep-alive signal\")", "def status_task():\n props = [\n (STAT_TIME, current_time),\n (STAT_CONDITION, weather_condition)\n ]\n\n # Send the status request with the current time and condition.\n send_status_request(props)\n\n # Create and start a timer to repeat 
this task periodically.\n t = Timer(report_interval, status_task)\n t.setDaemon(True)\n t.start()", "def status(self):\n # process running ?\n pid = self.get_pidfile()\n \n running = True\n \n # process is not running\n if pid is None:\n running = False\n else:\n if not self.send_signal(pid,0):\n running = False\n # abnormal state, delete the file\n self.delete_pidfile()\n \n if running:\n message = \"server is running\\n\"\n else:\n message = \"server is not running\\n\"\n sys.stdout.write(message)\n \n return running", "def monitor(self):\r\n while True:\r\n for worker, start_time in self.workers.items():\r\n if (not worker.isAlive() or\r\n self.timeout\r\n and datetime.now() - start_time > self.timeout): \r\n\r\n self.work_count.get_nowait()\r\n self.jobs.task_done()\r\n del self.workers[worker]\r\n\r\n time.sleep(1)", "def serve_forever(self, unused_parameter=0.5):\r\n self.stop = False\r\n while not self.stop:\r\n self.handle_request()", "def run_while_true(server_class=BaseHTTPServer.HTTPServer,\n handler_class=BaseHTTPServer.BaseHTTPRequestHandler):\n server_address = ('localhost', 8080)\n httpd = server_class(server_address, handler_class)\n while keep_running():\n httpd.handle_request()", "def periodically_request_websites_metrics() -> int:\n # Making a get request\n configuration_path = os.path.abspath('configuration/service_configuration.json')\n list_of_websites_to_check = read_service_configuration(\n configuration_file=configuration_path, section='list_of_websites_to_check')\n try:\n\n remote_service_url = os.environ.get('websites_checker_service_url','http://192.168.1.101:8080/api/v1/websites_metrics')\n response = requests.post(url=remote_service_url, json=list_of_websites_to_check)\n if response:\n logger.info(f\"The request has been sent to {remote_service_url} with payload: {list_of_websites_to_check}\")\n\n else:\n logger.error(f\"Error contacting the service {remote_service_url}\")\n except Exception as error:\n logger.error(f\"The Exception {error} occurred\")\n return 1", "def KeepAlive(self) -> bool:", "def is_alive():\n return jsonify({'message': 'Service is alive'}), 200", "def pingEsp(self):\n\t\twhile True:\n\t\t\tprint (\"[{}] Keeping alive the ESP8266 connection\".format(\n\t\t\t\tint(time.time()),\n\t\t\t))\n\t\t\tmqttCli.publish(\"ping\", mqttJsonDump('void'))\n\t\t\ttime.sleep(30)", "def do_monitor(self):\n while not self.expired:\n self.expired = True\n time.sleep(self.interval)\n self.dead_fn()", "def background_thread():\n count = 0\n with open(\"logs.txt\", \"r\") as logfile:\n while True:\n socketio.sleep(1)\n count += 1\n\n line = logfile.readline()\n if line:\n socketio.emit('my_response',\n {'data': line, 'count': count},\n namespace='/test')", "def check_loop(\n url, period=5, timeout=10, body_check_re='',\n producer=None, oneshot=False):\n while True:\n worker = threading.Thread(target=check, kwargs={\n 'url': url,\n 'timeout': timeout,\n 'body_check_re': body_check_re,\n 'producer': producer,\n })\n logger.info('check url=%s' % url)\n worker.start()\n time.sleep(period)\n if oneshot:\n return", "def run(self):\n while True :\n try:\n appinfo = self.db.hgetall(self.appname)\n appinfo_str = json.dumps(appinfo)\n data = {'applicationname':self.appname,'appinfo':appinfo_str}\n response = requests.post(REGISTRAR_URL, data=data)\n time.sleep(2)\n except :\n pass", "def monitor(self, target):\n while self.RUNNING:\n check_time = datetime.now()\n next_check = check_time + timedelta(seconds=target[\"frequency\"])\n\n try:\n self.produce(\n 
get(target[\"url\"], timeout=target[\"frequency\"] - 0.5),\n target.get(\"regex\"),\n check_time,\n )\n except Timeout:\n self.logger.warning(\"Check for %s timed out\", target[\"url\"])\n except RequestException as e:\n self.logger.error(e)\n except re.error as e:\n self.logger.error(e)\n break\n\n # Busy loop until next check_time\n while datetime.now() < next_check:\n sleep(1)", "def keep_alive(self):\n req = BFGlobalFactory.create(\"ns1:KeepAliveReq\")\n rsp = self._soapcall(BFGlobalService.keepAlive, req)\n if rsp.header.errorCode != APIErrorEnum.OK:\n logger.error(\"{keepAlive} failed with error {%s}\",\n rsp.header.errorCode)", "def cron_health(request):\n if checkIfProcessRunning('crond'):\n print('Yes a crond process was running')\n return HttpResponse(status=200)\n else:\n print('crond process is Not running')\n\n return HttpResponse(status=500)", "def stream():\n while True:\n try:\n r = requests.post(\"http://streamer_0:5000/stream\", json={})\n break\n except requests.exceptions.ConnectionError:\n logging.error(\"Could not connect to server streamer_0, retrying\")\n time.sleep(2)\n continue\n logging.info(\"'http://streamer_0:5000/stream', response = {}\".format(r.status_code))\n if r.status_code != 200:\n time.sleep(2)\n stream()", "def schedule_alive_job(self):\n if 'alive-notification' not in settings:\n return\n\n self.scheduler.add_job(\n lambda: self.event_notifier.send_message('Surveillance is alive'),\n **settings['alive-notification'])", "async def keep_alive(self):\n self._keepalive = True\n while True:\n await gen.sleep(self.KEEPALIVE_INTERVAL)\n if not self._keepalive:\n return\n try:\n # lines that start with : are comments\n # and should be ignored by event consumers\n self.write(\":keepalive\\n\\n\")\n await self.flush()\n except StreamClosedError:\n return", "def serve_forever(self, poll_interval=0.5):\n logging.info('RPC server starting')\n self._idle_thread.start()\n SimpleJSONRPCServer.SimpleJSONRPCServer.serve_forever(self, poll_interval)", "def run(self):\r\n self.rpc_server.serve_forever(0.5)", "def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()", "def monitor_and_terminate(self):\n import time\n import datetime\n\n keep_running = True\n\n while keep_running:\n\n print()\n print(datetime.datetime.now().replace(microsecond=0))\n print(self.get_monitor_string())\n\n time.sleep(30)\n\n _, status = self.reporter.get_job_status(self.info)\n if status[\"active\"]+status[\"running\"] == 0:\n keep_running = False\n\n print(\"All tasks done.\")", "def serve(self):\n\t\tself.keep_running=1\n\t\tif self.debug:\n\t\t\tprint \"server started\"\n\t\ttry:\n\t\t\twhile self.keep_running:\n\t\t\t\tself.handle_request()\n\t\tfinally:\n\t\t\tif self.debug:\n\t\t\t\tprint \"server finished\"\n\t\t\tself.keep_running=0\n\t\t\tself.close()", "def __sendHeartbeat(self):\n \n while not rospy.is_shutdown():\n rospy.sleep(5)\n self.setOutput(self.write_start+1,0)", "def updated():\n ws = request.environ.get('wsgi.websocket', None)\n print(\"web socket retrieved\")\n app.number_of_connexion += 1\n if ws:\n while True:\n delay = random.randint(MIN_DELAY, MAX_DELAY)\n gevent.sleep(delay)\n try:\n ws.send(str(app.number_of_connexion))\n except WebSocketError:\n print(\"socket died\")\n app.number_of_connexion -= 1\n return \"disconnected\"\n else:\n raise RuntimeError(\"Environment lacks WSGI WebSocket support\")", "def 
heartbeat(jsonify=None):\n url = URLS.heartbeat()\n payload = {\n \"request\": URLS.get_endpoint(url)\n }\n generate_signature(payload)\n data, err = request_post(url, payload, jsonify)\n return data, err", "def do_POST(self):\n bugsnag = self.server.bugsnag\n body = self.rfile.read(int(self.headers['Content-Length']))\n bugsnag.enqueue(body)\n\n response = (\"OK %s:%s -> %s (%s/%s)\\n\" % (bugsnag.listen, bugsnag.port, bugsnag.endpoint, bugsnag.queue.qsize(), bugsnag.queue.maxsize))\n\n try:\n self.send_response(200)\n self.send_header('Content-Length', len(response))\n self.end_headers()\n self.wfile.write(response)\n except:\n print \"Client disconnected before waiting for response\"\n print_exception(*sys.exc_info())\n print \"continuing...\"", "def _heartbeat_loop(self):\n # set last time so that \"if t_now - t_last >= HEARTBEAT_LOG_INTERVAL\"\n # below evalutes to True on the first run\n t_last = time.time() - HEARTBEAT_LOG_INTERVAL - 1\n while True:\n alive = 0\n # count alive processes \n for p in PROCESSES:\n if p.is_alive():\n alive += 1\n\n # no processes are alive - exit heartbeat loop\n if alive == 0:\n return\n\n t_now = time.time()\n if t_now - t_last >= HEARTBEAT_LOG_INTERVAL:\n # log heartbeat\n obj = { \n 'timestamp': time.time(),\n 'child_procs_total': self._procs_total,\n 'child_procs_alive': alive,\n 'probe_req_queue_len': self._probe_request_queue.qsize(),\n 'probe_resp_queue_len': \\\n self._probe_response_queue.qsize(), \n }\n \n # push to shared mem\n self._sm.set(config.BASE['SHARED_MEM_HEARTBEAT_KEY'],\n json.dumps(obj), HEARTBEAT_TTL)\n LOG.debug('pushed a heartbeat to the shared memory')\n\n t_last = t_now\n\n time.sleep(HEARTBEAT_LOOP_INTERVAL)", "def is_monitor_process_live(pid_file):\n live = False\n\n try:\n check_process_status(pid_file)\n live = True\n except ComponentIsNotRunning:\n pass\n\n return live", "def serve(self):\n super(BlacknetSensor, self).serve(BlacknetSensorThread, BLACKNET_PING_INTERVAL, self.do_ping)", "def REP_watcher():\n global REQ_sent_time, REP_recd_time, pid, patience_seconds\n while True:\n time.sleep(patience_seconds) # how often to check\n try:\n recent_REQ_sent_time = REQ_sent_time.popleft()\n # if we got here; we have a recent_REQ_sent_time\n time.sleep(patience_seconds) # allow time for receipt of the REP\n try:\n recent_REP_recd_time = REP_recd_time.popleft()\n # if we got here; we have a recent_REP_recd_time\n interval = recent_REP_recd_time - recent_REQ_sent_time\n if interval.total_seconds() <= 0.0:\n # recent_REP_recd_time is not later than recent_REQ_sent_time\n print('After image send in REP_watcher test,')\n print('No REP received within', patience_seconds, 'seconds.')\n print('Ending sending program.')\n os.kill(pid, signal.SIGTERM)\n pass\n continue # Got REP after REQ so continue to next REQ\n except IndexError: # there was a REQ, but no timely REP\n print('After image send in REP_watcher test,')\n print('No REP received within', patience_seconds, 'seconds.')\n print('Ending sending program.')\n os.kill(pid, signal.SIGTERM)\n pass\n except IndexError: # there wasn't a time in REQ_sent_time\n # so there is no REP expected,\n # ... 
continue to loop until there is a time in REQ_sent_time\n pass", "def post(self):\n\n headers = ''\n for key, value in self.request.headers.iteritems():\n headers += '%s: %s' % (key, value)\n headers += '\\r\\n'\n InboundRequest.add_record(datetime.utcnow(), self.request.host_url,\n self.request.path, headers, self.request.query_string, self.request.body)\n\n taskqueue.add(url='/check_wipe_task')", "def run(self):\n while True:\n req = self._requests.get()[1]\n req.start()\n logging.info('Running request %s', req)", "def register_background_task_reporter():\n from tornado import ioloop\n\n cb = ioloop.PeriodicCallback(log_background_task_status, 60 * 3 * 1000)\n cb.start()\n return cb", "def Serve(self):\n t = threading.Thread(target=self.serve_forever)\n t.setDaemon(True)\n t.start()\n self._Announce()", "def monitor_ws(self, endpoint='https://www.bitmex.com/api/v1'): #TODO: implement\n started = False\n restart_count = 0\n while True:\n if not started or self.ws.exited or self.ws is None:\n self.ws = BitMEXWebsocket(endpoint=endpoint, symbol=self.symbol,\n api_key=self.api_key, api_secret=self.api_secret)\n time.sleep(1)\n if self.ws.started:\n self.logger.info('Websocket is running.')\n started = True\n self.ws_restarting = False\n else:\n if self.ws.lagging:\n self.logger.error('Ws is lagging ,forcing a restart...')\n self.ws.exit()\n started = False\n self.ws_restarting = True\n restart_count += 1\n self.logger.info(f'Ws starts this session: {restart_count}')\n\n else:\n time.sleep(1)", "def loop_forever(self):\n while self.running:\n time.sleep(0.01)", "def worker_status(request):\n try:\n status = app.control.ping() or []\n except:\n # TODO: What celery exceptions are we catching here?\n status = []\n return HttpResponse(\n json.dumps(status),\n content_type=\"application/json\"\n )", "def background_stuff():\n print \"BACKGROUND\"\n\n\n\n # # global socketio\n\n # # print(wsClient.url, wsClient.products)\n # while (wsClient.MessageCount < 30):\n # print(\"\\nMessageCount =\", \"%i \\n\" % wsClient.MessageCount)\n # # time.sleep(1)\n # # socketio.emit('my response', {'data': [\"TEST\"]}, namespace=\"/test\", broadcast=True)\n # #\n # wsClient.close()\n #\n # while True:\n # time.sleep(1)\n # t = str(time.clock())\n # print t\n\n def minute_passed(oldepoch):\n return time.time() - oldepoch >= .1\n\n global wsClient\n\n # t = time.time()\n # for i in range(3000):\n # # while True:\n # # print time.clock(), t\n # # if time.clock() > ( t + .1 ):\n # # show = True #minute_passed(t)\n # # if show:\n # # print show, time.time(), t, time.time() - t\n # t = time.time()\n # cur_time = str(t)\n # socketio.emit('message', {'data': cur_time, \"msg\": wsClient.message['price'] }, namespace=\"/test\", broadcast=True)\n\n # global thread\n # thread = None", "def on_post(cls, req, resp):\n try:\n message = req.body['message']\n runnable = req.body['runnable']\n\n except KeyError as ke:\n raise falcon.HTTPMissingParam(ke.args[0]) from None\n\n session = req.headers.get('API-SESSION')\n if not session:\n raise falcon.HTTPMissingParam('API-SESSION')\n\n req.context['session'] = ObjectId(session)\n req.context['internal'] = True\n\n cls.logger.debug('Running runnable %s', runnable, extra={'session': session})\n\n runnable_class = cls.api.runnables[runnable]\n runnable_class(req).run_local(message)\n\n resp.body = {\n 'runnable': runnable,\n 'message': message,\n }\n cls.logger.debug('runnable %s status: OK', runnable, extra={'session': session})", "def get_status(self) -> bool:\n try:\n 
self.__driver.service.assert_process_still_running()\n return True\n except AttributeError:\n return False", "def run(self):\n while True:\n # Do something\n print('Doing something imporant in the background')\n\n self.loadData()\n time.sleep(self.interval)", "async def update_stats(self):\r\n\r\n\t\twhile True:\r\n\t\t\tlogging.info('Attempting to post server count')\r\n\t\t\ttry:\r\n\t\t\t\tawait self.dblpy.post_server_count()\r\n\t\t\t\tlogging.info(f'Posted server count ({len(self.bot.guilds)})')\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tlogging.exception(f'Failed to post server count\\n{type(e).__name__}: {e}')\r\n\t\t\tawait asyncio.sleep(1800)", "def start_watcher():\n while True:\n request_date = datetime.datetime.utcnow().strftime(\"%Y%m%d\")\n pull_request_from_remote(remote_files=\"*%s*\" % request_date)\n new_requests = check_for_new_request(request_date=request_date)\n if not new_requests:\n time.sleep(5)\n continue\n\n # noinspection PyTypeChecker\n for r in new_requests:\n print(\"Processing %s\" % r)\n try:\n ret = process_new_request(r, request_date=request_date,\n add2db=True)\n print(ret)\n except:\n os.system('cp -r %s /home/sedm/growth_marshal/archived/failed/'\n % r)\n os.system('cp -r %s /home/sedm/growth_marshal/archived/%s/' %\n (r, request_date))\n\n print(\"Waiting %ss before checking for new request\" % 5)\n time.sleep(5)", "def heartbeat_process(client_id):\n asyncio.run(Client.heartbeat(client_id))", "def start(self):\n threading.Thread(target=self.serve_forever).start()", "def post(self):\n try:\n msg = json.loads(self.request.body)\n command = msg[\"command\"]\n # start/stop data_worker\n if command == \"start\":\n message = self.start_data_worker()\n self.write({\"success\": True, \"message\": message})\n elif command == \"stop\":\n message = self.stop_data_worker()\n self.write({\"success\": True, \"message\": message})\n else:\n self.write({\"success\": False, \"message\": \"unknown command\"})\n except Exception:\n log.exception(\"Exception\")\n self.write({\"success\": False, \"message\": \"error during control\"})", "def monitor_ws(self):\n started = False\n restart_count = 0\n while True:\n if not started or self.ws is None:\n self.ws = BitMEXWebsocket(endpoint=\"https://www.bitmex.com/api/v1\", symbol=self.strategy.symbol,\n api_key=self.api_key, api_secret=self.api_secret)\n sleep(1)\n #try:\n if self.ws.started:\n self.logger.info('Websocket is running.')\n started = True\n self.ws_restarting = False\n #except Exception as fuck:\n #self.logger.error(f'Error in monitor_ws: {fuck}')\n else:\n if self.ws.lagging:\n self.logger.error('Ws is lagging ,forcing a restart...')\n self.ws.exit()\n started = False\n self.ws_restarting = True\n restart_count += 1\n self.logger.info(f'Ws starts this session: {restart_count}')\n\n else:\n sleep(1)", "async def keep_alive(self, period=1, margin=.3):\n self.KeepAlive.interval = period\n self.KeepAlive.margin = margin", "async def handle_ping(request):\n return web.Response(text=f\"OK {datetime.now().isoformat()}\\n\", headers={'Content-Type': 'text/event-stream'})", "def ping_daemon(self):\n s = self.ping_interval\n while True:\n p = domintell.messages.Ping()\n self.send(p)\n time.sleep(s)", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def running(self) -> bool:\n return self._state == AsyncPostRequest._RUNNING", "def timerAction():\n timer = threading.Timer(30.0, timerAction)\n 
timer.daemon = True\n timer.start()\n save()", "def post(self):\n sleep(pow((self.unit * self.timeout), self.count))", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False" ]
[ "0.6335293", "0.6263304", "0.6176471", "0.61680967", "0.6098956", "0.6098956", "0.6098956", "0.6034498", "0.5934776", "0.5786157", "0.5784672", "0.5767236", "0.57396203", "0.57379746", "0.5700311", "0.5679326", "0.5662139", "0.56477326", "0.5647195", "0.56367123", "0.5632368", "0.56243", "0.5592091", "0.5568196", "0.55598336", "0.5550228", "0.5538802", "0.5535337", "0.5510907", "0.5478055", "0.5476954", "0.54576606", "0.5450369", "0.5446191", "0.5445085", "0.54326385", "0.541886", "0.54011893", "0.539024", "0.5388052", "0.5384775", "0.53689957", "0.53668517", "0.53545487", "0.53452295", "0.531786", "0.5303038", "0.53027874", "0.5291096", "0.5290996", "0.52885085", "0.52801585", "0.52793306", "0.5247848", "0.52417064", "0.52389586", "0.523801", "0.5237575", "0.523165", "0.522742", "0.52157426", "0.5210976", "0.52099115", "0.520755", "0.5205918", "0.5200901", "0.51947063", "0.5186975", "0.5178605", "0.51681745", "0.51674783", "0.5157048", "0.51501524", "0.51451546", "0.51331717", "0.5128908", "0.51261455", "0.51228744", "0.5103215", "0.5090531", "0.5089325", "0.50857455", "0.5082183", "0.507677", "0.50720435", "0.50597346", "0.5036731", "0.50349003", "0.50279707", "0.50272673", "0.50147766", "0.50093746", "0.5002977", "0.50025254", "0.49996516", "0.49982074", "0.49855337", "0.4984125", "0.49797288", "0.49721736" ]
0.57290554
14
Gets the contact_list of this MessagingCampaign. The contact list that this messaging campaign will send messages for.
def contact_list(self):
    return self._contact_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_contacts(self):\n return self.contacts", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def GetContactList(self):\n\t\tfeeds = []\n\t\tfeed = self.client.GetContacts()\n\t\tfeeds.append(feed)\n\t\tnext = feed.GetNextLink()\n\t\twhile next:\n\t\t\tfeed = self.client.GetContacts(uri=next.href)\n\t\t\tfeeds.append(feed)\n\t\t\tnext = feed.GetNextLink()\n\t\t\n\t\tcontacts = []\n\t\tfor feed in feeds:\n\t\t\tif not feed.entry:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfor i, entry in enumerate(feed.entry):\n\t\t\t\t\tcontacts.append(entry)\n\t\treturn contacts", "def get_contacts_list(self):\n return [(id + 1, contact) for id, contact in enumerate(self.contact_list)]", "def contact_lists(self):\n from hubspot3.contact_lists import ContactListsClient\n\n return ContactListsClient(**self.auth, **self.options)", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def contacts(self):\r\n return contacts.Contacts(self)", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def contacts(self):\n return ContactCollection(self.request)", "def _get_receivers_list(self):\n\n # TODO: document what this plugin expects to be in Dockerfile/where it gets info from\n global_component = self._get_component_label()\n # this relies on bump_release plugin configuring source.git_commit to actually be\n # branch name, not a commit\n if not isinstance(self.workflow.source, GitSource):\n raise PluginFailedException('Source is not of type \"GitSource\", panic!')\n git_branch = self.workflow.source.git_commit\n try:\n r = requests.get(urljoin(self.pdc_url, 'rest_api/v1/release-component-contacts/'),\n headers={'Authorization': 'Token %s' % self._get_pdc_token()},\n params={'global_component': global_component,\n 'dist_git_branch': git_branch,\n 'role': self.pdc_contact_role},\n verify=self.pdc_verify_cert)\n except requests.RequestException as e:\n self.log.error('failed to connect to PDC: %s', str(e))\n raise RuntimeError(e)\n\n if r.status_code != 200:\n self.log.error('PDC returned status code %s, full response: %s',\n r.status_code, r.text)\n raise RuntimeError('PDC returned non-200 status code (%s), see referenced build log' %\n r.status_code)\n\n contacts = r.json()\n\n if contacts['count'] == 0:\n self.log.error('no %s role for the component', self.pdc_contact_role)\n raise RuntimeError('no %s role for the component' % self.pdc_contact_role)\n\n send_to = []\n for contact in contacts['results']:\n send_to.append(contact['contact']['email'])\n\n return send_to", "def get_recipients(self) -> List[Client]:\n\n index_list = [i for i in range(len(self.int_var_list)) if self.int_var_list[i].get() == 1]\n return [self.client_list[i] for i in index_list]", "def get_contacts_list(self):\n contacts = self.driver.find_elements_by_class_name(\"_1wjpf\")\n s= [contact.text for contact in contacts] #extracts chats and last messsages\n print (\"get contacts: \"+str(s)) #print only chat names\n return s[::2] #returns only chat names", "def get_active_contact(self):\n list_contact = Contact.objects.filter(phonebook__campaign=self.id,\n status=CONTACT_STATUS.ACTIVE).all()\n if not list_contact:\n return False\n return list_contact", "def contact(self):\n return self._contact", "def contact(self):\n return self._contact", "def get_contacts(self, count=-1, excluded_guid=None):\n current_len = 
len(self._contacts)\n if current_len == 0 or count == 0:\n return []\n\n if count < 0:\n count = current_len\n else:\n count = min(count, current_len)\n\n if excluded_guid is None:\n # Get the last `count` contacts.\n contact_list = self._contacts[-count:]\n else:\n contact_list = []\n for contact in reversed(self._contacts):\n if contact.guid == excluded_guid:\n continue\n contact_list.append(contact)\n if len(contact_list) >= count:\n break\n return contact_list", "async def get_contacts(self, **kwargs) -> List[CertificateContact]:\n contacts = await self._client.get_certificate_contacts(\n vault_base_url=self._vault_url, **kwargs\n )\n return [CertificateContact._from_certificate_contacts_item(contact_item=item) for item in contacts.contact_list]", "def get_cached_contacts(self):\n return list(self._replacement_cache)", "def get_all_contacts(self):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} ORDER BY id;\".format(\", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query)\n\n return [Contact(*item) for item in data]", "def get_queryset(self):\n contact_data = Contact.objects.filter(contact_groups__in=Member.objects.filter(\n user=self.request.user).values('group_id').distinct())\n\n return contact_data", "def Contact(self):\n return self.__contact", "def support_contacts(self):\n return self._support_contacts", "def contacts(self):\n from hubspot3.contacts import ContactsClient\n\n return ContactsClient(**self.auth, **self.options)", "def get_queryset(self):\n return self.request.user.contacts.all()", "def receiveContactList(self, contactList):", "def update_contacts(self):\n self.contacts = self.db.list_contacts()\n return self.list_contacts()", "def list_contact(self, key, value):\n self.db.list_contact(\n key,\n value,\n )", "def get_contact(self, username, password):\n\t\tdn, username = self.auth(username, password)\n\t\tif self.is_blacklisted(username):\n\t\t\traise ServiceForbidden()\n\n\t\tuser = self.get_udm_user(username=username)\n\t\tif not self.send_plugins:\n\t\t\traise ServiceForbidden()\n\n\t\treturn [{\n\t\t\t\"id\": p.send_method(),\n\t\t\t\"label\": p.send_method_label(),\n\t\t\t\"value\": user[p.udm_property]\n\t\t} for p in self.send_plugins.values() if p.udm_property in user]", "def contacts(request):\n User = get_user_model()\n ids = set(request.user.chatmessage_set.all().values_list(\"recipients\", flat=True))\n context = {\n 'contacts': User.objects.filter(pk__in=ids)\n }\n return render(request, \"chat/contacts.html\", context)", "def contact_info(self):\n return self._contact_info", "def contacts(self):\n service_root = self._get_webservice_url(\"contacts\")\n return ContactsService(service_root, self.session, self.params)", "def cc_email_address(self):\n return self._cc_recipients", "def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def get_messages(self):\n return self.addresses", "def fetch_contact_messages(self, org, contact, created_after, created_before):\n pass", "def get_message_list(self):\n \n result = requests.get(\n url = root_url + '/{}'.format(\"message\"),\n headers = { 'Authorization': api_key },\n )\n\n message_list = result.json()\n\n self.message_list = message_list", "def contact_list(self, contact_list):\n \n self._contact_list = 
contact_list", "def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list", "def get_drip_campaigns(self):\n return list(DripCampaign.objects(user_id=self.user_id))", "def contacts(self):\n if \"contacts\" in self._prop_dict:\n return ContactsCollectionPage(self._prop_dict[\"contacts\"])\n else:\n return None", "def get_list_of_campaigns(self, limit=0, offset=0):\n logger.info(\"Function call: get_list_of_campaigns\")\n return self.__handle_result(self.__send_request('campaigns', 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def get_campaign_name_list(self):\n campaigns = self.find('campaigns', {})\n campaign_names = []\n for campaign in campaigns:\n if 'name' in campaign:\n campaign_names.append(campaign['name'])\n return campaign_names", "def billing_contact(self):\n return self._billing_contact", "def get_a_contact(self, uid):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} WHERE (id=?) ORDER BY id;\".format(\n \", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query, (uid,))\n\n return [Contact(*item) for item in data]", "def recipients(self) -> ty.List[str]:", "def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result", "def contact_info(self):\n return [\n {\n 'contact_info': c.get('contactInfo'),\n 'type': c.get('type'),\n 'primary': c.get('primary'),\n 'verified': c.get('verified'),\n }\n for c in self.entity_payload.get('contactInfo')]", "def resulting_contact(self):\n return self._resulting_contact", "def getallcontacts(self):\n feed_url = self.contacts_client.GetFeedUri(projection='full')\n total_read = 0\n while True:\n print('Retrieving contacts... 
(%d retrieved so far)' % total_read)\n feed = self.contacts_client.get_feed(uri=feed_url,\n auth_token=None,\n desired_class=gdata.contacts.data.ContactsFeed)\n total_read += len(feed.entry)\n for entry in feed.entry:\n yield entry\n next_link = feed.GetNextLink()\n if next_link is None:\n print('All contacts retrieved: %d total' % total_read)\n break\n feed_url = next_link.href", "def source_contact(self):\n return self._source_contact", "def contact_information(self) -> ContactInformation:\n return self._contact_information", "def get_contacts(self, uuid=None, urn=None, group=None, deleted=None, before=None, after=None):\n params = self._build_params(uuid=uuid, urn=urn, group=group, deleted=deleted, before=before, after=after)\n return self._get_query('contacts', params, Contact)", "def get_contacts():\n return jsonify(g.driver.get_contacts())", "def contact_points(self) -> object:\n return self._contact_points", "def get_contacts_by_company(self, company_id):\n\n contacts = self._request('getContactsByCompany', {'company_id': company_id})\n for contact in contacts:\n yield contact", "def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список контактов.')", "def cc_emails(self):\n return self._cc_emails", "def get_customer_list(self):\n return self._customer_repo.get_customer_list()", "def get_company_to_contacts(self, company_id: str):\n return self.get(object_id=company_id, definition=Definitions.COMPANY_TO_CONTACT)", "def get_phone_numbers_to_send_to(self):\n # Get the phone numbers we want to send to, excluding those that have\n # already done the thing we want to remind them of\n phone_numbers = self.PhoneModel.objects.exclude(phone_number__in=self.to_exclude())\\\n .values_list('phone_number', flat=True)\n\n message_text = self.get_message_text()\n # Set from_number to REPORTS_SHORT_CODE so that recipient can\n # simply just respond to this message with their report.\n from_shortcode = settings.REPORTS_SHORT_CODE\n for phone_number in phone_numbers:\n yield phone_number, message_text, from_shortcode", "def get_all(self):\n total_contacts = []\n get_count = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'CONTACTNAME',\n 'COMPANYNAME',\n 'FIRSTNAME',\n 'LASTNAME',\n 'INITIAL',\n 'PRINTAS',\n 'TAXABLE',\n 'MAILADDRESS.ADDRESS1'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n contacts = self.format_and_send_request(data)['data']['CONTACT']\n total_contacts = total_contacts + contacts\n offset = offset + pagesize\n return total_contacts", "def contact(self, contactid):\r\n return contacts.Contact(self, contactid)", "def _messages_list(self, queue):\n\n return queue.messages()", "async 
def delete_contacts(self, **kwargs) -> List[CertificateContact]:\n contacts = await self._client.delete_certificate_contacts(\n vault_base_url=self.vault_url, **kwargs\n )\n return [CertificateContact._from_certificate_contacts_item(contact_item=item) for item in contacts.contact_list]", "async def set_contacts(self, contacts: List[CertificateContact], **kwargs) -> List[CertificateContact]:\n new_contacts = await self._client.set_certificate_contacts(\n vault_base_url=self.vault_url,\n contacts=self._models.Contacts(contact_list=[c._to_certificate_contacts_item() for c in contacts]),\n **kwargs\n )\n return [\n CertificateContact._from_certificate_contacts_item(contact_item=item) for item in new_contacts.contact_list\n ]", "def get_conversations(self):\n\t\treturn self.conversations", "def get_mailing_list():\n\t\tresult = {}\n\t\tconnection = DbHelper.connect()\n\n\t\twith connection.cursor() as cursor:\n\t\t\tsql = \"SELECT email FROM mail_list \\\n\t\t\t\t WHERE is_activated=1;\"\n\t\t\tcursor.execute(sql)\n\t\t\tresult = cursor.fetchall()\n\n\t\treturn [email_data['email'] for email_data in result]", "def get_sent_messages(self):\n return self.sent_messages", "def getList(self):\n return self.list_", "def get(self) -> List[Conversation]:\n return get_all_conversations(), 200", "def get_dmarc_messages(self):\n messages = []\n try:\n if self.opt_use_ssl:\n self.server = poplib.POP3_SSL(self.opt_pop3_server)\n self.server.user(self.opt_global_account[\"username\"])\n self.server.pass_(self.opt_global_account[\"password\"])\n else:\n self.server = poplib.POP3(self.opt_pop3_server)\n self.server.user(self.opt_global_account[\"username\"])\n self.server.pass_(self.opt_global_account[\"password\"])\n except Exception as e:\n raise Exception(\n \"Error connecting to %s with exception %s\" %\n (self.opt_pop3_server, str(e)))\n else:\n self.helper.log_debug(\n 'get_dmarc_messages: successfully connected to %s' %\n self.opt_pop3_server)\n messages = self.byte2str(self.server.uidl()[1])\n self.helper.log_info(\n 'get_dmarc_messages: %d messages' %\n len(messages))\n return messages", "async def get_contacts_for_contact_group(dbcon: DBConnection, contact_group_id: int) -> Iterable[object_models.Contact]:\n q = \"\"\"select\n contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active\n from contact_group_contacts, contacts\n where contact_group_contacts.contact_group_id = %s\n and contact_group_contacts.contact_id = contacts.id\"\"\"\n return [object_models.Contact(*row) for row in await dbcon.fetch_all(q, (contact_group_id,))]", "def get(self):\n args = GET_PARSER.parse_args()\n print(f'args={args}')\n\n return Contacts().get_all(\n args[\"phonetypeOne\"],\n args[\"phonetypeTwo\"],\n args[\"phonetypeThree\"],\n args[\"firstName\"],\n args[\"lastName\"],)", "def get_queryset(self):\n user = self.request.user\n return Contact.objects.filter(owner=user)", "def getList(self):\n return self.list", "def get_recipients(self):\n recipients = set([user.email for user in self.users.all()])\n for group in self.groups.all():\n recipients.update([user.email for user in group.user_set.all()])\n return list(recipients)", "async def get_all_contacts_for_active_monitor(dbcon: DBConnection, monitor_id: int) -> Iterable[object_models.Contact]:\n contacts = set()\n contacts.update(await _active_monitor_contacts(dbcon, monitor_id))\n contacts.update(await _active_monitor_contact_groups(dbcon, monitor_id))\n contacts.update(await _active_monitor_monitor_group_contacts(dbcon, monitor_id))\n 
contacts.update(await _active_monitor_monitor_group_contact_groups(dbcon, monitor_id))\n return list(contacts)", "def get_campaigns(self, uuid=None):\n params = self._build_params(uuid=uuid)\n return self._get_query('campaigns', params, Campaign)", "def first_contact(self) -> List[str]:\n error_list = []\n return error_list", "def get_recipients(self):\n return [\n participant.user for participant in self.obj.activity.accepted_participants\n ]", "def display_contact(self):\n contacts = \"\".join(str(contact) for contact in self.contact_list)\n print(contacts)", "def get_candidate_list(self):\n return self.candidate_list", "def contact(self, id_or_email):\n return ContactCollection(self.request).find(id_or_email)", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def get_citation_child_list(self):\n return self.address_list", "def get_messages(self):\r\n return self.messages", "def pull_one_contact(self, name):\n contact = []\n for x in self.contacts:\n if x[0] == name:\n contact_name = x[0]\n number = x[1]\n email = x[2]\n zipcode = x[3]\n contact = [contact_name, number, email, zipcode]\n print(contact)\n return contact, self.contacts.index(x)", "def get_group_list(self):\n return [(item[0], item[1][0]) for item in self.contacts_by_group_list]", "def get_message_list(self):\n count = 0\n for msg in self.mbox:\n if msg['From'].find(self.config['tgt_email']) > -1:\n dtime = arrow.get(msg['Date'], 'ddd, D MMM YYYY HH:mm:ss ZZ')\n message = dict({'from': msg['From'],\n 'date': dtime,\n 'subject': msg['Subject']})\n # boundary = msg.get_boundary()\n # if boundary is not None:\n # bounds = [m.start() for m\n # in re.finditer(boundary, str(msg))]\n # else:\n # bounds = list()\n # if len(bounds) > 2:\n # message['text'] = str(msg)[bounds[1]:bounds[2]]\n # else:\n # message['text'] = None\n pl = None\n if msg['Subject'].find(\":\") == -1:\n finished = False\n pl = msg.get_payload()\n while finished is False:\n if isinstance(pl, str):\n finished = True\n elif isinstance(pl, list):\n pl = pl[0].get_payload()\n else:\n raise ValueError(\"Non-list, non-str payload?\")\n break\n message['text'] = self.clean_text(str(pl))\n\n if message['text'] is not None:\n self.messages.append(message)\n count += 1\n # print count\n self.messages.sort(key=lambda item: item['date'])", "def get_contacts(self):\n feet = [\"REAR_RIGHT_FOOT\", \"REAR_LEFT_FOOT\",\n \"FRONT_RIGHT_FOOT\", \"FRONT_LEFT_FOOT\"]\n contacts = np.zeros(4, dtype=np.float32)\n for i, foot in enumerate(feet):\n if self.supervisor.getFromDef(foot).getNumberOfContactPoints() > 0:\n contacts[i] = 1.0\n return contacts", "def recipients(self, content_object):\n if not hasattr(content_object, self.recipient_function):\n # the content object does not provide this function, test to see if its a lambda\n if not self.recipient_function.lower().startswith(\"lambda\"):\n raise NotImplementedError(\"%s does not exist for %s\" % (self.recipient_function,\n content_object))\n recipients = eval(self.recipient_function)(content_object)\n else:\n func_or_attr = getattr(content_object, self.recipient_function)\n recipients = func_or_attr() if inspect.ismethod(func_or_attr) else func_or_attr\n if not hasattr(recipients, \"__iter__\"):\n if recipients is None:\n return None\n 
recipients = [recipients]\n return [(r.email, r) if hasattr(r, \"email\") else (r, None) for r in recipients]", "def get_contacts(self, uuids=None, urns=None, groups=None, before=None, after=None, deleted=None, pager=None):\n params = self._build_params(uuid=uuids, urns=urns, group_uuids=groups, before=before, after=after,\n deleted=deleted)\n return Contact.deserialize_list(self._get_multiple('contacts', params, pager))", "def get_messages(self):\n res = self.conn.cursor().execute(\"SELECT * FROM messages\")\n return res.fetchall()", "def recipients(self):\n return self._recips", "def list(self):\n try:\n response = self.service.users().messages().list(userId=self.user_id,\n q=self.query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = self.service.users().messages().list(userId=self.user_id, q=self.query,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return messages\n except errors.HttpError as error:\n print('An error occurred: %s' % error)", "def get(self, set=''):\n params = {}\n if set: params['set'] = set\n\n request = self._connection.get('contacts.json', params=params)\n if request.status_code != 200:\n raise Exception('status code {0}: cannot get contacts'.format(request.status_code))\n return [User.parse(self._connection, each) for each in request.json()]", "def target_contact(self):\n return self._target_contact", "def get_recipients(msg_parsed):\n recipients = []\n addr_fields = ['From', 'To', 'Cc', 'Bcc']\n\n for f in addr_fields:\n rfield = msg_parsed.get(f, \"\") # Empty string if field not present\n rlist = re.findall(ADDR_PATTERN, rfield)\n recipients.extend(rlist)\n\n return recipients" ]
[ "0.72631705", "0.71663344", "0.7121245", "0.6652589", "0.65349084", "0.64175344", "0.63972867", "0.6386485", "0.63770306", "0.63157517", "0.61904186", "0.61765337", "0.6132532", "0.6121634", "0.6121634", "0.602559", "0.6003591", "0.59747976", "0.5964907", "0.5926964", "0.5926448", "0.5915741", "0.59078896", "0.5905416", "0.589102", "0.5790044", "0.5783242", "0.57187986", "0.5704222", "0.5675407", "0.567043", "0.56384206", "0.56341976", "0.5627944", "0.56250626", "0.559637", "0.552889", "0.5514794", "0.5482435", "0.54757184", "0.545075", "0.54494613", "0.544242", "0.5432188", "0.5395183", "0.53910005", "0.534315", "0.53385097", "0.53358686", "0.53316194", "0.53243816", "0.53139645", "0.52997684", "0.52973974", "0.5296391", "0.529599", "0.52929693", "0.52762717", "0.5262194", "0.5249087", "0.5246305", "0.5237774", "0.52237517", "0.52132976", "0.5200423", "0.51861614", "0.518084", "0.51734054", "0.5170489", "0.5152326", "0.51436543", "0.5137565", "0.51349556", "0.51309234", "0.50918293", "0.507489", "0.50743437", "0.5072385", "0.50628895", "0.5050648", "0.50476044", "0.5032133", "0.5026486", "0.5022638", "0.5022638", "0.5022638", "0.5017613", "0.50095385", "0.50027764", "0.4992955", "0.49862403", "0.49730676", "0.49716076", "0.49650335", "0.4963129", "0.49521157", "0.49408746", "0.4938071", "0.49301484", "0.49292713" ]
0.8081953
0
Sets the contact_list of this MessagingCampaign. The contact list that this messaging campaign will send messages for.
def contact_list(self, contact_list):
    self._contact_list = contact_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receiveContactList(self, contactList):", "def set_contacts(self, contacts):\n\n\t\tif contacts is not None and not isinstance(contacts, list):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: contacts EXPECTED TYPE: list', None, None)\n\t\t\n\t\tself.__contacts = contacts\n\t\tself.__key_modified['Contacts'] = 1", "def contacts(self, contacts):\n\n self._contacts = contacts", "def contacts(self, contacts):\n\n self._contacts = contacts", "def contact_list(self):\n return self._contact_list", "def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list", "def contact(self, contact):\n\n self.logger.debug(\"In 'contact' setter.\")\n\n self._contact = contact", "async def set_contacts(self, contacts: List[CertificateContact], **kwargs) -> List[CertificateContact]:\n new_contacts = await self._client.set_certificate_contacts(\n vault_base_url=self.vault_url,\n contacts=self._models.Contacts(contact_list=[c._to_certificate_contacts_item() for c in contacts]),\n **kwargs\n )\n return [\n CertificateContact._from_certificate_contacts_item(contact_item=item) for item in new_contacts.contact_list\n ]", "def contact_points(self, contact_points: object):\n\n self._contact_points = contact_points", "def contact(self, contact):\n\n self._contact = contact", "def contact(self, contact):\n\n self._contact = contact", "def support_contacts(self, support_contacts):\n self._support_contacts = support_contacts", "def contact_lists(self):\n from hubspot3.contact_lists import ContactListsClient\n\n return ContactListsClient(**self.auth, **self.options)", "def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список контактов.')", "def send_mass_messages(self, recipient_list, sender, message=\"\", subject=\"\"):\n try:\n for s in recipient_list:\n self.send_message(to=s, sender=sender, message=message, subject=subject)\n except TypeError:\n return -1\n return 1", "def list(self, list):\n if list is None:\n raise ValueError(\"Invalid value for `list`, must not be `None`\") # noqa: E501\n\n self._list = list", "def do_send_list( self, a_list ):\r\n # --- this 
needs to be moved to task some set up here then on there\r\n self.logger.info( \"turn on sendList\" )\r\n self.send_list_ix = 0\r\n\r\n #self.send_list = [ 180, 920, 160, 1740, 160, 780, 160, 2840, 160, 1320, 160, 1340, 160, ] # 1180, 160, 2700, 160, 12780, 200, 920, \\\r\n #160, 2680, 160, 780, 160, 800, 160, 780, 160, 920, 160, 800, 140, 800, \\\r\n # 160 ]\r\n self.send_list = a_list\r\n self.com_driver.send( \"z\\n\" )\r\n self.list_send = True # if we were mult-threaded this would have to be here\r\n\r\n return", "def contact_point(self, contact_point: object):\n\n self._contact_point = contact_point", "def contact_reference(self, contact_reference):\n\n self._contact_reference = contact_reference", "def list_contact(self, key, value):\n self.db.list_contact(\n key,\n value,\n )", "def target_contact(self, target_contact):\n \n self._target_contact = target_contact", "def contactListClicked(self):\n \n contacts = self.userList.getSelectedItems()\n self.mergeButton.setEnabled(contacts != None and len(contacts) > 1)\n \n if contacts != None and len(contacts) == 1:\n self.messageList.filterByContact(contacts[0])\n else:\n self.messageList.removeFilter()", "def _create_mailing_list(cls):\n cls.mailing_list_1 = cls.env['mailing.list'].with_context(cls._test_context).create({\n 'name': 'List1',\n 'contact_ids': [\n (0, 0, {'name': 'Déboulonneur', 'email': '[email protected]'}),\n (0, 0, {'name': 'Gorramts', 'email': '[email protected]'}),\n (0, 0, {'name': 'Ybrant', 'email': '[email protected]'}),\n ]\n })\n cls.mailing_list_2 = cls.env['mailing.list'].with_context(cls._test_context).create({\n 'name': 'List2',\n 'contact_ids': [\n (0, 0, {'name': 'Gilberte', 'email': '[email protected]'}),\n (0, 0, {'name': 'Gilberte En Mieux', 'email': '[email protected]'}),\n (0, 0, {'name': 'Norbert', 'email': '[email protected]'}),\n (0, 0, {'name': 'Ybrant', 'email': '[email protected]'}),\n ]\n })", "def member_list(self, member_list):\n\n self._member_list = member_list", "def contact_info(self, contact_info):\n\n self._contact_info = contact_info", "def SetDomainsList(self, domainsList) :\n\t\t...", "def source_contact(self, source_contact):\n \n self._source_contact = source_contact", "def remove_from_contact_list(self, contacts_to_remove_list):\n if self.contact_list is None:\n return\n for id in contacts_to_remove_list:\n if id in range(0, len(self.contact_list) + 1):\n self.contact_list[id - 1] = None\n self.contact_list = [contact for contact in self.contact_list if contact is not None]", "def add_contact(self, name, number, email, zipcode):\n \n new_contact = f\"{name}, {number}, {email}, {zipcode}\"\n contact_list = [name,number,email,zipcode]\n self.contacts.append(contact_list)\n self.save()\n print(f\"Thank you {new_contact} has been added to your contact book.\")", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def update_contacts(self, contacts):\n\n if contacts.time.size != 1:\n raise IndexError(\"Contacts should be from one frame only\")\n if contacts.channel.size != self.contacts.channel.size:\n self.new_contact_set(contacts)\n return # Prevent calling update_contacts recursively\n self.contacts = contacts\n contacts = np.array(contacts)\n\n for i, actor in enumerate(self.contacts_actors):\n # mapper = actors.GetNextActor().GetMapper()\n mapper = actor.GetMapper()\n self.contacts_actors[i].GetProperty().SetColor(self.contacts_color)\n 
self.contacts_actors[i].GetProperty().SetOpacity(self.contacts_opacity)\n source = vtkSphereSource()\n source.SetCenter(contacts[0:3, i])\n source.SetRadius(self.contacts_size)\n mapper.SetInputConnection(source.GetOutputPort())", "def contact_information(self, contact_information: ContactInformation):\n\n self._contact_information = contact_information", "def new_contact_set(self, contacts):\n if contacts.time.size != 1:\n raise IndexError(\"Contacts should be from one frame only\")\n self.contacts = contacts\n\n # Remove previous actors from the scene\n for actor in self.contacts_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.contacts_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(contacts.channel.size):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.contacts_actors.append(vtkActor())\n self.contacts_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.contacts_actors[i])\n\n # Update marker position\n self.update_contacts(self.contacts)", "def test_create_contact_list(self):\n c1 = ContactFactory(company_id=self.company.id)\n data = {\n 'title': 'ContactList1',\n 'contact_ids': [c1.id],\n }\n\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n \n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n content = json.loads(response.content)\n self.assertEqual(content['title'], 'ContactList1')\n self.assertEqual(content['contacts'], [c1.id])\n self.assertNotEqual(content['company_id'], None)\n self.assertNotEqual(content['owner'], None)\n\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.contact_lists_count+1, len(content))", "def billing_contact(self, billing_contact):\n\n self._billing_contact = billing_contact", "def contact_number(self, contact_number):\n if contact_number is None:\n raise ValueError(\"Invalid value for `contact_number`, must not be `None`\") # noqa: E501\n\n self._contact_number = contact_number", "def update_contacts(self):\n self.contacts = self.db.list_contacts()\n return self.list_contacts()", "def contact_person(self, contact_person):\n\n self._contact_person = contact_person", "def contact_person(self, contact_person):\n\n self._contact_person = contact_person", "def list_contacts(self):\n return self.contacts", "def set_contact_mechanisms(cls, records, name, value=None):\n Party = Pool().get('party.party')\n\n for record in records:\n Party.write([record.party], {'contact_mechanisms': value})", "def cc_email_address(self, val: list):\n self._cc_recipients = []\n if val is not None:\n for item in val:\n if isinstance(item, EmailAddress):\n self._cc_recipients.append(item)", "def contact_name(self, contact_name):\n\n self._contact_name = contact_name", "def contact_name(self, contact_name):\n\n self._contact_name = contact_name", "def tearDown(self):\n Contact.contact_list = []", "def set_customer(self, customer_list):\n self.multiple_items_selection_from_kendo_dropdown(self.customer_dropdown_locator, customer_list)\n self.wait_for_ajax_spinner_load()", "def 
__init__(self, first_name=\" \", last_name=\" \", phone_number=0, phone_number_type=\" \", contact_list=[]):\n self.first_name = first_name\n self.last_name = last_name\n self.phone_number = phone_number\n self.phone_number_type = phone_number_type\n self.valid_phone_number_types = [\"home\", \"office\", \"cell\"]\n self.contact_list = contact_list", "def mailman_add(contact, listname=None, send_welcome_msg=None, admin_notify=None):\n mm, listname = _get_maillist(listname)\n print('mailman adding %s to %s' % (contact.email, listname), file=sys.stderr)\n if send_welcome_msg is None:\n send_welcome_msg = mm.send_welcome_msg\n\n userdesc = UserDesc()\n userdesc.fullname = contact.full_name\n userdesc.address = contact.email\n userdesc.digest = False\n\n if mm.isMember(contact.email):\n print(_('Already Subscribed: %s' % contact.email), file=sys.stderr)\n else:\n try:\n try:\n mm.Lock()\n mm.ApprovedAddMember(userdesc, send_welcome_msg, admin_notify)\n mm.Save()\n print(_('Subscribed: %(email)s') % { 'email' : contact.email }, file=sys.stderr)\n except Errors.MMAlreadyAMember:\n print(_('Already a member: %(email)s') % { 'email' : contact.email }, file=sys.stderr)\n except Errors.MMBadEmailError:\n if userdesc.address == '':\n print(_('Bad/Invalid email address: blank line'), file=sys.stderr)\n else:\n print(_('Bad/Invalid email address: %(email)s') % { 'email' : contact.email }, file=sys.stderr)\n except Errors.MMHostileAddress:\n print(_('Hostile address (illegal characters): %(email)s') % { 'email' : contact.email }, file=sys.stderr)\n finally:\n mm.Unlock()", "def contact_id(self, contact_id):\n if self.local_vars_configuration.client_side_validation and contact_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `contact_id`, must not be `None`\") # noqa: E501\n\n self._contact_id = contact_id", "def set_contacts_size(self, contacts_size):\n self.contacts_size = contacts_size\n self.update_contacts(self.contacts)", "def block(self, report=False):\n prefix = \"28\" if isinstance(self, SkypeBotUser) else \"8\"\n self.skype.conn(\"PUT\", \"{0}/users/{1}/contacts/blocklist/{2}:{3}\"\n .format(SkypeConnection.API_CONTACTS, self.skype.userId, prefix, self.id),\n auth=SkypeConnection.Auth.SkypeToken, json={\"report_abuse\": report, \"ui_version\": \"skype.com\"})\n self.blocked = True", "def test_save_multiple_contact(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0798765432\", \"[email protected]\")\n test_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 2)", "def send_email(subject, message, recipient_list, from_email=None,\n fail_silently=False, connection=None):\n if not from_email:\n from_email = _s('SERVER_EMAIL') or _s('DEFAULT_FROM_EMAIL')\n try:\n subj = unicode(subject)\n except UnicodeDecodeError:\n subj = subject.decode('utf8')\n datatuple = [(subj, message, from_email, [recipient],) \\\n for recipient in recipient_list]\n send_mass_mail(datatuple)", "def setListDoc(self, list):\n if list is None: list__o = None\n else: list__o = list._o\n libxml2mod.xmlSetListDoc(list__o, self._o)", "def set_raw_contact(self, value: Atoms):\n self._raw_contact = value", "async def send_contact(self, chat_id: typing.Union[base.Integer, base.String],\n phone_number: base.String,\n first_name: base.String, last_name: typing.Union[base.String, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_to_message_id: typing.Union[base.Integer, None] = None,\n reply_markup: 
typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup,\n types.ReplyKeyboardRemove,\n types.ForceReply, None] = None) -> types.Message:\n reply_markup = prepare_arg(reply_markup)\n payload = generate_payload(**locals())\n result = await self.request(api.Methods.SEND_CONTACT, payload)\n\n return types.Message(**result)", "def set_contacts_color(self, contacts_color):\n self.contacts_color = contacts_color\n self.update_contacts(self.contacts)", "def contact_method(self, contact_method):\n\n self._contact_method = contact_method", "def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)", "def sequencing_contact(self, sequencing_contact):\n self.logger.debug(\"In 'sequencing_contact' setter.\")\n\n self._sequencing_contact = sequencing_contact", "def cc_emails(self, cc_emails):\n\n self._cc_emails = cc_emails", "def email_ml_set_can_send_on_behalf(self, maillist=None, maillist_uid=None, subscriber=None, subscriber_uid=None, can_send_on_behalf=None):\n if not maillist and not maillist_uid:\n raise ValueError('Maillist or uid required')\n if not subscriber and not subscriber_uid:\n raise ValueError('Subscriber or uid required')\n return self._request('email/ml/set_can_send_on_behalf', inspect_args_func(inspect.currentframe()))", "def ip_addresses_list(self, ip_addresses_list):\n\n self._ip_addresses_list = ip_addresses_list", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def test_save_multiple_contacts(self):\n self.new_contact.save_contact() # saving the new contact\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new user\n test_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 2)", "def email_process(recipient_list: List[Client]) -> None:\n\n if recipient_list:\n send_email(recipient_list)\n update_only_emailed_clients(recipient_list)\n remove_fully_contacted_clients()\n else:\n print(\"No emails were sent.\")", "def block_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='block'))", "def GetContactList(self):\n\t\tfeeds = []\n\t\tfeed = self.client.GetContacts()\n\t\tfeeds.append(feed)\n\t\tnext = feed.GetNextLink()\n\t\twhile next:\n\t\t\tfeed = self.client.GetContacts(uri=next.href)\n\t\t\tfeeds.append(feed)\n\t\t\tnext = feed.GetNextLink()\n\t\t\n\t\tcontacts = []\n\t\tfor feed in feeds:\n\t\t\tif not feed.entry:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfor i, entry in enumerate(feed.entry):\n\t\t\t\t\tcontacts.append(entry)\n\t\treturn contacts", "def resulting_contact(self, resulting_contact):\n \n self._resulting_contact = resulting_contact", "def fetch_contact_messages(self, org, contact, created_after, created_before):\n pass", "def setListPunctCmplx(self, list):\n\t\tself.list_punct_cmplx = list", "def configure_tls_to_endpoint_in_sipserver(\n self,\n phone_list: List[Union[fxo_template.FXOTemplate, SIPPhoneTemplate]],\n ) -> None:", "def add_contact_to_google_account(self, i):\n\n self.add_contact_to_phone(i)", "def activities_list(self, new_activities_list):\n self._activities_list = new_activities_list\n self.__save_activities_from_memory_to_file()", "def display_contact(self):\n contacts = \"\".join(str(contact) for contact in self.contact_list)\n print(contacts)", "def update_device_list(self, 
device_list):\n self.device_list = device_list\n\n self.device_combo.clear()\n\n if not device_list:\n return\n\n self.device_combo.addItem(\"\")\n\n active_entry = None\n\n for dev in device_list:\n\n action_string = \"{model:<18} - {contype:<7} - {serial}\".format(model=dev.model,\n contype=dev.device_type,\n serial=dev.serial)\n if dev.serial == self.serial:\n active_entry = action_string\n self.device_combo.addItem(action_string)\n\n if active_entry is not None:\n self.device_combo.setCurrentText(active_entry)", "def AddContact(self, contact):\n\t\tcontact.group_membership_info = [gdata.contacts.data.GroupMembershipInfo(href=self.GetFirstGroupId())]\n\t\ttry:\n\t\t\tself.client.CreateContact(contact)\n\t\texcept gdata.client.RequestError:\n\t\t\tpass", "def email_ml_subscribers(self, maillist=None, maillist_uid=None):\n if not maillist and not maillist_uid:\n raise ValueError('Maillist or uid required')\n return self._request('email/ml/subscribers', inspect_args_func(inspect.currentframe()), method='get')", "def contact_started(self, node, contact):\n for subscriber in self.subscribers:\n subscriber.contact_started(node, contact)", "def bcc_email_address(self, val: list):\n self._bcc_recipients = []\n if val is not None:\n for item in val:\n if isinstance(item, EmailAddress):\n self._bcc_recipients.append(item)", "def contacts(self):\n from hubspot3.contacts import ContactsClient\n\n return ContactsClient(**self.auth, **self.options)", "async def set_contact_group_contacts(dbcon: DBConnection,\n contact_group_id: int, contact_ids: Iterable[int]) -> None:\n\n async def _run(cur: Cursor) -> None:\n q = \"\"\"delete from contact_group_contacts where contact_group_id=%s\"\"\"\n await cur.execute(q, (contact_group_id,))\n for contact_id in contact_ids:\n q = \"\"\"insert into contact_group_contacts (contact_group_id, contact_id) values (%s, %s)\"\"\"\n q_args = (contact_group_id, contact_id)\n await cur.execute(q, q_args)\n\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n await dbcon.transact(_run)", "def push_all(self, contacts):\n for ell in contacts:\n self.push(ell)", "def new_soft_contacts_set(self, soft_contacts):\n if soft_contacts.time.size != 1:\n raise IndexError(\"soft_contacts should be from one frame only\")\n self.soft_contacts = soft_contacts\n\n # Remove previous actors from the scene\n for actor in self.soft_contacts_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.soft_contacts_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(soft_contacts.channel.size):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.soft_contacts_actors.append(vtkActor())\n self.soft_contacts_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.soft_contacts_actors[i])\n # Update marker position\n self.update_soft_contacts(self.soft_contacts)", "def contact_list(request):\n if request.method == 'GET':\n contact = Contact.objects.all()\n serializer = ContactSerializer(contact, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n serializer = ContactSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data,\n status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)", "def set_response_list(self, r_list):\n self.response_list = r_list", "def set_coupled_pair_list(self, 
coupled_pair_list):\n self.coupled_pair_list = coupled_pair_list\n self.reg_coupled_pair = True", "def get_contacts_list(self):\n return [(id + 1, contact) for id, contact in enumerate(self.contact_list)]", "def test_delete_contact_list(self):\n contact_list = ContactList.objects.first()\n\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n \n response = self.client.delete(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.delete(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n \n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.contact_lists_count-1, len(content))", "def __init__(self, contacts_client):\n self.contacts_client = contacts_client", "def contacts(self):\n return ContactCollection(self.request)", "def send(self, from_email, to_list, cc_list, bcc_list, subject, text):\n\n to_address_list = []\n\n if len(to_list) > 0:\n for to_address in to_list:\n to_address_list.append(\n {\n \"email\": to_address,\n \"type\": \"to\"\n }\n )\n\n if len(cc_list) > 0:\n for cc_address in cc_list:\n to_address_list.append(\n {\n \"email\": cc_address,\n \"type\": \"cc\"\n }\n )\n\n if len(bcc_list) > 0:\n for bcc_address in bcc_list:\n to_address_list.append(\n {\n \"email\": bcc_address,\n \"type\": \"bcc\"\n }\n )\n\n sendgrid_data = {\n \"key\": sendgrid_key,\n \"message\": {\n \"text\": text,\n \"subject\": subject,\n \"from_email\": from_email,\n \"to\": to_address_list\n },\n \"async\": False,\n }\n\n response = requests.post(\n sendgrid_url,\n data=json.dumps(sendgrid_data)\n )\n\n if response.ok:\n status = 0\n else:\n status = 1\n\n message = str(response.content)\n\n return status, message", "def appendedEntries(self):\n self.contact_list.append({\"name\": self.first_name.title() + \" \" + self.last_name.title(), \"phone number\": self.phone_number, \"phone number type\": self.phone_number_type})", "def archive_contact_messages(self, org, contact):\n pass", "def fields_in_list(self, fields_in_list):\n\n self._fields_in_list = fields_in_list", "def fields_in_list(self, fields_in_list):\n\n self._fields_in_list = fields_in_list", "def add_contacts(self, contacts, group=None, group_uuid=None):\n payload = self._build_params(contacts=contacts, action='add', group=group, group_uuid=group_uuid)\n self._post('contact_actions', None, payload)", "def __init__(self, contact_loader):\n self.contacts_by_group_list = contact_loader.contacts_by_group_list\n self.contact_list = None", "def update_only_emailed_clients(recipient_list, file=\"db.json\") -> None:\n\n with TinyDB(file) as db:\n for client in recipient_list:\n query = Query()\n db.update(add(\"times contacted\", 1), (query[\"first name\"].matches(client.get_first_name())\n & (query[\"last name\"].matches(client.get_last_name()))))\n db.update(set_val(\"rem date\", datetime_to_string(default_rem_date)),\n (query[\"first name\"].matches(client.get_first_name())\n & (query[\"last name\"].matches(client.get_last_name())\n )))" ]
[ "0.6209503", "0.6202445", "0.588594", "0.588594", "0.5883504", "0.5845821", "0.5795876", "0.5775629", "0.55796677", "0.5562263", "0.5562263", "0.5559013", "0.5479643", "0.5460591", "0.53779536", "0.5320598", "0.53073066", "0.5305575", "0.52854943", "0.5274175", "0.5192591", "0.5121968", "0.5113486", "0.50693697", "0.5029035", "0.5010891", "0.50096", "0.5007492", "0.49756786", "0.49653065", "0.49653065", "0.4957423", "0.49395514", "0.4926072", "0.49036422", "0.48886657", "0.48883614", "0.4827787", "0.48177895", "0.48177895", "0.47843698", "0.47793344", "0.47782195", "0.4772784", "0.4772784", "0.47690243", "0.47679976", "0.47509006", "0.47490525", "0.4745818", "0.47420013", "0.4738187", "0.47350198", "0.4732864", "0.47066173", "0.4680452", "0.46777913", "0.46690828", "0.46402803", "0.46320206", "0.46003407", "0.45983198", "0.45887333", "0.45852426", "0.4579518", "0.45610544", "0.45582846", "0.455033", "0.452969", "0.45121688", "0.45078608", "0.45051056", "0.45047635", "0.44964176", "0.4493843", "0.44880277", "0.4487089", "0.44746184", "0.4472952", "0.44660527", "0.4462495", "0.44624025", "0.44611588", "0.4439524", "0.44338965", "0.44322392", "0.44172618", "0.44003206", "0.43955433", "0.43901408", "0.4382922", "0.4382315", "0.4381088", "0.43688384", "0.43652815", "0.43610385", "0.43610385", "0.43607998", "0.43484876", "0.4348318" ]
0.83265656
0
Gets the errors of this MessagingCampaign. A list of current error conditions associated with this messaging campaign.
def errors(self): return self._errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]", "def Errors(self):\n return self._get_attribute('errors')", "def getErrorsList(self):\n return self.__errors", "def errors (self):\n return self._errors", "def errors (self):\n return self._errors", "def getErrors(self):\n return self.errors", "def errors(self):\n return self.__errors", "def errors(self) -> List[Error]:\n return self._errors_files + list(self._errors.values())", "def errors(self):\r\n if not hasattr(self, '_errors_cache'):\r\n self._errors_cache = self.form.get_field_errors(self)\r\n return self._errors_cache", "def get_errors(self, request):\n\n value = request._get_parameter_value(self)\n return value.errors", "def Errors(self):\r\n\t\treturn self._get_attribute('errors')", "def errors(self):\n return self._properties.get(\"errors\")", "def errors(self) -> Tuple[MqexsErrorInfo, ...]:\n return self.__errors", "def retrieve_error_messages(self):\n return self.errors_seen[:]", "def validation_errors(self):\n return self._validation_errors", "def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")", "def get_validation_errors(self):\n return [err.to_dict() for err in self._schema.validator.validation_errors]", "def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors", "def all_errors(self) -> List[XMLSchemaParseError]:\n errors = []\n for comp in self.iter_components():\n if comp.errors:\n errors.extend(comp.errors)\n return errors", "def get_errors(self):\n df = self.get_events()\n return df[df.error.notnull()]", "def errors(self) -> List[Error]:\n # May have inherited errors with a different path.\n for error in self._errors.values():\n error.path = self.path\n if self.is_removed: # Mark all of our errors as non-persistant.\n error.is_persistant = False\n return list(self._errors.values())", "def get_error_messages(self):\n\n if len(self._sensor_results_list) == 0:\n return\n\n error_msgs = []\n\n for reading in self._sensor_results_list:\n if reading.is_error():\n error_msgs.append(reading.get_error_msg())\n\n if len(error_msgs) > 0:\n return error_msgs\n else:\n return \"No Error Readings\"", "def analysis_errors(self) -> str:\n errors = []\n\n # Get any callback errors\n for cid, callback in self._analysis_callbacks.items():\n if callback.status == AnalysisStatus.ERROR:\n errors.append(f\"\\n[Analysis Callback ID: {cid}]: {callback.error_msg}\")\n\n return \"\".join(errors)", "def job_errors(self) -> str:\n errors = []\n\n # Get any job errors\n for job in self._jobs.values():\n if job and job.status() == JobStatus.ERROR:\n if hasattr(job, \"error_message\"):\n error_msg = job.error_message()\n else:\n error_msg = \"\"\n errors.append(f\"\\n[Job ID: {job.job_id()}]: {error_msg}\")\n\n # Get any job futures errors:\n for jid, fut in self._job_futures.items():\n if fut and fut.done() and fut.exception():\n ex = fut.exception()\n errors.append(\n f\"[Job ID: {jid}]\"\n \"\\n\".join(traceback.format_exception(type(ex), ex, ex.__traceback__))\n )\n return \"\".join(errors)", "def error_data(self):\n\n if not self.__settings:\n return []\n\n return self.__transaction_errors", "def _get_errors(exc):\n if hasattr(exc, 'message'):\n errors = exc.messages\n else:\n errors = [str(exc)]\n return errors", "def errors(self) -> List[Error]:", "def getBuildErrors(self):\n return [x for x in self.xeps 
if x.buildErrors]", "def errors(self) -> str:\n return self.job_errors() + self.analysis_errors()", "def getErrors(self) -> java.util.Collection:\n ...", "def error(self) -> list:\n return self.__err", "def errors():\n return THE_LOGGER.errors", "def report_transaction_error_messages(self):\n response = self.__get_transaction_response()\n\n # get response data from response object\n response_data = response.json()\n\n # get error messages\n response_error = response_data['Error']\n response_error_messages = response_error['messages']\n\n # add all error messages to the report\n error_messages_to_report = []\n for response_error_message in response_error_messages:\n error_description = response_error_message['description']\n error_messages_to_report.append(error_description)\n\n return error_messages_to_report", "def error_messages(self) -> List[str]:\n spatial_msgs = []\n temporal_msgs = []\n if self.spatial:\n spatial_msgs = [m for v, m in self.spatial_validations if not v(self.spatial)]\n if self.temporal:\n temporal_msgs = [m for v, m in self.temporal_validations if not v(self.temporal)]\n\n return spatial_msgs + temporal_msgs", "def getErrorLog(self):\n return _libsbml.SBMLValidator_getErrorLog(self)", "def get_errors(self):\n errors = []\n\n if not self.title:\n msg = 'Title not found: {0}'.format(self.number)\n print(msg)\n errors.append(msg)\n\n if not self.ref:\n msg = 'Ref not found: {0}'.format(self.number)\n print(msg)\n errors.append(msg)\n\n chapter_index = int(self.number) - 1\n\n # get the expected number of frames for this chapter\n expected_frame_count = chapters_and_frames.frame_counts[chapter_index]\n\n for x in range(1, expected_frame_count + 1):\n\n # frame id is formatted like '01-01'\n frame_id = '{0}-{1}'.format(self.number.zfill(2), str(x).zfill(2))\n\n # get the next frame\n frame = next((f for f in self.frames if f['id'] == frame_id), None) # type: dict\n if not frame:\n msg = 'Frame not found: {0}'.format(frame_id)\n print(msg)\n errors.append(msg)\n else:\n # check the frame img and values\n if 'img' not in frame or not frame['img']:\n msg = 'Attribute \"img\" is missing for frame {0}'.format(frame_id)\n print(msg)\n errors.append(msg)\n\n if 'text' not in frame or not frame['text']:\n msg = 'Attribute \"text\" is missing for frame {0}'.format(frame_id)\n print(msg)\n errors.append(msg)\n\n return errors", "def GetAll(self):\n return self._errors.copy()", "def getErrors(self):\n errorList = []\n\n # E0\n try:\n if not self.e0.isValid():\n errorList.append(\"Invalid first error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No first error axis in ErrorEllipse Class.\")\n\n # E1\n try:\n if not self.e1.isValid():\n errorList.append(\"Invalid second error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No second error axis in ErrorEllipse Class.\")\n\n # E2\n try:\n if not self.e2.isValid():\n errorList.append(\"Invalid third error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No third error axis in ErrorEllipse Class.\")\n\n # maximumHorizontalProjection\n try:\n self.maximumHorizontalProjection\n except (NameError, AttributeError):\n errorList.append(\"No MaximumHorizontalProjection in ErrorEllipse Class.\")\n\n # maximumVerticalProjection\n try:\n self.maximumVerticalProjection\n except (NameError, AttributeError):\n errorList.append(\"No MaximumVerticalProjection in ErrorEllipse Class\")\n\n # equivalentHorizontalRadius\n try:\n 
self.equivalentHorizontalRadius\n except (NameError, AttributeError):\n errorList.append(\"No EquivalentHorizontalRadius in ErrorEllipse class\")\n\n return errorList", "def get_form_errors(form):\n all_errors = []\n for field in form.errors:\n all_errors += form.errors[field]\n return all_errors", "def get_error(self) -> List[str]:\n return []", "def get_error(self) -> List[str]:\n return []", "def getNumErrors(self):\n return _libsbml.XMLErrorLog_getNumErrors(self)", "def security_errors(self):\n errors = ErrorDict()\n for f in [\"honeypot\", \"timestamp\", \"security_hash\"]:\n if f in self.errors:\n errors[f] = self.errors[f]\n return errors", "def get_render_errors(self, revision_id):\n url = DeckhandClient.get_path(\n DeckhandPaths.RENDERED_REVISION_DOCS\n ).format(revision_id)\n\n errors = []\n\n LOG.debug(\"Retrieving rendered docs checking for validation messages\")\n response = self._get_request(url)\n if response.status_code >= 400:\n err_resp = yaml.safe_load(response.text)\n errors = err_resp.get('details', {}).get('messageList', [])\n if not errors:\n # default message if none were specified.\n errors.append({\n \"error\": True,\n \"message\": (\"Deckhand has reported an error but did not \"\n \"specify messages. Response: {}\".format(\n response.text))})\n return errors", "def get_errors(response):\n errors = response.get(\"error\")\n if errors:\n return [e.get(\"message\") for e in errors]\n return None", "def errors(self):\n return self.args[1]", "def errors(self):\n raise NotImplementedError", "def getParseErrors(self):\n return [x for x in self.xeps if x.parseErrors]", "def get_field_errors(self, field):\r\n identifier = format_html('{0}.{1}', self.form_name, field.name)\r\n return self.error_class([SafeTuple((identifier, '$pristine', '$pristine', 'invalid', e))\r\n for e in self.errors.get(field.name, [])])", "def get_all_failures(self):\n return self._get_filtered_results(success=False)", "def errors(self):\n\n dict = {\"Stellar Mass Error\":[self.st_masserr1,self.st_masserr2],\n \"Stellar Radius Error\":[self.st_raderr1,self.st_raderr2]}\n\n return dict", "def get_errors(self, path: str,\n is_ancillary: bool = False,\n is_system: bool = False,\n is_removed: bool = False) -> List[str]:\n u_file = self.__api.files.get(path, is_ancillary=is_ancillary,\n is_system=is_system,\n is_removed=is_removed)\n return [e.message for e in u_file.errors]", "def get_encoding_errors(self):\n return self._encoding_errors", "def get_errors(self):\n return {'loss': self.loss.data[0]}", "def date_errors(self):\r\n try:\r\n _date_errors = self._validate_date(self.sourceDateCol)\r\n return _date_errors\r\n except:\r\n return None", "def formatErrors(self):\n errorlist = []\n xepsWithErrors = sorted(\n set(self.getParseErrors() + self.getBuildErrors()),\n key=lambda x: str(x))\n if self.getErrors() or xepsWithErrors:\n if self.getErrors():\n errorlist.append(\"********** Read errors **********\")\n for error in self.getErrors():\n errorlist.append(error)\n for xep in xepsWithErrors:\n errorlist.append(\n \"********** Error report for {} **********\".format(str(xep)))\n if xep.parseErrors:\n errorlist.append(\"********** Parsing Errors **********\")\n errors = list(set(xep.parseErrors))\n for error in errors:\n errorlist.append(error)\n if xep.buildErrors:\n errorlist.append(\"********** Build Errors **********\")\n for error in xep.buildErrors:\n if len(error.splitlines()) > 4:\n error = ''.join(error.splitlines()[:4])\n errorlist.append(error)\n return '\\n'.join(errorlist)\n else:\n return 
None", "def error_details(self):\n return self._error_details", "def errors_fatal(self) -> List[Error]:\n return self._errors_fatal_files + self._errors_fatal", "def error_map(self):\n return self._error_map", "def getErrorLog(self):\n return _libsbml.XMLInputStream_getErrorLog(self)", "def getErrorMessage(self):\n return self._errorMessage", "def failed_messages(self, namespace, queue):\n failed = []\n for m in self.messages(namespace, queue):\n if m.error:\n failed.append(m)\n return failed", "def get_aggregated_exceptions(self) -> Payload:\n return Payload(aggregated_errors=list(self._aggregated_exceptions.values()))", "def _get_resp_body_errors(self):\n\n if self._resp_body_errors and len(self._resp_body_errors) > 0:\n return self._resp_body_errors\n\n errors = []\n warnings = []\n resp_codes = []\n\n if self.verb is None:\n return errors\n\n dom = self.response.dom()\n if dom is None:\n return errors\n\n for e in dom.findall('Errors'):\n eSeverity = None\n eClass = None\n eShortMsg = None\n eLongMsg = None\n eCode = None\n\n try:\n eSeverity = e.findall('SeverityCode')[0].text\n except IndexError:\n pass\n\n try:\n eClass = e.findall('ErrorClassification')[0].text\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n except IndexError:\n pass\n\n try:\n eShortMsg = smart_encode(e.findall('ShortMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eLongMsg = smart_encode(e.findall('LongMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n if int(eCode) not in resp_codes:\n resp_codes.append(int(eCode))\n except IndexError:\n pass\n\n msg = str(\"Class: {eClass}, Severity: {severity}, Code: {code}, {shortMsg} {longMsg}\") \\\n .format(eClass=eClass, severity=eSeverity, code=eCode, shortMsg=eShortMsg,\n longMsg=eLongMsg)\n\n # from IPython import embed; embed()\n\n if eSeverity == 'Warning':\n warnings.append(msg)\n else:\n errors.append(msg)\n\n self._resp_body_warnings = warnings\n self._resp_body_errors = errors\n self._resp_codes = resp_codes\n\n if self.config.get('warnings') and len(warnings) > 0:\n log.warn(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(warnings)))\n\n if self.response.reply.Ack == 'Failure':\n if self.config.get('errors'):\n log.error(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(errors)))\n\n return errors\n\n return []", "def get_errors(self, response: response_domain_model.Response, question_code: str) -> Sequence['ValidationError']:\n ...", "def _find_errors(self, new_response: response_domain_model.Response):\n self._reset_child_state()\n for response in self._responses_to_replay:\n for question_code, conditional in self.requirements.items():\n uncaught_errors = conditional.get_errors(response, question_code=question_code)\n if uncaught_errors:\n # Should not be able to get errors on previously passed responses\n # (invalid answers should be ignored when checking conditionals)\n raise Exception('Invalid answers found in previously checked responses')\n\n new_errors = []\n for question_code, conditional in self.requirements.items():\n new_errors.extend(conditional.get_errors(new_response, question_code=question_code))\n\n if new_errors:\n # Recurse again until there are no new errors found\n new_errors.extend(self._find_errors(new_response))\n\n return new_errors", "def get_error_count(self):\n return sum(1 for outcome in (r.outcome for r in self.values()) if outcome == Result.ERROR)", "def invalid_values(self):\n\n return 
self._get_value(self.invalid_values_provider)", "def getErrorReport(self):\n return self.sError;", "def getErrorReport(self):\n return self.sError;", "def filter_draft_errors(result):\n error_messages = []\n for field, msgs in result.get('messages', {}).items():\n if msgs.get('state', None) == 'error':\n for m in msgs['messages']:\n error_messages.append(dict(\n field=field,\n message=m,\n code=error_codes['validation_error'],\n ))\n return error_messages", "def getError(self):\n \n return self.resp[\"error\"]", "def error_count(self):\n return len(self.errors)", "def _get_errors(sheet, row, col):\n field = _FIELDS['primary data']\n val = sheet.cell(row + field['row'], col + field['column']).value\n if not val:\n return []\n final_row = row + field['row']\n error = sheet.cell(final_row, col + field['column']).value\n while error:\n final_row += 1\n error = sheet.cell(final_row, col + field['column']).value\n return [sheet.cell(i, col + field['column']).value\n for i in range(row + field['row'], final_row)]", "def tasks_with_errors(self):\n errs = []\n while True:\n try:\n errs.append(self._errq.get_nowait())\n except Queue.Empty:\n break\n return errs", "def getErrorMessage(self):\n return self._message", "def CalculateErrors(self, X, D):\n Y = self.Forward(X)\n self.errors = len([Y[i] for i in range(len(Y)) if Y[i] != D[i]])\n return self.errors", "def grammarErrors(self, text, language):\n if not self.__isValidInput(text):\n return []\n\n textUnicode = unicode_str(text)\n errorList = []\n offset = 0\n for paragraph in textUnicode.split(\"\\n\"):\n errorList = errorList + self.__grammarParagraph(paragraph, offset, language)\n offset = offset + len(paragraph) + 1\n return errorList", "def sex_errors(self):\n result = {}\n disable_sex_check = app_settings.get_app_setting(\n \"variants\", \"disable_pedigree_sex_check\", project=self\n )\n if disable_sex_check:\n return result\n for case in self.case_set.all():\n result.update(case.sex_errors(disable_sex_check))\n return result", "def exception(self) -> exceptions.ErrorMessageException:\n\n return ErrorMessage.ERROR_CODES_TO_EXCEPTIONS.get(\n self.error_code,\n exceptions.GenericException\n )", "def get_error(self, idx=0):\n return self.portal.error_log.getLogEntries()[idx]", "def refined_errors(self):\r\n errs = []\r\n for err in self.errors:\r\n if err['typo'].lower() not in self.terms:\r\n errs.append(err)\r\n return errs", "def error_codes(self):\n self._sort_measurements()\n return self._error_codes", "def _pydantic_errors_to_validation_results(\n errors: list[dict | Exception] | ValidationError,\n file_path: Path,\n scope: Scope,\n) -> list[ValidationResult]:\n out = []\n for e in (\n errors.errors() if isinstance(errors, ValidationError) else cast(list, errors)\n ):\n if isinstance(e, Exception):\n message = getattr(e, \"message\", str(e))\n id = \"exception\"\n scope = Scope.FILE\n else:\n id = \".\".join(\n filter(\n bool,\n (\n \"dandischema\",\n e.get(\"type\", \"UNKNOWN\"),\n \"+\".join(e.get(\"loc\", [])),\n ),\n )\n )\n message = e.get(\"message\", e.get(\"msg\", None))\n out.append(\n ValidationResult(\n origin=ValidationOrigin(\n name=\"dandischema\",\n version=dandischema.__version__,\n ),\n severity=Severity.ERROR,\n id=id,\n scope=scope,\n path=file_path,\n message=message,\n # TODO? dataset_path=dataset_path,\n # TODO? 
dandiset_path=dandiset_path,\n )\n )\n return out", "def error_message(self):\n\n return self._error_message", "def get_errors_list(jobs_object, job_id):\n\n # Get the error keys of the concrete job ii\n jenkins_errors = jobs_object[\"jobsConfig\"][\"errorMsg\"]\n\n error_keys = jobs_object[\"jobsConfig\"][\"jenkinsJobs\"][job_id][\"errorType\"]\n\n # Get the error messages of the error keys\n error_list = []\n # We append the action to perform to the error message\n for ii in error_keys:\n if jenkins_errors[ii][\"action\"] == \"retryBuild\":\n for error in jenkins_errors[ii][\"errorStr\"]:\n error_list.append(error + \" - retryBuild\")\n elif jenkins_errors[ii][\"action\"] == \"nodeOff\":\n for error in jenkins_errors[ii][\"errorStr\"]:\n error_list.append(error + \" - nodeOff\")\n elif jenkins_errors[ii][\"action\"] == \"nodeReconnect\":\n for error in jenkins_errors[ii][\"errorStr\"]:\n error_list.append(error + \" - nodeReconnect\")\n else:\n print(\n \"Action not defined. Please define a valid action in \"\n + jobs_config_path\n )\n return error_list", "def get_errors(self, path: str,\n is_ancillary: bool = False,\n is_system: bool = False,\n is_removed: bool = False) -> List[str]:", "def field_errors(bound_field):\n seen = []\n errors = {}\n if hasattr(bound_field.field, \"fields\"):\n for idx, subfield in enumerate(bound_field.field.fields):\n key = \"%s_%d\" % (bound_field.auto_id, idx)\n subfield_errors = getattr(subfield.widget, \"errors\", [])\n errors[key] = subfield_errors\n seen.extend(subfield_errors)\n for error in bound_field.errors:\n if error not in seen:\n errors.setdefault(bound_field.auto_id, [])\n errors[bound_field.auto_id].append(error)\n return errors.items()", "def getExceptions(self):\n return self.getOrDefault(\"exceptions\")", "def GetFailures(self):\n return self._compute_client.all_failures", "def errorbars (self):\n return self._errorbars", "def warnings(self) -> List[Error]:\n return self._get_warnings()", "def check_errors(self):\n\n errors = []\n while True:\n err = self.values(\"SYST:ERR?\")\n if int(err[0]) != 0:\n errmsg = \"Agilent 5313xA: {0}: {1}\".format(err[0], err[1])\n log.error(errmsg + '\\n')\n errors.append(errmsg)\n else:\n break\n\n return errors", "def ErrorMessage(self):\n return self._errormessage", "def errorcaps (self):\n return self._errorcaps", "def get_errored_courses(self):\r\n return dict((k, self.errored_courses[k].errors) for k in self.errored_courses)", "def messages(self):\n return self._messages", "def messages(self):\n return self._messages", "def messages(self):\n return self._messages", "def query_error(self):\n return self.details[KEY_QUERY_ERROR]" ]
[ "0.7164323", "0.70850545", "0.7084039", "0.7073318", "0.7073318", "0.7032286", "0.69743216", "0.6840167", "0.6840085", "0.6816689", "0.68060446", "0.67511207", "0.66765666", "0.6626607", "0.65734386", "0.65553755", "0.65435886", "0.6522403", "0.6402078", "0.63631475", "0.6350149", "0.63170594", "0.6312729", "0.63122153", "0.6284837", "0.62512636", "0.61444694", "0.6065811", "0.60224736", "0.6016319", "0.6014446", "0.5982093", "0.5935994", "0.5854258", "0.583771", "0.5777689", "0.5770109", "0.5731137", "0.5721682", "0.570453", "0.570453", "0.566765", "0.5632113", "0.5595717", "0.5565627", "0.5503202", "0.54932886", "0.5477905", "0.53716326", "0.53676915", "0.5365297", "0.53583497", "0.53526014", "0.5341042", "0.5331032", "0.53226525", "0.5310991", "0.5303637", "0.5286214", "0.5285876", "0.5285202", "0.52813214", "0.52592516", "0.52442986", "0.52141994", "0.52115345", "0.52092403", "0.5201769", "0.5199416", "0.5199416", "0.517958", "0.51787317", "0.5171295", "0.51709306", "0.51611435", "0.5154321", "0.5152182", "0.5130895", "0.50871825", "0.5076235", "0.5074987", "0.50728893", "0.5070535", "0.50689095", "0.50684375", "0.5067607", "0.5066021", "0.50616026", "0.50598854", "0.5041734", "0.50276005", "0.5024531", "0.5020144", "0.5019323", "0.50067776", "0.5000856", "0.4998048", "0.4998048", "0.4998048", "0.4997809" ]
0.7075878
3
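
The records just above and below this point pair the MessagingCampaign `errors` getter and setter docstrings with their bare method bodies. A minimal sketch of the property pattern those bodies come from, assuming a property-based model class (the class name, decorators, and default value are illustrative and not part of the records themselves):

```python
class MessagingCampaign:
    """Minimal sketch of a model exposing an errors accessor pair."""

    def __init__(self):
        # Current error conditions associated with this messaging campaign
        self._errors = []

    @property
    def errors(self):
        # Getter: returns the list of current error conditions
        return self._errors

    @errors.setter
    def errors(self, errors):
        # Setter: replaces the list of error conditions
        self._errors = errors


# Usage sketch:
# campaign = MessagingCampaign()
# campaign.errors = [{"code": "SEND_FAILED"}]
# print(campaign.errors)
```
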
Sets the errors of this MessagingCampaign. A list of current error conditions associated with this messaging campaign.
def errors(self, errors): self._errors = errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errors(self, errors):\n\n self._errors = errors", "def validation_errors(self, validation_errors):\n self._validation_errors = validation_errors", "def add_errors(self, errors):\n self.errors = merge_errors(self.errors, errors)", "def errors (self):\n return self._errors", "def errors (self):\n return self._errors", "def errors(self):\n return self._errors", "def errors(self):\n return self.__errors", "def Errors(self):\n return self._get_attribute('errors')", "def errors(self):\r\n if not hasattr(self, '_errors_cache'):\r\n self._errors_cache = self.form.get_field_errors(self)\r\n return self._errors_cache", "def errors(self):\n return self._properties.get(\"errors\")", "def errors(self) -> Tuple[MqexsErrorInfo, ...]:\n return self.__errors", "def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")", "def getErrors(self):\n return self.errors", "def getErrorsList(self):\n return self.__errors", "def Errors(self):\r\n\t\treturn self._get_attribute('errors')", "def validation_errors(self):\n return self._validation_errors", "def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors", "def error(self, msg, transfers):\n self.validation_exceptions.extend(self._create_exceptions(msg, transfers, ValidationType.ERROR))", "def errors(self) -> List[Error]:\n return self._errors_files + list(self._errors.values())", "def errors(self):\n raise NotImplementedError", "def set_limit(self, errors):\n self.limit = errors", "def errors(self) -> str:\n return self.job_errors() + self.analysis_errors()", "def on_errors(self, errors):\n log.error(\"Received errors: %s\", errors)", "def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]", "def get_errors(self, request):\n\n value = request._get_parameter_value(self)\n return value.errors", "def error_message(self, error_message):\n if (self.local_vars_configuration.client_side_validation and\n error_message is not None and len(error_message) > 500):\n raise ValueError(\"Invalid value for `error_message`, length must be less than or equal to `500`\") # noqa: E501\n\n self._error_message = error_message", "def retrieve_error_messages(self):\n return self.errors_seen[:]", "def report_transaction_error_messages(self):\n response = self.__get_transaction_response()\n\n # get response data from response object\n response_data = response.json()\n\n # get error messages\n response_error = response_data['Error']\n response_error_messages = response_error['messages']\n\n # add all error messages to the report\n error_messages_to_report = []\n for response_error_message in response_error_messages:\n error_description = response_error_message['description']\n error_messages_to_report.append(error_description)\n\n return error_messages_to_report", "def errors(self) -> List[Error]:", "def analysis_errors(self) -> str:\n errors = []\n\n # Get any callback errors\n for cid, callback in self._analysis_callbacks.items():\n if callback.status == AnalysisStatus.ERROR:\n errors.append(f\"\\n[Analysis Callback ID: {cid}]: {callback.error_msg}\")\n\n return \"\".join(errors)", "def check_set_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def get_validation_errors(self):\n return [err.to_dict() for err in self._schema.validator.validation_errors]", "def error_data(self):\n\n if not self.__settings:\n return []\n\n return 
self.__transaction_errors", "def errors(self) -> List[Error]:\n # May have inherited errors with a different path.\n for error in self._errors.values():\n error.path = self.path\n if self.is_removed: # Mark all of our errors as non-persistant.\n error.is_persistant = False\n return list(self._errors.values())", "def job_errors(self) -> str:\n errors = []\n\n # Get any job errors\n for job in self._jobs.values():\n if job and job.status() == JobStatus.ERROR:\n if hasattr(job, \"error_message\"):\n error_msg = job.error_message()\n else:\n error_msg = \"\"\n errors.append(f\"\\n[Job ID: {job.job_id()}]: {error_msg}\")\n\n # Get any job futures errors:\n for jid, fut in self._job_futures.items():\n if fut and fut.done() and fut.exception():\n ex = fut.exception()\n errors.append(\n f\"[Job ID: {jid}]\"\n \"\\n\".join(traceback.format_exception(type(ex), ex, ex.__traceback__))\n )\n return \"\".join(errors)", "def set_error(self, name, value):\n self.errors[name] = value", "def error_count(self, error_count):\n\n self._error_count = error_count", "async def flush_errors(self, errors):\n logger.error(errors)\n data = {\"output\": {\"errors\": errors}, \"state\": \"completed\", \"status\": \"error\"}\n await self.c_task.update(data)", "def errors(self):\n\n dict = {\"Stellar Mass Error\":[self.st_masserr1,self.st_masserr2],\n \"Stellar Radius Error\":[self.st_raderr1,self.st_raderr2]}\n\n return dict", "def setErrorMessage(self, errorMessage):\n self._errorMessage = errorMessage", "def mark_error(self):\r\n self.status = ERROR", "def error_message(self, error_message):\n\n self._error_message = error_message", "def _initialize_error_dictionaries(self):\n for task_id in self.task_ids.keys():\n self.training_errors[task_id] = []\n self.validation_errors[task_id] = []", "def errors_and_warnings(self, errors_and_warnings):\n\n self._errors_and_warnings = errors_and_warnings", "def error(self) -> list:\n return self.__err", "def errors_summary(self, errors_summary):\n\n self._errors_summary = errors_summary", "def all_errors(self) -> List[XMLSchemaParseError]:\n errors = []\n for comp in self.iter_components():\n if comp.errors:\n errors.extend(comp.errors)\n return errors", "def getNumErrors(self):\n return _libsbml.XMLErrorLog_getNumErrors(self)", "def setError(self,err):\n self.error = err", "def getErrors(self) -> java.util.Collection:\n ...", "def set_error(self, error):\n self._set_sub_text('error', text=str(error))\n return self", "def error_messages(self) -> List[str]:\n spatial_msgs = []\n temporal_msgs = []\n if self.spatial:\n spatial_msgs = [m for v, m in self.spatial_validations if not v(self.spatial)]\n if self.temporal:\n temporal_msgs = [m for v, m in self.temporal_validations if not v(self.temporal)]\n\n return spatial_msgs + temporal_msgs", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def _find_errors(self, new_response: response_domain_model.Response):\n self._reset_child_state()\n for response in self._responses_to_replay:\n for question_code, conditional in self.requirements.items():\n uncaught_errors = conditional.get_errors(response, question_code=question_code)\n if uncaught_errors:\n # Should not be able to get errors on previously passed responses\n # (invalid answers should be ignored when checking conditionals)\n raise Exception('Invalid answers found in previously checked responses')\n\n new_errors = []\n for question_code, conditional in self.requirements.items():\n new_errors.extend(conditional.get_errors(new_response, 
question_code=question_code))\n\n if new_errors:\n # Recurse again until there are no new errors found\n new_errors.extend(self._find_errors(new_response))\n\n return new_errors", "def email_error_report(self, digest_content, jats_content, error_messages):\n datetime_string = time.strftime(utils.DATE_TIME_FORMAT, time.gmtime())\n doi = None\n if digest_content:\n doi = digest_content.doi\n body_content = requests_provider.error_email_body_content(\n doi, jats_content, error_messages\n )\n body = email_provider.simple_email_body(datetime_string, body_content)\n author = None\n if digest_content:\n author = digest_content.author\n subject = requests_provider.error_email_subject_msid_author(\n \"digest\", digest_provider.get_digest_msid(digest_content), author\n )\n sender_email = self.settings.digest_sender_email\n\n recipient_email_list = email_provider.list_email_recipients(\n self.settings.digest_jats_error_recipient_email\n )\n\n messages = email_provider.simple_messages(\n sender_email, recipient_email_list, subject, body, logger=self.logger\n )\n self.logger.info(\n \"Formatted %d error email messages in %s\" % (len(messages), self.name)\n )\n\n details = email_provider.smtp_send_messages(\n self.settings, messages, self.logger\n )\n self.logger.info(\"Email sending details: %s\" % str(details))\n\n return True", "def add_error_tables(self, error_tables):\n self.error_distribution = error_tables", "def get_error_messages(self):\n\n if len(self._sensor_results_list) == 0:\n return\n\n error_msgs = []\n\n for reading in self._sensor_results_list:\n if reading.is_error():\n error_msgs.append(reading.get_error_msg())\n\n if len(error_msgs) > 0:\n return error_msgs\n else:\n return \"No Error Readings\"", "def error_count(self):\n return len(self.errors)", "def __set_errors_json(self, error_count_by_operation, errors_by_operation):\n message = \"{0} error/s reported.\".format(error_count_by_operation)\n log_file_path = self.logger.file_logger.log_file_path\n message += \" The latest {0} error/s are shared in detail. 
To view all errors, review this log file on the machine: {1}\".format(len(errors_by_operation), log_file_path) if error_count_by_operation > 0 else \"\"\n return {\n \"code\": Constants.PatchOperationTopLevelErrorCode.SUCCESS if error_count_by_operation == 0 else Constants.PatchOperationTopLevelErrorCode.ERROR,\n \"details\": errors_by_operation,\n \"message\": message\n }", "def security_errors(self):\n errors = ErrorDict()\n for f in [\"honeypot\", \"timestamp\", \"security_hash\"]:\n if f in self.errors:\n errors[f] = self.errors[f]\n return errors", "def GetAll(self):\n return self._errors.copy()", "def errors():\n return THE_LOGGER.errors", "def add_error(self, field, message):\n add_list_value(self.errors, field, message)", "def error_recovery_settings(self, error_recovery_settings):\n\n self._error_recovery_settings = error_recovery_settings", "def setErrorThreshold(self, threshold):\n return self._set(errorThreshold=threshold)", "def setErrorThreshold(self, threshold):\n return self._set(errorThreshold=threshold)", "def _get_resp_body_errors(self):\n\n if self._resp_body_errors and len(self._resp_body_errors) > 0:\n return self._resp_body_errors\n\n errors = []\n warnings = []\n resp_codes = []\n\n if self.verb is None:\n return errors\n\n dom = self.response.dom()\n if dom is None:\n return errors\n\n for e in dom.findall('Errors'):\n eSeverity = None\n eClass = None\n eShortMsg = None\n eLongMsg = None\n eCode = None\n\n try:\n eSeverity = e.findall('SeverityCode')[0].text\n except IndexError:\n pass\n\n try:\n eClass = e.findall('ErrorClassification')[0].text\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n except IndexError:\n pass\n\n try:\n eShortMsg = smart_encode(e.findall('ShortMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eLongMsg = smart_encode(e.findall('LongMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n if int(eCode) not in resp_codes:\n resp_codes.append(int(eCode))\n except IndexError:\n pass\n\n msg = str(\"Class: {eClass}, Severity: {severity}, Code: {code}, {shortMsg} {longMsg}\") \\\n .format(eClass=eClass, severity=eSeverity, code=eCode, shortMsg=eShortMsg,\n longMsg=eLongMsg)\n\n # from IPython import embed; embed()\n\n if eSeverity == 'Warning':\n warnings.append(msg)\n else:\n errors.append(msg)\n\n self._resp_body_warnings = warnings\n self._resp_body_errors = errors\n self._resp_codes = resp_codes\n\n if self.config.get('warnings') and len(warnings) > 0:\n log.warn(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(warnings)))\n\n if self.response.reply.Ack == 'Failure':\n if self.config.get('errors'):\n log.error(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(errors)))\n\n return errors\n\n return []", "def add_error(self, message):\n self.errors.append(message)", "def setErrorMax(self, error_max):\n\t\tself.error_max = error_max", "def getBuildErrors(self):\n return [x for x in self.xeps if x.buildErrors]", "def sex_errors(self):\n result = {}\n disable_sex_check = app_settings.get_app_setting(\n \"variants\", \"disable_pedigree_sex_check\", project=self\n )\n if disable_sex_check:\n return result\n for case in self.case_set.all():\n result.update(case.sex_errors(disable_sex_check))\n return result", "def set_max_errors(self,newMaxErrors):\n previousErrors = self.errors[max(0, self._errorCursor - newMaxErrors) : self._errorCursor]\n #Too complicated fuck me\n wrappingErrors = self.errors[self._errorCursor + 
max(0, self.maxErrors - newMaxErrors) : self.maxErrors]\n del self.errors\n freeSpace = [0] * (newMaxErrors - len(previousErrors) - len(wrappingErrors))\n self._errorCursor = (len(wrappingErrors) + len(previousErrors)) % newMaxErrors\n self.errors = wrappingErrors + previousErrors + freeSpace\n self.maxErrors = newMaxErrors\n self._errorSum = sum(self.errors)\n del previousErrors\n del wrappingErrors\n del freeSpace\n gc.collect()", "def processErrors(self):\n e = self.formatErrors()\n if not self.config.nologtostdout:\n if e:\n print e\n else:\n print \"No errors\"\n if self.config.logtomail:\n if e:\n m = xeputils.mail.LogMail(self.config, e)\n m.send()\n if self.config.logtofile:\n f = open(self.config.logtofile, 'a')\n f.write(\"\\n===================\\n\")\n f.write(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n f.write(\"\\n===================\\n\")\n if e:\n f.write(e)\n else:\n f.write(\"No errors\")\n f.close()", "def failed_replication_jobs(self, failed_replication_jobs):\n if self._configuration.client_side_validation and failed_replication_jobs is None:\n raise ValueError(\"Invalid value for `failed_replication_jobs`, must not be `None`\") # noqa: E501\n if (self._configuration.client_side_validation and\n failed_replication_jobs is not None and failed_replication_jobs < 0): # noqa: E501\n raise ValueError(\"Invalid value for `failed_replication_jobs`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._failed_replication_jobs = failed_replication_jobs", "def init_matches_errors(self) -> None:\n\n self.matches = set()\n self.ignored = set()\n self.errors = set()", "def form_invalid_add_global_errormessages(self, form):\n if self.get_selected_items_form_attribute() in form.errors:\n errormessages = form.errors[self.get_selected_items_form_attribute()]\n for errormessage in errormessages:\n messages.error(self.request, errormessage)", "def record_error(self, message, keys=None, type=None, **kwargs):\n keys = list(keys) if keys is not None else []\n self.errors.append(\n dict(\n message=message,\n keys=keys,\n type=type or EntityErrors.UNCATEGORIZED,\n **kwargs\n )\n )", "def _get_errors(exc):\n if hasattr(exc, 'message'):\n errors = exc.messages\n else:\n errors = [str(exc)]\n return errors", "def report_errors(errors):\n if len(errors) > 0:\n for error in errors:\n logger.debug(error)\n sys.exit(0)", "def WriteFlowErrors(self, errors):\n # Errors are similar to results, as they represent a somewhat related\n # concept. Error is a kind of a negative result. 
Given the structural\n # similarity, we can share large chunks of implementation between\n # errors and results DB code.\n self._WriteFlowResultsOrErrors(self.flow_errors, errors)", "def setError(self, index, error):\n\t\tself.membersWithErrors[index][1] = error", "def add_error(self, msg):\n self._add_message(msg, self._errors)", "def error(self, error):\n\n self._error = error", "def error(self, error):\n\n self._error = error", "def error(self, error):\n\n self._error = error", "def get_errors(self):\n df = self.get_events()\n return df[df.error.notnull()]", "def error(self, message, code='UnknownError', error_code=None, http_status=400):\n\n # Some backwards compatibility\n if error_code is not None and code == 'UnknownError':\n code = error_code\n\n self._add_message( message, self.ERROR, code=code )\n self.n_errors += 1\n self.status = 'ERROR'\n self.http_status = http_status\n self.error_code = code\n self.message = message", "def diagnostic_trouble_codes(self, diagnostic_trouble_codes):\n\n self._diagnostic_trouble_codes = diagnostic_trouble_codes", "def CalculateErrors(self, X, D):\n Y = self.Forward(X)\n self.errors = len([Y[i] for i in range(len(Y)) if Y[i] != D[i]])\n return self.errors", "def error(self, exception=None):\n self._error = exception", "def pin_errors(self):\n for m in range(self.stage_width_list[-1]):\n error, _ = rqrmilib.calculate_submodel_error(self._get_native_object(), self.probe, len(self)-1, m)\n if error < 0: error = 0\n self.error_list[m] = int(error)\n self.rqrmi_state_changed = True\n return self.error_list", "def display_errors(self):\r\n\r\n def format_name(field_name):\r\n \"\"\"Formats field names for error display\"\"\"\r\n if field_name == \"celebration_tier\":\r\n return \"{wLargesse{n\"\r\n return \"{w%s{n\" % field_name.capitalize()\r\n\r\n msg = \"Please correct the following errors:\\n\"\r\n msg += \"\\n\".join(\r\n \"%s: {r%s{n\" % (format_name(field), \", \".join(errs))\r\n for field, errs in self.errors.items()\r\n )\r\n return msg", "def setErrorLog(self, *args):\n return _libsbml.XMLInputStream_setErrorLog(self, *args)", "def error_entity(self, error_entity):\n \n self._error_entity = error_entity", "def error_map(self):\n return self._error_map", "def _update_errors_report(self, model_name, error_msg):\n errors_filename = os.path.join(self._get_results_path(), \"errors.md\")\n with open(errors_filename, \"a\") as fout:\n self.verbose_print(f\"There was an error during {model_name} training.\")\n self.verbose_print(f\"Please check {errors_filename} for details.\")\n fout.write(f\"## Error for {model_name}\\n\\n\")\n fout.write(error_msg)\n link = \"https://github.com/mljar/mljar-supervised/issues/new\"\n fout.write(\n f\"\\n\\nPlease set a GitHub issue with above error message at: {link}\"\n )\n fout.write(\"\\n\\n\")", "def set_error(cls, ekindataset, dp, col):\n if ekindataset[col][dp]['var'] == '':\n return\n if not ekindataset[col][dp].has_key('error'):\n ekindataset[col][dp]['error'] = 0\n return", "def error_reason(self, error_reason):\n\n self._error_reason = error_reason", "def _setErrorNodes(self, errorNodes):\n self._errorNodes = errorNodes", "def add_error(self, resource_type, seq, attr_name, error):\n resource_errors = self.errors.setdefault(resource_type, {})\n seq_errors = resource_errors.setdefault(seq, {})\n attr_errors = seq_errors.setdefault(attr_name, [])\n attr_errors.append(error)" ]
[ "0.7063868", "0.6316049", "0.62133807", "0.5995449", "0.5995449", "0.5993789", "0.5955976", "0.58611673", "0.5738866", "0.57359105", "0.56898946", "0.56828547", "0.56606567", "0.56536406", "0.5637317", "0.5577304", "0.5540206", "0.5453656", "0.54534554", "0.54445463", "0.5427907", "0.5297211", "0.5279484", "0.525997", "0.5244819", "0.5221872", "0.5194199", "0.51878995", "0.5151027", "0.51397586", "0.5121268", "0.5107964", "0.50943774", "0.5080314", "0.50717455", "0.506621", "0.50495", "0.50426507", "0.5004578", "0.5003011", "0.49701798", "0.4967973", "0.49484527", "0.49450395", "0.49142504", "0.49068043", "0.48854777", "0.48778656", "0.4864789", "0.48622867", "0.48615256", "0.4830137", "0.4813637", "0.48103064", "0.4775602", "0.47728068", "0.47710347", "0.47584313", "0.47584012", "0.47515887", "0.47194967", "0.47092918", "0.47020674", "0.4701304", "0.4700421", "0.4700421", "0.4686703", "0.46864355", "0.46795753", "0.46674326", "0.46588477", "0.4658053", "0.4642491", "0.46408594", "0.4626274", "0.46254867", "0.4603615", "0.45933828", "0.45904434", "0.45894766", "0.4588426", "0.45809337", "0.45782277", "0.45782277", "0.45782277", "0.45703274", "0.45577374", "0.4540596", "0.45389873", "0.45238033", "0.4520139", "0.45176798", "0.4509601", "0.45089304", "0.4501302", "0.44994268", "0.44923404", "0.44890392", "0.4487223", "0.4486627" ]
0.7172415
0
Calculate matrix of number of edits to convert every subset of y to every subset of x
def distance_matrix(self, x, y, keyboard_weight=None): # create distance matrix size_x = len(x) + 1 size_y = len(y) + 1 dist_matrix = np.zeros((size_x, size_y)) for i in range(size_x): dist_matrix[i, 0] = i for j in range(size_y): dist_matrix[0, j] = j ## fill distance matrix # no keyboard weight if not keyboard_weight: for i in range(1, size_x): for j in range(1, size_y): # if letters are same if x[i-1] == y[j-1]: dist_matrix[i, j] = dist_matrix[i-1, j-1] # if letters are different else: subs = dist_matrix[i-1, j-1] + 1 delete = dist_matrix[i-1, j] + 1 insert = dist_matrix[i, j-1] + 1 dist_matrix[i, j] = min(subs, delete, insert) # manhattan keyboard weight elif keyboard_weight == "manhattan": for i in range(1, size_x): for j in range(1, size_y): # if letters are same if x[i-1] == y[j-1]: dist_matrix[i, j] = dist_matrix[i-1, j-1] # if letters are different else: dist = self.key_distance(x[i-1], y[j-1], keyboard_weight) subs_weight = dist * self.manhattan_coef subs = dist_matrix[i-1, j-1] + subs_weight delete = dist_matrix[i-1, j] + 1 insert = dist_matrix[i, j-1] + 1 dist_matrix[i, j] = min(subs, delete, insert) # euclidean keyboard weight elif keyboard_weight == "euclidean": for i in range(1, size_x): for j in range(1, size_y): # if letters are same if x[i-1] == y[j-1]: dist_matrix[i, j] = dist_matrix[i-1, j-1] # if letters are different else: dist = self.key_distance(x[i-1], y[j-1], keyboard_weight) subs_weight = dist * self.euclidean_coef subs = dist_matrix[i-1, j-1] + subs_weight delete = dist_matrix[i-1, j] + 1 insert = dist_matrix[i, j-1] + 1 dist_matrix[i, j] = min(subs, delete, insert) return dist_matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def topsolutions(self):\n answers = []\n for y in xrange(0, self.y):\n answer = self.retrieve(y,self.y)\n i = 0\n for x in xrange(0,y):\n answer -= self.retrieve(y,x)*answers[i]\n i += 1\n answers.append(answer)\n return answers", "def build_set(x, y):\n # E_w[yy^T]\n y_y_t = la.inv(np.dot(y, y.transpose()))\n h_matrix = np.dot(np.dot(x, y), y_y_t)\n return h_matrix", "def transform(self, y):\n\n y = self.ensure_output_format(\n y, sparse_format='lil', enforce_sparse=True)\n\n self.clean()\n self.label_count = y.shape[1]\n\n last_id = 0\n train_vector = []\n for labels_applied in y.rows:\n label_string = \",\".join(map(str, labels_applied))\n\n if label_string not in self.unique_combinations:\n self.unique_combinations[label_string] = last_id\n self.reverse_combinations.append(labels_applied)\n last_id += 1\n\n train_vector.append(self.unique_combinations[label_string])\n\n return np.array(train_vector)", "def _get_edit_distance_matrix(x: str, y: str) -> list:\n matrix = [[-1 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]\n\n for j in range(len(matrix[0])):\n matrix[0][j] = j\n\n for i, _ in enumerate(matrix):\n matrix[i][0] = i\n\n return matrix", "def editDist(X, Y, m, n, normalize_len=False):\n if normalize_len:\n if m < n:\n quot = n // m\n new_X = []\n for i in range(quot):\n new_X += X\n X = new_X\n # print(\"Normalized length for X: {}\".format(len(X)))\n else:\n quot = m // n\n new_Y = []\n for i in range(quot):\n new_Y += Y\n Y = new_Y\n # print(\"Normalized length for Y: {}\".format(len(Y)))\n\n\n dp = [[0 for x in range(n+1)] for x in range(m+1)]\n for i in range(m+1):\n for j in range(n+1):\n\n if i == 0:\n dp[i][j] = j\n elif j == 0:\n dp[i][j] = i\n elif X[i-1] == Y[j-1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = 1 + min(dp[i][j - 1], dp[i-1][j], dp[i-1][j-1])\n\n return dp[m][n]", "def p_y_x_knn(y, k):\n result = np.zeros((len(y), 4))\n for i in range(len(y)):\n for j in range(k):\n result[i, y[i, j]] = result[i, y[i, j]] + 1\n return 1 / k * result", "def transform( self, X, y = None ):\n matrix = np.zeros((len(X),len(self.feature_names)))\n for i,bag in enumerate(X):\n for test in bag:\n try:\n matrix[i,self.feature_names.index(test)] = 1\n except ValueError:\n pass\n return matrix", "def toCartesian(self, y):\r\n return Size - y", "def add_matrices(x, y):\n return [[x[i][j] + y[i][j] for j in range(len(x[0]))] for i in range(len(x))]", "def associate_comp(x, y):\n return torch.cat([x[:1] * y[:1] - x[1:] * y[1:], x[:1] * y[1:] + x[1:] * y[:1]])", "def UpdateCostMatrix( self, extraXs ):\n for x in extraXs:\n newRow = [ self.EuclideanDistanceSq(x,y) for y in self.Y ]\n self.C.append(newRow)", "def copnorm_cat_1d(x, y):\n assert isinstance(x, np.ndarray) and (x.ndim == 1)\n assert isinstance(y, np.ndarray) and (x.ndim == 1)\n assert y.dtype in CONFIG['INT_DTYPE']\n x_cop = np.zeros_like(x)\n y_u = np.unique(y)\n for yi in y_u:\n _idx = y == yi\n x_cop[_idx] = copnorm_1d(x[_idx])\n return x_cop", "def cost_matrix(x, y, p=2):\n xc = tf.expand_dims(x, 1)\n yr = tf.expand_dims(y, 0)\n d = tf.math.pow(tf.abs(xc - yr), p)\n return tf.reduce_sum(d, axis=-1)", "def fillCostMatrix(xs0,ys0,xs1,ys1):\n M = int ( max(len(xs0),len(xs1)) ) #Number of centroids.\n costMatrix = np.ones((M,M))*-1\n x_rows = np.zeros(M)\n x_rows[0:len(xs0)] = xs0\n y_rows = np.zeros(M)\n y_rows[0:len(xs0)] = ys0\n \n x_cols = np.zeros(M)\n x_cols[0:len(xs1)] = xs1\n y_cols = np.zeros(M)\n y_cols[0:len(xs1)] = ys1\n\n for i in range(len(xs0)):\n for j in range(len(xs1)):\n 
costMatrix[i,j]=(y_rows[i]-y_cols[j])**2\n costMatrix[i,j] += (x_rows[i]-x_cols[j])**2\n return costMatrix", "def reconstruct_input(self, ix):", "def inverse_transform(self, y):\n n_samples = len(y)\n result = sparse.lil_matrix((n_samples, self.label_count), dtype='i8')\n for row in range(n_samples):\n assignment = y[row]\n result[row, self.reverse_combinations[assignment]] = 1\n\n return result", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def init_output_mat(self, y_list):", "def associate(x, y):\n x = torch.cat([x[1:], x])\n xx, yy = x.reshape(1,1,-1), y.flip(0).reshape(1,1,-1)\n zz = torch.nn.functional.conv1d(xx, yy)\n z = zz.reshape(-1)\n return normalize(z)", "def fillCostMatrix(xs0,ys0,xs1,ys1):\n M = int ( max(len(xs0),len(xs1)) ) #Number of centroids.\n costMatrix = np.zeros((M,M))\n x_rows = np.zeros(M)\n x_rows[0:len(xs0)] = xs0\n y_rows = np.zeros(M)\n y_rows[0:len(xs0)] = ys0\n \n x_cols = np.zeros(M)\n x_cols[0:len(xs1)] = xs1\n y_cols = np.zeros(M)\n y_cols[0:len(xs1)] = ys1\n\n for i in range(len(xs0)):\n for j in range(len(xs1)):\n costMatrix[i,j]=(y_rows[i]-y_cols[j])**2\n costMatrix[i,j] += (x_rows[i]-x_cols[j])**2\n return costMatrix", "def alloc2d(x,y,iv=0):\n return [[iv for j in range(int(x))] for i in range(int(y))]", "def dim_reduction(data_set, components):\n transformed = []\n index = -1\n transformed = data_set @ components\n return transformed", "def inverse_transform(self, y: Array2D) -> Array2D:", "def _lcs(x, y):\n n, m = len(x), len(y)\n table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table", "def _lcs(x, y):\n n, m = len(x), len(y)\n table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table", "def transform(self, x: Array2D) -> Array2D:", "def transform(self,X):\n # conver X to a list\n X = X.tolist()\n result = []\n\n # iterate over the length of X\n for b in range(len(X)):\n\n # change dataset accoring to bias\n if self.include_bias:\n X[b].insert(0, 1)\n \n # initialize an array to store dynamically all array of indices\n init_arr = []\n for j in range(len(X[b])):\n init_arr.append([j])\n\n # array of indices\n arr = [j for j in range(len(X[b]))]\n separate_arr = init_arr.copy()\n\n # iterate for the degree given\n for k in range(0,self.degree-1):\n # for len of the array containing indices\n for i in range(len(arr)):\n temp = i\n # this loop will have different length since length increases\n for j in range((k)*len(arr),len(separate_arr)):\n element = init_arr[j].copy()\n element.append(temp)\n init_arr.append(element) \n separate_arr = init_arr.copy()\n # sort the array obtained to remove repeated elements\n array = []\n for m in range(len(init_arr)):\n init_arr[m].sort()\n if(init_arr[m] not in array):\n array.append(init_arr[m])\n\n # calculate the final values by multiplying the numbers or columns at the place of indices\n final = []\n 
for i in array:\n lst = []\n # only if lenth satisfies the given degree\n if len(i)==self.degree:\n for j in i: \n lst.append(X[b][j]) \n final.append(np.product(lst))\n result.append(final)\n return result", "def solutions(self):\n answers = []\n for y in reversed(xrange(0, self.y)):\n answer = self.retrieve(y,self.y)\n i = 0\n for x in reversed(xrange(y+1, self.y)):\n answer -= self.retrieve(y,x)*answers[i]\n i += 1\n answers.append(answer)\n answers.reverse()\n return answers", "def transform(self, y):\n return self.cidx_by_size_[self.le_.transform(y)]", "def get_imin(x1, x2, y, k=1, normalize=None, norm=np.inf):\n\n if normalize:\n y = normalize(y)\n\n y_tree = cKDTree(y)\n\n n = len(y)\n i_spec = np.zeros((2, n))\n\n for jj, x in enumerate([x1, x2]):\n\n if normalize:\n x = normalize(x)\n\n # construct state array for the joint processes:\n xy = np.c_[x,y]\n\n # store data pts in kd-trees for efficient nearest neighbour computations\n # TODO: choose a better leaf size\n x_tree = cKDTree(x)\n xy_tree = cKDTree(xy)\n\n # kth nearest neighbour distances for every state\n # query with k=k+1 to return the nearest neighbour, not counting the data point itself\n # dist, idx = xy_tree.query(xy, k=k+1, p=norm)\n dist, idx = xy_tree.query(xy, k=k+1, p=np.inf)\n epsilon = dist[:, -1]\n\n # for each point, count the number of neighbours\n # whose distance in the x-subspace is strictly < epsilon\n # repeat for the y subspace\n nx = np.empty(n, dtype=np.int)\n ny = np.empty(n, dtype=np.int)\n for ii in xrange(N):\n # nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n # ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n\n i_spec[jj] = digamma(k) - digamma(nx+1) + digamma(ny+1) + digamma(n) # version (1)\n\n i_min = np.mean(np.min(i_spec, 0))\n\n return i_min", "def _lcs(x, y):\n n, m = len(x), len(y)\n table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table", "def create(matrix):\n limit_y = len(matrix)\n limit_x = len(matrix[0])\n\n for y in range(1, limit_y):\n bit.create(matrix[y])\n\n for x in range(1, limit_x):\n for y in range(1, limit_y):\n k = y + (y & -y)\n if k < limit_y:\n matrix[k][x] += matrix[y][x]", "def spaceeff_dyna(x, y, c):\n n = len(y)\n m = len(x)\n for i in range(1, m + 1):\n c[0][1] = 0\n for j in range(1, n + 1):\n if x[i - 1] == y[j - 1]:\n c[j][1] = c[j - 1][0] + 1\n else:\n ctop = c[j][0]\n cleft = c[j - 1][1]\n ctopleft = c[j - 1][0]\n\n c[j][1], d = max3(ctopleft, ctop, cleft)\n for k in range(len(c)):\n c[k][0] = c[k][1]", "def matrixReduction(setHor, setVer, arrayToReduce):\r\n listTemp = []\r\n for i in range(len(setVer)):\r\n listTemp.append(arrayToReduce[setVer[i].index, :])\r\n arrayTemp = numpy.array(listTemp)\r\n listTemp = []\r\n for i in range(len(setHor)):\r\n listTemp.append(arrayTemp[:, setHor[i].index])\r\n result = numpy.transpose(numpy.array(listTemp))\r\n\r\n return result", "def ipset_num_x_y_different():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 3), x_new=np.linspace(2, 5, 4))", "def make_likelihood_table(x, y):\n\n Y = np.unique(y)\n X = np.unique(x)\n\n likelihood = [[0 for i in range(len(Y))] for j in 
range(len(X))]\n\n freq = make_frequency_table(x, y, X, Y)\n\n for j in range(len(Y)):\n Sum = (y == Y[j]).sum()\n for i in range(len(X)):\n likelihood[i][j] = freq[X[i]][j] / Sum\n\n return likelihood", "def fit_transform(self, x: Array2D) -> Array2D:", "def _permute_observations(x, y, num_perms):\r\n vals = hstack([array(x), array(y)])\r\n lenx = len(x)\r\n # sorting step is unnecessary for this code, but it ensure that test code\r\n # which relies on seeding the prng works (if we dont do this then different\r\n # observation orders in x and y for eg. the mc_t_two_sample test will fail\r\n # to produce the same results)\r\n vals.sort()\r\n inds = arange(vals.size)\r\n xs, ys = [], []\r\n for i in range(num_perms):\r\n shuffle(inds)\r\n xs.append(vals[inds[:lenx]])\r\n ys.append(vals[inds[lenx:]])\r\n return xs, ys", "def _lev(x, y):\n mat = initialize_matrix(x, y)\n\n for i in range(1, len(y)+1):\n for j in range(1, len(x)+1):\n mat[i].append(_generate_new_node(mat, i, j, x, y))\n\n return mat", "def _lev(x, y):\n mat = initialize_matrix(x, y)\n\n for i in range(1, len(y)+1):\n for j in range(1, len(x)+1):\n mat[i].append(_generate_new_node(mat, i, j, x, y))\n\n return mat", "def brute_multiply(x, y):\n \n n = x.shape[0]\n res = np.zeros(x.shape)\n \n for i in range(n):\n for j in range(n):\n for k in range(n):\n res[i, j] += x[i, k] * y[k, j]\n \n return res", "def filt1(X, yvals, xvals, ny, nx):\n \n ylen = X.shape[0]\n xlen = X.shape[1]\n\n yflen = (ylen-1)//ny\n xflen = (xlen-1)//nx\n\n Y = np.zeros((X.shape))\n\n #Y = Y[0:yflen,0:xflen,]\n\n ymax = ny*yflen+1\n xmax = nx*xflen+1\n\n Y = Y[0:ymax,0:xmax,]\n Xnew = X[0:ymax,0:xmax,]\n yvals = yvals[0:ymax,0:xmax,]\n xvals = xvals[0:ymax,0:xmax,] \n\n counter = np.zeros((Y.shape))\n \n for i in range(xflen):\n xmin = nx*i\n xmax = nx*(i+1)+1\n for j in range(yflen):\n ymin = ny*j\n ymax = ny*(j + 1)+1\n #print((xmin,xmax), (ymin,ymax))\n Y[ymin:ymax,xmin:xmax,] = Y[ymin:ymax,xmin:xmax,] + np.mean(X[ymin:ymax,xmin:xmax,], axis=(0,1))\n counter[ymin:ymax,xmin:xmax,] = counter[ymin:ymax,xmin:xmax,] + 1\n\n Y = Y/counter #We take the average of the points that appear more than once\n\n return Xnew, Y, yvals, xvals", "def fit(self, x, y):\n tempdf = pd.DataFrame({'x':x, 'y':y})\n self.d = tempdf.groupby('x').apply(lambda g: g.y.sum()/len(g)).to_dict()", "def cemap_cal(y_pred,y_true):\r\n nTest = y_true.shape[0]\r\n nLabel = y_true.shape[1]\r\n ap = np.zeros(nTest)\r\n for i in range(0,nTest):\r\n for j in range(0,nLabel):\r\n R = np.sum(y_true[i,:])\r\n if y_true[i,j]==1:\r\n r = np.sum(y_pred[i,:]>=y_pred[i,j])\r\n rb = np.sum(y_pred[i,np.nonzero(y_true[i,:])] >= y_pred[i,j])\r\n ap[i] = ap[i] + rb/(r*1.0)\r\n ap[i] = ap[i]/R\r\n imap = np.nanmean(ap)\r\n\r\n ap = np.zeros(nLabel)\r\n for i in range(0,nLabel):\r\n for j in range(0,nTest):\r\n R = np.sum(y_true[:,i])\r\n if y_true[j,i]==1:\r\n r = np.sum(y_pred[:,i] >= y_pred[j,i])\r\n rb = np.sum(y_pred[np.nonzero(y_true[:,i]),i] >= y_pred[j,i])\r\n ap[i] = ap[i] + rb/(r*1.0)\r\n ap[i] = ap[i]/R\r\n lmap = np.nanmean(ap)\r\n\r\n return lmap,imap", "def fit(self, X, y):\n # TODO: Implement\n self.cols = X.columns\n self.nCols = len(self.cols)\n X = np.array(X)\n y = np.array(y)\n \n for i in range(X.shape[1]): \n uniq = np.unique(X[:, i])\n self.possible.append(list(uniq)) # add possible values\n self.valN.append(len(uniq)) # and how many\n index = np.argmax(self.valN)\n print(index)\n self.tree = self.buildTree(X, y)", "def ipset_y_2d():\n return IPSet(x=np.linspace(0, 10, 11), 
y=np.random.randn(11, 4), x_new=np.linspace(1, 4, 3))", "def sum_outers(x, y):\n # In PyTorch 4.0, `einsum` modifies variables inplace. This will not work\n # unless you have PyTorch 4.1:\n #\n # https://github.com/pytorch/pytorch/issues/7763\n #\n return torch.einsum('ab,cb->ac', [x, y])", "def UpdateS(s, Difference, WorkingSet):\n for i in range(len(x_train)):\n Sum = 0.0\n for j in range(q):\n Sum = Sum + (Difference[j])*y_train[int(WorkingSet[j,0])]*Kernel(i, int(WorkingSet[j,0]))\n s[i] = s[i] + Sum\n return s", "def _diffmat_objective(a,X):\n \n (n,p) = X.shape\n return(X - np.tile(a,(n,1)))", "def multiple(x, y):\n curRow = [1] * x\n for _ in range(1,y):\n for N in range(1,x):\n curRow[N] = curRow[N-1] + curRow[N]\n return curRow[x-1]", "def interpolV(y, x, newX):\r\n \r\n num = len(x)\r\n #if (num != len(y)):\r\n #//System.out.println(\"Toolbox.interpolV(): Old x and y must be same length\"); \r\n \r\n newNum = len(newX)\r\n #//System.out.println(\"interpolV: newNum \" + newNum + \" num \" + num); \r\n #newY = [0.0 for i in range(newNum)]\r\n\r\n#//Renormalize ordinates:\r\n \r\n iMinAndMax = minMax(y)\r\n norm = y[iMinAndMax[1]]\r\n #//System.out.println(\"norm \" + norm);\r\n #yNorm = [0.0 for i in range(num)]\r\n newYNorm = [0.0 for i in range(newNum)] \r\n #for i in range(num):\r\n # yNorm[i] = y[i] / norm \r\n yNorm = [ x / norm for x in y ]\r\n\r\n#// Set any newX elements that are *less than* the first x element to th first \r\n#// x element - \"0th order extrapolation\"\r\n#//\r\n start = 0\r\n for i in range(newNum):\r\n if (newX[i] <= x[1]):\r\n newYNorm[i] = yNorm[0]\r\n start += 1\r\n \r\n if (newX[i] > x[1]):\r\n break\r\n \r\n \r\n#//System.out.println(\"start \" + start);\r\n#//System.out.println(\"x[0] \" + x[0] + \" x[1] \" + x[1] + \" newX[start] \" + newX[start]);\r\n#double jWght, jm1Wght, denom;\r\n\r\n\r\n if (start < newNum-1):\r\n\r\n j = 1 #//initialize old abscissae index\r\n #//outer loop over new abscissae\r\n for i in range(start, newNum):\r\n\r\n #//System.out.println(\"i \" + i + \" j \" + j);\r\n\r\n#// break out if current element newX is *greater* that last x element\r\n if ( (newX[i] > x[num-1]) or (j > (num-1)) ):\r\n break \r\n \r\n\r\n while (x[j] < newX[i]): \r\n j += 1\r\n \r\n #//System.out.println(\"i \" + i + \" newX[i] \" + newX[i] + \" j \" + j + \" x[j-1] \" + x[j-1] + \" x[j] \" + x[j]);\r\n #//1st order Lagrange method:\r\n jWght = newX[i] * (1.0 - (x[j-1]/newX[i])) #//(newX[i]-x[j-1])\r\n jm1Wght = x[j] * (1.0 - (newX[i]/x[j])) #//(x[j]-newX[i])\r\n denom = x[j] * (1.0 - (x[j-1]/x[j])) #//(x[j]-x[j-1])\r\n jWght = jWght / denom\r\n jm1Wght = jm1Wght / denom\r\n #//newYNorm[i] = (yNorm[j]*(newX[i]-x[j-1])) + (yNorm[j-1]*(x[j]-newX[i]));\r\n newYNorm[i] = (yNorm[j]*jWght) + (yNorm[j-1]*jm1Wght)\r\n #//System.out.println(\"i \" + i + \" newYNorm[i] \" + newYNorm[i] + \" j \" + j + \" yNorm[j-1] \" + yNorm[j-1] + \" yNorm[j] \" + yNorm[j]);\r\n \r\n\r\n#// Set any newX elements that are *greater than* the first x element to the last \r\n#// x element - \"0th order extrapolation\"\r\n#//\r\n for i in range(newNum):\r\n if (newX[i] >= x[num-1]):\r\n newYNorm[i] = yNorm[num-1]\r\n \r\n \r\n\r\n #//Restore orinate scale\r\n #for i in range(newNum):\r\n # newY[i] = newYNorm[i] * norm \r\n newY = [ x * norm for x in newYNorm ]\r\n\r\n\r\n return newY", "def back_spaceeff_dyna(x, y, c):\n n = len(y)\n m = len(x)\n for i in range(m - 1, -1, -1):\n c[n][1] = 0\n for j in range(n - 1, -1, -1):\n if x[i] == y[j]:\n c[j][1] = c[j + 1][0] + 1\n 
else:\n cbottom = c[j][0]\n cright = c[j + 1][1]\n cbottomright = c[j + 1][0]\n\n c[j][1], d = max3(cbottomright, cbottom, cright)\n for k in range(len(c)):\n c[k][0] = c[k][1]", "def expand_features_and_labels(x_feat, y_labels):\n x_expanded = []\n y_expanded = []\n for x, y in zip(x_feat, y_labels):\n for segment in x:\n x_expanded.append(segment)\n y_expanded.append(y)\n return x_expanded, y_expanded", "def transform(self, X, y=None):\n rows = []\n cols = []\n data = []\n #Loop through each reviews\n for row, word_count in enumerate(X):\n #Analyze each review with the total vocab of this dataset\n for word, count in word_count.items():\n rows.append(row)\n cols.append(self.vocabulary_.get(word, 0)) #If non, replace it with 0\n data.append(count)\n \n return csr_matrix((data, (rows, cols)), shape=(len(X), self.vocabulary_size + 1))", "def update_grid(self, x):\r\n\r\n # Append boundary rows and columns to matrix\r\n x = self.append_boundary(x) # the boundary is recomputed at each step\r\n y = np.copy(x)\r\n\r\n # For each cell within boundary, compute state according to rules.\r\n chg_0_1 = 0 # the number of cells that changed from state 0 to state 1\r\n chg_1_0 = 0 # the number of cells that changes from state 1 to state 0\r\n chg_none = 0 # the number of cells that did not change\r\n index = np.arange(1, x.shape[0] - 1)\r\n for i in index:\r\n for j in index:\r\n neighborhood = x[i - 1:i + 2:1, j - 1:j + 2:1] # 3x3 sub matrix centered at i, j\r\n y[i, j] = self.update_cell(neighborhood)\r\n change = int(y[i, j] - x[i, j])\r\n if change == -1:\r\n chg_1_0 += 1\r\n if change == 0:\r\n chg_none += 1\r\n if change == 1:\r\n chg_0_1 += 1\r\n\r\n # Compute statistics excluding boundary\r\n total = np.power(x[1:-1:1, 1:-1:1].shape[0] - 1, 2)\r\n start_1 = np.sum(x[1:-1:1, 1:-1:1])\r\n end_1 = np.sum(y[1:-1:1, 1:-1:1])\r\n stats = [total, start_1, end_1, chg_1_0, chg_none, chg_0_1]\r\n\r\n return y[1:-1:1, 1:-1:1], stats # remove the boundary\r", "def reconstruct(self, X, y):\n return self.sess.run(self.x_reconstr_mean,\n feed_dict={self.x: X, self.y: y.reshape([-1, 1])})", "def transform(i, j, k):\n return i * N * N + j * N + k + 1", "def poly_matrix(x, y, order=2):\r\n ncols = (order + 1)**2\r\n G = np.zeros((x.size, ncols))\r\n ij = itertools.product(range(order+1), range(order+1))\r\n for k, (i, j) in enumerate(ij):\r\n G[:, k] = x**i * y**j\r\n return G", "def y_to_z_mapping(self, Y):\n if len(Y[0])!=self.label_width**2:\n print('input labels have different dimension')\n Z = []\n for label in Y:\n z_label = np.array(label)\n for i in range(self.label_width**2):\n z_label = np.concatenate((z_label, (label[i+1:]==label[i]).astype(int)))\n Z.append(z_label)\n return Z", "def count_subs(x,y):\n\t# Encases diagonals in square grid of size 'square'\n\tsquare = x + y - 2\n\tsubs = 0\n\t# For every point counts the number of rectagles with (a,b) as upper left corner\n\tfor a in range(square):\n\t\tfor b in range(square):\n\t\t\tif valid(a,b,x,y):\n\t\t\t\tthis_subs = subs_at_point(a,b,x,y)\n\t\t\t\tprint \"%3d \" %(this_subs),\n\t\t\tprint \"\"\n\treturn subs", "def expand_ism(ism, Y1_labels):\n \n import random\n import pandas as pd\n import numpy as np\n import time\n print('debug expand ism1') \n voxel_num=len(Y1_labels)\n voxel_ism = np.zeros((voxel_num,voxel_num))\n transform_mat=np.zeros((len(ism),voxel_num))\n \n matrixtime = time.time()\n print('debug expand ism2') \n #import pdb; pdb.set_trace()\n\n for i in range(0,voxel_num):\n transform_mat[Y1_labels[i],i]=1\n\n print('debug expand 
ism3') \n\n temp=np.dot(ism,transform_mat)\n print('debug expand ism4') \n\n target_mat=np.dot(temp.T,transform_mat)\n \n \n XM_time= time.time() - matrixtime\n #print('Matrix expansion took', (time.time() - matrixtime), ' seconds')\n voxel_ism=target_mat\n \n return voxel_ism", "def get_pool_data(x, y):\n\n y_classes = np.max(y)+1\n\n xs = [] # xs to put in the training set\n ys = [] # ys to put in the training set\n idxs = [] # indexes of data put in the training set\n for y_class in range(y_classes):\n idx = np.array( np.where(y == y_class) ).T\n idx = idx[0:2, 0]\n xs.append(x[idx])\n ys.append(y[idx])\n idxs.extend(idx)\n\n x_train = np.concatenate(xs, axis=0)\n y_train = np.concatenate(ys, axis=0)\n\n x_pool = np.delete(x, idxs, axis=0)\n y_pool = np.delete(y, idxs, axis=0)\n \n return (x_train, y_train), (x_pool, y_pool)", "def pull(self,x,y):\n\t\tself.x_sum -= np.sum(x,axis=0)[:,np.newaxis]\n\t\tself.y_sum -= np.sum(y,axis=0)[:,np.newaxis]\n\t\tself.xy_sum -= np.matmul(np.transpose(x),y)\n\t\tself.xx_sum -= np.matmul(np.transpose(x),x)\n\t\tself.yy_sum -= np.matmul(np.transpose(y),y)\n\t\tself.n -= np.shape(x)[0]", "def get_X_Y_vectorized_int(dataset: dict):\n X = []\n Y = []\n\n d_list = list(dataset)\n\n for k in dataset:\n X += dataset[k]\n\n temp = [0] * len(d_list)\n\n index_in_d_list = d_list.index(k)\n\n temp[index_in_d_list] = 1\n\n for i in range(len(dataset[k])):\n Y += [temp]\n\n assert len(X) == len(Y)\n return X, Y", "def make_frequency_table(x, y, X, Y):\n freq = dict()\n\n for i in range(len(X)):\n freq[X[i]] = [0, 0]\n\n # merging the two to get a matrix\n\n M = np.array([[x[i], y[i]] for i in range(len(x))])\n\n for i in range(len(M)):\n if M[i][1] == Y[0]:\n freq[M[i][0]][0] += 1\n else:\n freq[M[i][0]][1] += 1\n\n return freq", "def _get_matrix(self, source_points, destination_points):\n return [\n [self.measure_between_two_points(point_a, point_b) for point_b in destination_points]\n for point_a in source_points\n ]", "def zenith_nadir(x, y):\n if y == 'm':\n bb = []\n cc = []\n for i in range(x.shape[1]):\n bb.append(amax(x[:, i:i + 1]))\n b = array(bb)\n cc.append(amin(x[:, i:i + 1]))\n c = array(cc)\n return (b, c)\n else:\n b = ones(x.shape[1])\n c = zeros(x.shape[1])\n return (b, c)", "def fit(self, X, y):\r\n from collections import defaultdict\r\n y_set = set(y)\r\n ver_cls = dict()\r\n for i in y_set:\r\n ver_cls[i] = y.count(i) / len(y)\r\n add_data_units_cls = defaultdict(dict)\r\n add_data_all_cls = defaultdict(int)\r\n ver_words = defaultdict(dict)\r\n for sentence in range(len(X)):\r\n for word in X[sentence].split():\r\n add_data_all_cls[word] += 1\r\n if y[sentence] in add_data_units_cls[word].keys():\r\n add_data_units_cls[word][y[sentence]] += 1\r\n else:\r\n add_data_units_cls[word][y[sentence]] = 1\r\n znam = defaultdict(int)\r\n for key in add_data_units_cls.keys():\r\n for i in y_set:\r\n if i in add_data_units_cls[key].keys():\r\n znam[i] += add_data_units_cls[key][i]\r\n print(znam)\r\n for key in add_data_all_cls.keys():\r\n for i in y_set:\r\n if i not in add_data_units_cls[key].keys():\r\n add_data_units_cls[key][i] = 0\r\n ver_words[key][i] = (add_data_units_cls[key][i] + self.alpha) / (\r\n znam[i] + self.alpha * len(add_data_all_cls.keys()))\r\n self.ver_dict = ver_words\r\n self.ver_cls = ver_cls\r\n pass", "def gen_ys(self, y_func):\n y_matrix = np.zeros((self.sample_size, self.n_act, self.ydim))\n for trt in range(1, self.n_act + 1):\n y_matrix[:, trt - 1] = y_func(self.x,\n np.ones(self.sample_size).reshape(-1, 1) * trt,\n 
self.ydim, self.generator)\n self.ys = y_matrix", "def update_cnt_map(self,s):\r\n cnts = []\r\n num_grid = self.cnt_map.shape[0]*self.cnt_map.shape[1]\r\n old_coverage =num_grid- self.cnt_map.flatten().tolist().count(0)\r\n for sj in s:\r\n grid_s = self.get_gridState(sj)\r\n self.cnt_map[grid_s[0], grid_s[1]] += 1\r\n cnts.append(self.cnt_map[grid_s[0], grid_s[1]])\r\n\r\n self.map_coverage = num_grid - self.cnt_map.flatten().tolist().count(0)\r\n print(\"Coverage:\",self.map_coverage)\r\n print(\"Change of coverage:\",self.map_coverage-old_coverage)\r\n\r\n return cnts", "def conv_1x1(x, y):\n (S, B, C, H, W) = x.shape\n assert x.shape[-1] == y.shape[-1]\n assert x.shape[-2] == y.shape[-2]\n assert x.shape[-4] == y.shape[-4]\n\n #[..., B, C, H, W] -> [..., B, C, HW]\n x = x.view(*x.shape[:-2], H*W)\n y = y.view(*y.shape[:-2], H*W)\n\n #[..., B, C, C']\n XTX = (x @ x.transpose(-1, -2)).sum(-3)\n XTY = (x @ y.transpose(-1, -2)).sum(-3)\n\n return XTX, XTY", "def __init__(self, xint, yint):\n self.xint = xint\n self.yint = yint\n self.n = len(xint)\n w = np.ones(self.n)\n self.C = (np.max(xint) - np.min(xint)) / 4\n shuffle = np.random.permutation(self.n - 1)\n for j in range(self.n):\n temp = (xint[j] - np.delete(xint,j)) / self.C\n temp = temp[shuffle]\n w[j] /= np.product(temp)\n self.weights = w", "def transform(self, Xs, y=None):\n pass # pragma: no cover", "def resizeXY(X, Y, occurrency, dx, dz):\n\tsumY = sum(Y) \n\tsumX = sum(X)\n\tvisitedY = [False]*len(Y)\n\tfor y_index in range(len(Y)):\n\t\tupdate = True\n\t\tfor x_index in range(len(X)):\n\t\t\tif(occurrency[x_index][y_index] == False):\n\t\t\t\tupdate = False \n\t\tif(update):\n\t\t\tsumY = sumY - Y[y_index]\n\t\t\tsumX = sumX - X[y_index]\n\t\t\tdx = dx - X[y_index]\n\t\t\tdz = dz - Y[y_index]\n\n\tfor x_index in range(len(X)):\n\t\tmodifyX = False\n\t\tfor y_index in range(len(Y)):\n\t\t\tif(occurrency[x_index][y_index] == False and visitedY[y_index] == False):\n\t\t\t\tY[y_index] = (dz * Y[y_index])/sumY\n\t\t\t\tvisitedY[y_index] = True\n\t\t\t\tmodifyX = True\n\t\t\tif(occurrency[x_index][y_index] == False and visitedY[y_index] == True and not modifyX):\n\t\t\t\tmodifyX = True\n\t\tif(modifyX):\n\t\t\tX[x_index] = (dx * X[x_index])/sumX", "def p_y_x_knn(y, k):\n number_of_classes = 4\n resized = np.delete(y, range(k, y.shape[1]), axis=1)\n summed_with_zero = np.vstack(np.apply_along_axis(np.bincount, axis=1, arr=resized, minlength=number_of_classes + 1))\n summed = np.delete(summed_with_zero, 0, axis=1)\n return summed / k", "def create_subsets(x, y):\n # initiate empty list for return variables.\n sets_x = []\n sets_y = []\n indices = []\n\n # iterate through value of PRI_JET_NUM (ranged inclusively from 0 until 3)\n for pri_jet_num_val in np.unique(x[:,22]):\n \n # Find subset which DER_MASS_MMC is not equal to -999\n mask = (x[:,22] == pri_jet_num_val) & (x[:,0] != -999)\n x_tmp = x[mask,:]\n y_tmp = y[mask]\n\n # store the subset into list\n sets_x.append(x_tmp)\n sets_y.append(y_tmp)\n indices.append(mask)\n\n # Find subset which DER_MASS_MMC is equal to -999\n mask = (x[:,22] == pri_jet_num_val) & (x[:,0] == -999)\n x_tmp = x[mask,:]\n y_tmp = y[mask]\n\n # store the subset into list\n sets_x.append(x_tmp)\n sets_y.append(y_tmp)\n indices.append(mask) \n \n # return subsets of x, y, and corresponding indices\n return sets_x, sets_y, indices", "def confusion_matrix(y_true, y_pred, labels):\r\n matrix = []\r\n\r\n for i, yt in enumerate(labels):\r\n matrix.append([])\r\n for _, yp in enumerate(labels):\r\n 
matrix[i].append(0)\r\n\r\n for t, p in zip(y_true, y_pred):\r\n t_num = labels.index(t)\r\n p_num = labels.index(p)\r\n matrix[t_num][p_num] += 1\r\n\r\n return matrix", "def get2DBins(x, y, binSizeX, binSizeY):\n\n result = []\n xlength = len(x)\n ylength = len(y)\n\n i = 0\n xcount = 0\n for i1 in range(0, xlength, binSizeX):\n i2 = i1 + binSizeX\n if i2 >= xlength:\n i2 = xlength - 1\n xcount += 1\n ycount = 0\n for j1 in range(0, ylength, binSizeY):\n j2 = j1 + binSizeY\n if j2 >= ylength:\n j2 = ylength - 1\n result.append((i1, i2, j1, j2))\n ycount += 1\n return result, xcount, ycount", "def needleman_wunsch(x, y, lodict={}, gop=-2.5, gep=-1.75, local=False, indel=''):\n n, m = len(x), len(y)\n dp = np.zeros((n + 1, m + 1))\n pointers = np.zeros((n + 1, m + 1), np.int32)\n if not local:\n for i1, c1 in enumerate(x):\n if gop is None:\n dp[i1 + 1, 0] = lodict.get((c1, indel), gep)\n else:\n dp[i1 + 1, 0] = dp[i1, 0]+(gep if i1 + 1 > 1 else gop)\n pointers[i1 + 1, 0] = 1\n for i2, c2 in enumerate(y):\n if gop is None:\n dp[0, i2 + 1] = lodict.get((indel, c2), gep)\n else:\n dp[0, i2 + 1] = dp[0, i2]+(gep if i2 + 1 > 1 else gop)\n pointers[0, i2 + 1] = 2\n for i1, c1 in enumerate(x):\n for i2, c2 in enumerate(y):\n match = dp[i1, i2] + lodict.get(\n (c1, c2),\n 1 if c1 == c2 else -1)\n insert = dp[i1, i2 + 1] + (\n lodict.get((c1, indel), gep) if gop is None else\n gep if pointers[i1, i2 + 1] == 1 else gop)\n delet = dp[i1 + 1, i2] + (\n lodict.get((indel, c2), gep) if gop is None else\n gep if pointers[i1 + 1, i2] == 2 else gop)\n pointers[i1 + 1, i2 + 1] = p = np.argmax([match, insert, delet])\n max_score = [match, insert, delet][p]\n if local and max_score < 0:\n max_score = 0\n dp[i1 + 1, i2 + 1] = max_score\n alg = []\n if local:\n i, j = np.unravel_index(dp.argmax(), dp.shape)\n else:\n i, j = n, m\n score = dp[i, j]\n while (i > 0 or j > 0):\n pt = pointers[i, j]\n if pt == 0:\n i -= 1\n j -= 1\n alg = [(x[i], y[j])] + alg\n if pt == 1:\n i -= 1\n alg = [(x[i], indel)] + alg\n if pt == 2:\n j -= 1\n alg = [(indel, y[j])] + alg\n if local and dp[i, j] == 0:\n break\n return score, alg", "def reformat(x, y):\r\n # img_size, num_ch, num_class = int(np.sqrt(x.shape[1])), 1, len(np.unique(np.argmax(y, 1)))\r\n img_size, num_ch, num_class = 14, 1, 16\r\n dataset = x.reshape((-1, img_size, img_size, num_ch)).astype(np.float32)\r\n labels = (np.arange(num_class) == y[:, None]).astype(np.float32) # =[1 2 3 ... 10]??\r\n return dataset, labels", "def reduce_y(y, mask):\n return y", "def possible_splits(self,feature,y):\n\n yi = y[:-1]\n yi1= y[1:]\n idx=np.argwhere((yi1-yi)!=0)\n return idx.flatten()", "def matrix_add(x,y) -> [[]]:\n X = x\n\n Y = y\n\n if len(X) == len(Y) and len(X[0]) == len(Y[0]):\n return [[X[a][b] + Y[a][b] for b in range(len(X[0]))]\n for a in range(len(X))]", "def __get_masks(x_shape, y, n_train=None):\n # type: (Tuple[int], np.ndarray, int) -> (np.ndarray, np.ndarray)\n n_train = n_train if n_train is not None else const.n_train\n\n if n_train <= 0 or n_train > x_shape[0]:\n return np.full(shape=x_shape, fill_value=True, dtype=bool), np.full(shape=y.shape, fill_value=True, dtype=bool)\n\n all_indexes = defaultdict(list) # type: Dict[int, List[int]]\n for i in range(len(y)):\n curr = int(y[i])\n all_indexes[curr].append(i)\n\n ratios = defaultdict() # type: Dict[int, float]\n\n for i, j in all_indexes.items():\n ratios[i] = (len(j) * 1. 
/ len(all_indexes[0]))\n\n # Ratios split the whole dataset to ratios given class and first class.\n # Part scales these ratios up, so that, 'part' corresponds to size of first class.\n part = n_train * 1. / sum(ratios.values())\n if part == 0: # n_train is 0.\n part = len(y) * 1. / sum(ratios.values())\n\n # Masks of what to keep.\n indexes_x = np.full(shape=x_shape, fill_value=False, dtype=bool)\n indexes_y = np.full(shape=y.shape, fill_value=False, dtype=bool)\n\n for i in all_indexes.keys():\n chosen_idxs = random.sample(all_indexes[i], int(part * ratios[i]))\n indexes_y[chosen_idxs] = True\n indexes_x[chosen_idxs, ...] = True\n\n return indexes_x, indexes_y", "def effmat(tp: np.ndarray, t: np.ndarray) -> np.ndarray:\n n = len(t) # batch size\n max_true_n_sources = max(t)\n max_pred_n_sources = max(tp)\n eff_mat = np.zeros((max_pred_n_sources + 1, max_true_n_sources + 1))\n for ii in range(n):\n eff_mat[tp[ii], t[ii]] += 1\n return eff_mat", "def trans_o(self):\n temp_array = []\n for j in range(self.O.shape[1]):\n for i in range(self.V.shape[1]):\n if self.V[0, i] == self.O[0, j]:\n temp_array.append(i)\n self.O = mat(temp_array)", "def make_oneq_cliffords():\n ixyz_list = [g().to_matrix() for g in (IGate, XGate, YGate, ZGate)]\n ih_list = [g().to_matrix() for g in (IGate, HGate)]\n irs_list = [\n IGate().to_matrix(),\n SdgGate().to_matrix() @ HGate().to_matrix(),\n HGate().to_matrix() @ SGate().to_matrix(),\n ]\n oneq_cliffords = [\n Operator(ixyz @ ih @ irs) for ixyz in ixyz_list for ih in ih_list for irs in irs_list\n ]\n return oneq_cliffords", "def get_score_matrix(self) -> int:", "def fit(self, X, y):\n\t\tself._initialize_weights(X.shape[1])\n\t\tself.cost_ = []\n\n\t\tfor i in range(self.n_iter):\n\t\t\tif self.shuffle:\n\t\t\t\tX, y = self._shuffle(X,y)\n\t\t\tcost = []\n\t\t\t#calculate for each sample\n\t\t\tfor xi, target in zip(X, y):\n\t\t\t\tcost.append(self._update_weights(xi, target))\n\t\t\tave_cost = sum(cost)/len(y)\n\t\t\tself.cost_.append(ave_cost)\n\t\treturn self", "def monomio(x,datos_x,datos_y):\n matriz=np.zeros([datos_x.shape[0],datos_x.shape[0]])\n for j in range(datos_x.shape[0]): #Se contruye la matriz de vandermonde\n matriz[:,j]= datos_x**(j)\n matriz,datos_y=pivoteo_parcial(matriz,datos_y)\n x1= descompo_LU(matriz,datos_y)# se resulve el sistema de ecuaciones por metodo directo\n\n puntos=[] #se almacenan los valores de y para cada punto de x que se quiera calcular \n\n for p in x: #va a ir tomando los valores de x uno por uno \n prod=np.zeros(x1.shape[0])\n for i in range(x1.shape[0]):\n if i==0:\n prod[i]=1\n else:\n prod[i]=prod[i-1]*p #Se hace el calculo de los polimonios con todos los valores de x \n solucion=x1@prod\n puntos.append(solucion) # se agregan los valores de y a la lista final \n puntos=np.array(puntos)# se convierte la lista en array para mejor manejo\n\n return puntos", "def calcAllIntensities(self, xc, yc):\n\n tp = 0.0\n ix = 0\n iy = 0\n h = 0\n ints = np.zeros([5, 5])\n ints_inner = np.zeros([5, 5])\n # ints = [[0.0] * 5] * 5\n # ints_inner = [[0.0] * 5] * 5\n x = 0.0\n y = 0.0\n xc1 = 0.0\n yc1 = 0.0\n xc1 = xc\n yc1 = yc\n \n for h in np.arange(1,5,1):\n for k in np.arange(1,5,1):\n ints[h][k] = 0.0\n ints_inner[h][k] = 0.0\n\n for k in np.arange(0, 2, 1):\n for h in np.arange(0, 2, 1):\n for ix in np.arange(0, self.stepp + 1, 1):\n for iy in np.arange(0, self.stepp + 1, 1):\n #print(k, h, ix, iy)\n if self.qc_format == 0 :\n x = -(1 + self.G) + h * (1 + 2 * self.G) + (ix * (1.0 / self.stepp))\n y = -(1 + self.G) + k * (1 + 2 
* self.G) + (iy * (1.0 / self.stepp))\n if self.spot_radius == 0 or math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)) == 0 :\n tp = 0.0\n else :\n tp = (math.sin((1 / self.spot_radius) * math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)))) / ((1 / self.spot_radius) * math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)))\n tp = math.pow(tp,2)\n #print(tp)\n elif self.qc_format == 1 :\n x = -1 + h + (ix * (1 / self.stepp))\n y = -1 + k + (iy * (1 / self.stepp))\n ints[h + 1][k + 1] += math.pow(math.exp((math.pow((x - xc1),2) + math.pow((y - yc1),2) ) / math.pow(self.spot_radius,2)), -1)\n if (self.spot_radius * self.spot_radius) == 0 or ((x - xc1) * (y - yc1) * np.pi * np.pi) == 0 :\n tp = 0.0\n else :\n tp = (math.sin((x - xc1) * np.pi / self.spot_radius) * math.sin((y - yc1) * np.pi / self.spot_radius)) / (((x - xc1) * (y - yc1) * np.pi * np.pi) / (self.spot_radius * self.spot_radius))\n\n if (math.pow(x,2) + math.pow(y,2)) <= math.pow(self.radius_inner,2):\n ints_inner[h + 1][k + 1] += tp\n else :\n if self.qc_format == 1 :\n if (math.pow(x,2) + math.pow(y,2)) <= math.pow(self.cell_qc, 2):\n ints[h + 1][k + 1] += tp\n if (math.pow(x,2) + math.pow(y,2)) <= 1 :\n #print(math.pow(x,2) + math.pow(y,2))\n ints[h + 1][k + 1] += tp\n # print(ints[h + 1][k + 1])\t\t\t\t\t\t\n tp = 0.0\n\n # print(ints)\n\n Aq = 0.0\n Bq = 0.0\n Cq = 0.0\n Dq = 0.0\n Ac_inner = 0.0\n Bc_inner = 0.0\n Cc_inner = 0.0\n Dc_inner = 0.0\n Ac = 0.0\n Bc = 0.0\n Cc = 0.0\n Dc = 0.0\n Ac = ints[1][2]\n Bc = ints[2][2]\n Cc = ints[2][1]\n Dc = ints[1][1]\n\n Ac_inner = ints_inner[1][2]\n Bc_inner = ints_inner[2][2]\n Cc_inner = ints_inner[2][1]\n Dc_inner = ints_inner[1][1]\n Ac *= self.QE\n Bc *= self.QE\n Cc *= self.QE\n Dc *= self.QE\n\n Ac_inner *= self.QE_inner\n Bc_inner *= self.QE_inner\n Cc_inner *= self.QE_inner\n Dc_inner *= self.QE_inner\n Ac += Ac_inner\n Bc += Bc_inner\n Cc += Cc_inner\n Dc += Dc_inner\n\n Aq = Ac\n Bq = Bc\n Cq = Cc\n Dq = Dc\n\n #tp/TP = cotribution percentage of the spot with respect to max (spot center)\n if self.smooth == 0 :\n if (Config.hplk_c0_e * self.TP) == 0 :\n cnst = 0\n else :\n cnst = ((Parameters.TPS / (self.n_ml * self.n_ml)) * self.lamb) / (Config.hplk_c0_e * self.TP) #Número de fótons efeticos\n if Config.flag_spice == 1 :\n Ac *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP) #W\n Bc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Cc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Dc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Ac *= 1 / (math.pow(self.cell_qc * 1e-6,2)) #W/(m^2)\n Bc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n Cc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n Dc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n #Ac *= 1 / (self.lamb * 1e6); #Adequação da irradiância para a unidade W/m2micm conforme necessário no SPICE\n #Bc *= 1 / (self.lamb * 1e6);\n #Cc *= 1 / (self.lamb * 1e6);\n #Dc *= 1 / (self.lamb * 1e6);\n \n ############################## DOUBLE CHECK ##############################\n # self.grava_arquivos = 1\n # self.flag_V_QC = 0\n # grava_le_arquivos(0) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n # self.flag_V_QC = 1\n # self.grava_arquivos = 0\n ############################## DOUBLE CHECK ##############################\n Aq *= cnst * 1e9\n Bq *= cnst * 1e9\n Cq *= cnst * 1e9\n Dq *= cnst * 1e9\n else :\n Aq *= cnst * 1e9\n Bq *= cnst * 1e9\n Cq *= cnst * 1e9\n Dq *= cnst * 1e9\n\n # 'returns' all the intensities\n self.A_intensity = Aq\n self.B_intensity = Bq\n self.C_intensity = Cq\n self.D_intensity = Dq", "def 
correspondences(labels1,labels2,return_counts=True):\n q = 100000\n assert amin(labels1)>=0 and amin(labels2)>=0\n assert amax(labels2)<q\n combo = labels1*q+labels2\n result = unique(combo, return_counts=return_counts)\n if return_counts:\n result, counts = result\n result = array([result//q,result%q,counts])\n else:\n result = array([result//q,result%q])\n return result", "def clifford_set(u):\n i, x, y, z = u.v\n result = []\n result.append(u.clone()) # I\n result.append(Uop(-x, i, -z, y, u.hierarchy, u.construction + [\"X\"], gateset=u.gateset)) # iX, but treat it as X due to only phase difference\n result.append(Uop((i-x)/SQRT2, (x+i)/SQRT2, (y-z)/SQRT2, (z+y)/SQRT2, u.hierarchy, u.construction + [\"(I+iX)\"], gateset=u.gateset))\n result.append(Uop((i+x)/SQRT2, (x-i)/SQRT2, (y+z)/SQRT2, (z-y)/SQRT2, u.hierarchy, u.construction + [\"(I-iX)\"], gateset=u.gateset))\n result.append(Uop((i-y)/SQRT2, (x+z)/SQRT2, (y+i)/SQRT2, (z-x)/SQRT2, u.hierarchy, u.construction + [\"(I+iY)\"], gateset=u.gateset))\n result.append(Uop((i+y)/SQRT2, (x-z)/SQRT2, (y-i)/SQRT2, (z+x)/SQRT2, u.hierarchy, u.construction + [\"(I-iY)\"], gateset=u.gateset))\n for idx in range(6):\n i, x, y, z = result[idx].v\n c = result[idx].construction[-1:] if idx != 0 else []\n result.append(Uop(-z, -y, x, i, u.hierarchy, u.construction + c + [\"Z\"], gateset=u.gateset)) # iZ\n result.append(Uop((i-z)/SQRT2, (x-y)/SQRT2, (y+x)/SQRT2, (z+i)/SQRT2, u.hierarchy, u.construction + c + [\"(I+iZ)\"], gateset=u.gateset))\n result.append(Uop((i+z)/SQRT2, (x+y)/SQRT2, (y-x)/SQRT2, (z-i)/SQRT2, u.hierarchy, u.construction + c + [\"(I-iZ)\"], gateset=u.gateset))\n\n return result", "def _split_indices(X, y, n_folds=5):\n # TODO: check if indices are permuted\n n = X.shape[0]\n print('n:', n)\n #original_indices = np.arange(n)\n shuffle = np.random.permutation(n)\n subset_proportion = 1./float(n_folds)\n fold_size = int(subset_proportion*n)\n folds = [shuffle[i*fold_size:(i+1)*fold_size] for i in range(n_folds)]\n return folds", "def mi_bin_conn_time(x, y, bins_x, bins_y):\n n_times, n_trials = x.shape\n mi = np.zeros((n_times), dtype=np.float32)\n for t in range(n_times):\n mi[t] = mi_bin(x[t, :], y[t, :], bins_x, bins_y)\n return mi", "def cumulative_capacity_rule(_m, g, y):\r\n\r\n return sum(m.x_c[g, j] for j in m.Y if j <= y)", "def reconstructXY(self, inputs):\n return (self.reconstructX(inputs),\n self.reconstructY(inputs))", "def update_output(self, latent_mat, weight_mat, y_list):", "def _recon_lcs(x, y):\n i, j = len(x), len(y)\n table = _lcs(x, y)\n\n def _recon(i, j):\n \"\"\"private recon calculation\"\"\"\n if i == 0 or j == 0:\n return []\n elif x[i - 1] == y[j - 1]:\n return _recon(i - 1, j - 1) + [(x[i - 1], i)]\n elif table[i - 1, j] > table[i, j - 1]:\n return _recon(i - 1, j)\n else:\n return _recon(i, j - 1)\n\n recon_tuple = tuple(map(lambda x: x[0], _recon(i, j)))\n return recon_tuple", "def add_dims_to_match(x, y):\n x_shape = tf.shape(x)\n new_dims = tf.rank(y)-tf.rank(x)\n if new_dims > 0:\n new_shape = tf.concat((x_shape, tf.ones((new_dims,), dtype=tf.int32)), axis=0)\n return tf.reshape(x, new_shape)\n else:\n return x", "def inplace(block_size=20000):\n y = np.empty(len(x))\n for k in range(len(x) // block_size + 1):\n b, e = k * block_size, (k+1) * block_size\n y[b:e] = x[b:e]\n y[b:e] *= .25\n y[b:e] += .75\n y[b:e] *= x[b:e]\n y[b:e] -= 1.5\n y[b:e] *= x[b:e]\n y[b:e] -= 2\n\n return y" ]
[ "0.5772924", "0.573736", "0.57085884", "0.5653021", "0.56054556", "0.54707366", "0.5380854", "0.53584445", "0.5358038", "0.53385925", "0.5314144", "0.5285138", "0.52835494", "0.52652776", "0.52577674", "0.5253505", "0.5253083", "0.524562", "0.5237841", "0.52344394", "0.5211222", "0.51828897", "0.51802444", "0.5178981", "0.5178981", "0.51725096", "0.51699585", "0.5166547", "0.5157785", "0.515223", "0.51469713", "0.51321095", "0.5115334", "0.5087791", "0.50812095", "0.5076138", "0.5074466", "0.5057698", "0.50500864", "0.50500864", "0.5035716", "0.50207555", "0.49975905", "0.4982586", "0.49759686", "0.49691507", "0.49595478", "0.4948456", "0.49445125", "0.49383506", "0.49262735", "0.4912366", "0.4910826", "0.4906799", "0.49047226", "0.48978412", "0.4896438", "0.48920682", "0.4891731", "0.48900634", "0.48893338", "0.48886907", "0.4885065", "0.48842517", "0.4881399", "0.4880126", "0.48797235", "0.48771152", "0.4866483", "0.48636025", "0.48633644", "0.485049", "0.48362094", "0.4832051", "0.48197523", "0.48140198", "0.48039287", "0.4798375", "0.47875795", "0.47807667", "0.477661", "0.47749743", "0.47744423", "0.47732797", "0.47727358", "0.4771551", "0.47644138", "0.4764349", "0.47624502", "0.47509244", "0.47450113", "0.4740621", "0.47403514", "0.47384232", "0.47349578", "0.47333637", "0.47283027", "0.47274932", "0.47198072", "0.47189203", "0.4718727" ]
0.0
-1
A function for generating reaction likelihoods for a given genome according to the Probabilistic Annotation algorithm as
def generate_reaction_probabilities(fasta_file, template_model_file, genome_id=None): if genome_id is None: # Use fasta_file name minus extension. worker uses only for file names and logging genome_id = '.'.join(fasta_file.split('.')[0:-1]) # Create a worker for running the algorithm. worker = ProbAnnotationWorker(genome_id) try: template_model = _load_template_file(template_model_file) # Run blast using the fasta file. blast_result_file = worker.runBlast(fasta_file) # Calculate roleset probabilities. rolestring_tuples = worker.rolesetProbabilitiesMarble(blast_result_file) # Calculate per-gene role probabilities. role_probs = worker.rolesetProbabilitiesToRoleProbabilities(rolestring_tuples) # Calculate whole cell role probabilities. total_role_probs = worker.totalRoleProbabilities(role_probs) # Calculate complex probabilities. complex_probs = worker.complexProbabilities(total_role_probs, complexesToRequiredRoles=_complex_to_roles_dict(template_model)) # Calculate reaction probabilities. rxn_probs = worker.reactionProbabilities(complex_probs, rxnsToComplexes=_reactions_to_complexes_dict(template_model)) # Store in dictionary for better serialization return ReactionProbabilities([{'reaction': r[0], 'probability': r[1], 'type': r[2], 'complexes': _deserialize_cplx(r[3], worker.config['separator']), 'gpr': r[4]} for r in rxn_probs]) finally: worker.cleanup() # worker creates lots of temporary and intermediate files. Allow it to clean up
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def log_prob(self):", "def evaluate(genome):\n # base fitness\n fit = 1.0\n # promote 1001 starting motif\n matches = 0\n if genome.sequence_A[0] == 1:\n matches += 1\n if genome.sequence_A[1] == 0:\n matches += 1\n if genome.sequence_A[2] == 0:\n matches += 1\n if genome.sequence_A[3] == 1:\n matches += 1\n fit += matches * 0.1\n # finish\n return fit", "def likelihood(seq):\n global qmap\n if qmap is None:\n qmap = {'!': 1.0, '\"': 0.7943282347242815, '#': 0.6309573444801932, '$': 0.5011872336272722, '%': 0.3981071705534972, '&': 0.31622776601683794, \"'\": 0.251188643150958, '(': 0.19952623149688797, ')': 0.15848931924611134, '*': 0.12589254117941673, '+': 0.1, ',': 0.07943282347242814, '-': 0.06309573444801933, '.': 0.05011872336272722, '/': 0.039810717055349734, '0': 0.03162277660168379, '1': 0.025118864315095794, '2': 0.0199526231496888, '3': 0.015848931924611134, '4': 0.012589254117941675, '5': 0.01, '6': 0.007943282347242814, '7': 0.00630957344480193, '8': 0.005011872336272725, '9': 0.003981071705534973, ':': 0.0031622776601683794, ';': 0.0025118864315095794, '<': 0.001995262314968879, '=': 0.001584893192461114, '>': 0.0012589254117941675, '?': 0.001, '@': 0.0007943282347242813, 'A': 0.000630957344480193, 'B': 0.0005011872336272725, 'C': 0.00039810717055349735, 'D': 0.00031622776601683794, 'E': 0.00025118864315095795, 'F': 0.00019952623149688788, 'G': 0.00015848931924611142, 'H': 0.00012589254117941674, 'I': 0.0001, 'J': 7.943282347242822e-05, 'K': 6.309573444801929e-05, 'L': 5.011872336272725e-05, 'M': 3.9810717055349695e-05, 'N': 3.1622776601683795e-05, 'O': 2.5118864315095822e-05, 'P': 1.9952623149688786e-05, 'Q': 1.584893192461114e-05, 'R': 1.2589254117941661e-05, 'S': 1e-05, 'T': 7.943282347242822e-06, 'U': 6.30957344480193e-06, 'V': 5.011872336272725e-06, 'W': 3.981071705534969e-06, 'X': 3.162277660168379e-06, 'Y': 2.5118864315095823e-06, 'Z': 1.9952623149688787e-06, '[': 1.584893192461114e-06, '\\\\': 1.2589254117941661e-06, ']': 1e-06, '^': 7.943282347242822e-07, '_': 6.30957344480193e-07, '`': 5.011872336272725e-07, 'a': 3.981071705534969e-07, 'b': 3.162277660168379e-07, 'c': 2.5118864315095823e-07, 'd': 1.9952623149688787e-07, 'e': 
1.584893192461114e-07, 'f': 1.2589254117941662e-07, 'g': 1e-07, 'h': 7.943282347242822e-08, 'i': 6.30957344480193e-08, 'j': 5.011872336272725e-08, 'k': 3.981071705534969e-08, 'l': 3.162277660168379e-08, 'm': 2.511886431509582e-08, 'n': 1.9952623149688786e-08, 'o': 1.5848931924611143e-08, 'p': 1.2589254117941661e-08, 'q': 1e-08, 'r': 7.943282347242822e-09, 's': 6.309573444801943e-09, 't': 5.011872336272715e-09, 'u': 3.981071705534969e-09, 'v': 3.1622776601683795e-09, 'w': 2.511886431509582e-09, 'x': 1.9952623149688828e-09, 'y': 1.584893192461111e-09, 'z': 1.2589254117941663e-09, '{': 1e-09, '|': 7.943282347242822e-10, '}': 6.309573444801942e-10, '~': 5.011872336272714e-10, '\\x7f': 3.9810717055349694e-10, '\\x80': 3.1622776601683795e-10, '\\x81': 2.511886431509582e-10, '\\x82': 1.9952623149688828e-10, '\\x83': 1.584893192461111e-10, '\\x84': 1.2589254117941662e-10, '\\x85': 1e-10}\n return [qmap[i] for i in seq]", "def likelihood_genotype(genotype, bases_all_reads, error_rates):\n likelihood = 1\n for observed_base in bases_all_reads:\n p = 0\n for base in \"ACGT-\":\n l = prob_t_N(genotype, base) * error_rates[base][observed_base]\n p += l\n likelihood *= p\n\n return likelihood", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def likelihood(self):\n \n raise NotImplementedError()", "def annotate_effect(cds_dict, genome, snp):\n # List to save the coding effect\n coding_effect = []\n \n # Change the SNP position from 1-indexed to 0-indexed\n snp = (snp[0]-1, snp[1])\n \n # Determine which genes the SNP is located in\n genes = []\n for k,v in cds_dict.items():\n if snp[0] in range(v.location.start, v.location.end): \n genes.append(k)\n # Check that SNP is in a gene\n if genes: \n # Some SNPs will be in more than one gene, SARS has overlaping ORFs\n for gene in genes: \n gene_tuple = list(zip(list(cds_dict[gene].location), cds_dict[gene].location.extract(genome)))\n # Get the indicies relative to the gene, add 1 to get 1-indexed values\n indicies = [x + 1 for x, y in enumerate(gene_tuple) if y[0] == snp[0]]\n # Determine codon position from gene index\n for i in indicies:\n # First position in codon\n if i % 3 == 1:\n codonpos = 1\n wtcodon = [gene_tuple[i-1], gene_tuple[i], gene_tuple[i+1]]\n # Second position in codon\n elif i % 3 == 2:\n codonpos = 2\n wtcodon = [gene_tuple[i-2], gene_tuple[i-1], gene_tuple[i]]\n # Third position in codon \n elif i % 3 == 0:\n codonpos = 3\n wtcodon = [gene_tuple[i-3], gene_tuple[i-2], gene_tuple[i-1]]\n \n # From the wt codon sequence, determine the alterative codon, coding change, and effect\n altcodon = [snp if i == (codonpos-1) else b for i, b in enumerate(wtcodon)]\n wtaa = translate(\"\".join(y for x,y in wtcodon))\n altaa = translate(\"\".join(y for x,y in altcodon))\n if wtaa == altaa:\n effect = \"synonymous\"\n elif wtaa != altaa and altaa == '*':\n effect = \"nonsense\"\n elif wtaa != altaa and altaa != '*':\n effect = \"missense\"\n # Save the codon effects and information\n coding_effect.append((codonpos, f\"{wtaa}{-(i // -3)}{altaa}\", effect, 
gene))\n # If the SNP isn't in a gene, it's intergeneic and has no coding effect\n else:\n coding_effect.append((\"NA\", \"NA\", \"NA\", \"intergeneic\"))\n \n \n # Deal with SNPs in multiple genes with multiple effects \n if len(coding_effect) == 1:\n return list(coding_effect[0])\n else: \n if len(set([(a,b,c) for a,b,c,d in coding_effect])) == 1: \n return list(list(set(coding_effect))[0])\n # TODO: Deal with ambiguous sequences\n else:\n return [\"NA\", \"NA\", \"NA\", \"ambiguous\"]", "def target_log_prob_fn(self, *args, **kwargs): # pylint: disable=unused-argument\n\n def log_joint_fn(*args, **kwargs): # pylint: disable=unused-argument\n states = dict(zip(self.unobserved.keys(), args))\n states.update(self.observed)\n interceptor = interceptors.CollectLogProb(states)\n with ed.interception(interceptor):\n self._f(self._cfg)\n\n log_prob = sum(interceptor.log_probs)\n return log_prob\n return log_joint_fn", "def score_sequence(seq, ngramlogprobs):\n return", "def get_log_likelihood(response_probability, response):\n pass", "def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))", "def Log_OB(xref,x):\n\n nX = np.shape(x)\n\n m = nX[0]\n n = nX[1]\n t = nX[2]\n\n G = np.zeros((m,n,t))\n\n for r in range(t):\n\n # Correct for permuations\n\n Xout,PiA= CorrectPerm(xref,x[:,:,r])\n\n G[:,:,r] = Xout - np.dot(xref,np.dot(PiA,Xout))\n\n return G", "def calc_prob(number_of_strings, GC_content, DNA):\r\n\r\n AT = 0\r\n GC = 0\r\n\r\n for nt in DNA:\r\n if nt == \"A\" or nt == \"T\":\r\n AT += 1\r\n elif nt == \"G\" or nt == \"C\":\r\n GC += 1\r\n\r\n #P(at least 1 match of s) = 1 − P(no matches out of N strings) = 1 − [1 - P_no_match]^N\r\n\r\n P_no_match = (((1 - GC_content)/2) **AT) * ((GC_content/2) **GC)\r\n prob = 1 - (1-P_no_match) **number_of_strings\r\n\r\n print(\"%0.3f\" %prob)", "def log_likelihood(self, data, reward_model, bias_params):", "def biopythonMM(pwmFileName,genomeDict,mpbsDict,scoringMethod,tempLocation,pseudocounts=0.1,bitscore=12.0,fpr=0.01,precision=10**4,highCutoff=0.7,functionalDepth=0.9):\n \n # Reading PWM\n pwm = readPwmFile(pwmFileName,tempLocation,pseudocounts)\n pwmName = pwmFileName.split(\"/\")[-1].split(\".\")[0]\n pwmLen = len(pwm)\n\n # Evaluating threshold\n pwmThreshold = 0.0\n if(scoringMethod == \"bitscore\"):\n pwmThreshold = bitscore\n elif(scoringMethod == \"fpr\"):\n sd = Motif.ScoreDistribution(pwm,precision=precision)\n pwmThreshold = sd.threshold_fpr(fpr)\n elif(scoringMethod == \"boyle\"):\n maxScore = pwm.max_score()\n minScore = 0.0 # TODO Boyle's rule is not suited for negative values.\n pwmThreshold = min(highCutoff*maxScore,functionalDepth*(maxScore-minScore))\n else:\n sys.stderr.write(\"Choose a valid scoring method.\\n\")\n sys.exit(0)\n\n # Creating aditional parameters\n chrList = constants.getChromList(reference=[mpbsDict])\n tempMpbsDict = dict([(e,[]) for e in chrList])\n maxValue = -99.0\n\n # Iterating on chromosomes\n for chrName in chrList:\n\n # Reading genome\n sequence = genomeDict[chrName]\n\n # Performing biopython's motif matching\n for pos, score in pwm.search_pwm(sequence,threshold=pwmThreshold):\n if(score > maxValue): maxValue = score\n if(pos >= 0): tempMpbsDict[chrName].append([pos,pos+pwmLen,pwmName,score,\"+\"])\n else: tempMpbsDict[chrName].append([-pos,-pos+pwmLen,pwmName,score,\"-\"])\n\n # Update scores - new scores are within [0,1000]\n for chrName in chrList:\n for e in tempMpbsDict[chrName]:\n 
mpbsDict[chrName].append([e[0],e[1],e[2],int(1000*(e[3]-pwmThreshold)/(maxValue-pwmThreshold)),e[4]])\n \n return 0", "def gomeroccupancyscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n gomer_occupancy = 1\n area_pwm_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(pwm_length - 1, 1, -1):\n prod_gomer = 1\n prod_gomer_rc = 1\n for j in range(pwm_length):\n if j <= i:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n elif (j + i) > len(seq) - 1:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n else:\n # print \"got to else\"\n s = seq[j + i]\n prod_gomer *= pwm_dictionary[s][j]\n prod_gomer_rc *= area_pwm_rc[s][j]\n gomer_occupancy *= (1 - prod_gomer) * (1 - prod_gomer_rc)\n for i in range(len(seq) - 1):\n prod_gomer = 1\n prod_gomer_rc = 1\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq) - 1:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n else:\n prod_gomer *= pwm_dictionary[seq[j + i]][j]\n prod_gomer_rc *= area_pwm_rc[seq[j + i]][j]\n gomer_occupancy *= (1 - prod_gomer) * (1 - prod_gomer_rc)\n gomer_occupancy = 1 - gomer_occupancy\n\n return gomer_occupancy", "def likelihood_prediction():\n # Get info\n selected_word = prompt_tech_selection()\n article_json = get_json_from_file()\n\n # Calculate results\n total_word_counter, selected_word_counter = count_occurrences(article_json, selected_word)\n probability = selected_word_counter / total_word_counter\n total_time = article_json[-1]['time'] - article_json[0]['time'] # unix subtraction = seconds\n months_in_train_set = total_time / SECONDS_IN_MONTH\n expected_posts_per_month = int(total_word_counter / months_in_train_set)\n\n # Show results\n print_text_results(expected_posts_per_month, probability, selected_word)\n plot_likelihood(expected_posts_per_month, probability)", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n for cls in self.classes:\n class_probability = self.prior_prob[cls]\n for key, value in datum.items():\n relative_feature_values = self.likelihoods[cls][key]\n class_probability += math.log(relative_feature_values.get(datum[key], 0.01))\n\n logJoint[cls] = class_probability\n\n return logJoint", "def viterbi(prob_matrix):\n TINY = 1e-6 # to avoid NaNs in logs\n\n # if prob_matrix is 1D, make it 2D\n if len(np.shape(prob_matrix)) == 1:\n prob_matrix = [prob_matrix]\n \n length = len(prob_matrix)\n\n probs = np.zeros_like(prob_matrix)\n backpt = np.ones_like(prob_matrix, dtype=np.int32) * -1\n \n for i in [0,1,2,3,4]:\n probs[0][i] = np.log(prob_matrix[0][i]+TINY)\n \n # {B, M, E, S} <=== 0:begin, 1:middle, 2:end, 3:single\n for t in range(1, length):\n # E, S -> B | B, M -> M | B, M -> E | E, S -> S\n previous_of = [[0,0], [3,4], [1,2], [1,2], [3,4]]\n for i in range(5):\n prevs = previous_of[i]\n max_id = prevs[np.argmax([probs[t-1][prevs[0]], probs[t-1][prevs[1]]])]\n backpt[t][i] = max_id\n probs[t][i] = np.log(prob_matrix[t][i]+TINY) + probs[t-1][max_id]\n\n seq = np.ones(length, 'int32') * -1\n #print(probs[length-1])\n seq[length-1] = np.argmax(probs[length-1])\n #print(seq[length-1])\n max_prob = probs[length-1][seq[length-1]]\n for t in range(1, length):\n seq[length-1-t] = backpt[length-t][seq[length-t]]\n \n return seq", "def complex(self, sentence):\r\n repetition = 6000\r\n warmup = 2500\r\n pos_mcmc_dict = {\"pos_\" + str(i): {} for i in range(len(sentence))}\r\n sequence = [\"noun\"] * len(sentence)\r\n for i in range(len(sentence)):\r\n if i == 0:\r\n prob_first = 
self.posterior_first(sentence[i])\r\n sample_first = list(\r\n np.random.choice(\r\n [keys for keys in prob_first.keys()],\r\n repetition,\r\n p=[\r\n float(prob_first[keys]) / sum(prob_first.values())\r\n for keys in prob_first.keys()\r\n ],\r\n )\r\n )\r\n sample_first = sample_first[warmup:]\r\n pos_mcmc_dict[\"pos_\" + str(i)] = {\r\n pos: (float(sample_first.count(pos)) / len(sample_first))\r\n for pos in self.position_list\r\n }\r\n sequence[i] = max(\r\n pos_mcmc_dict[\"pos_\" + str(i)],\r\n key=pos_mcmc_dict[\"pos_\" + str(i)].get,\r\n )\r\n elif i == 1:\r\n prob_second = self.post_second(sentence[i], sequence[i - 1])\r\n sample_second = list(\r\n np.random.choice(\r\n [keys for keys in prob_second.keys()],\r\n repetition,\r\n p=[\r\n float(prob_second[keys]) / sum(prob_second.values())\r\n for keys in prob_second.keys()\r\n ],\r\n )\r\n )\r\n sample_second = sample_second[warmup:]\r\n pos_mcmc_dict[\"pos_\" + str(i)] = {\r\n pos: (float(sample_second.count(pos)) / len(sample_second))\r\n for pos in self.position_list\r\n }\r\n sequence[i] = max(\r\n pos_mcmc_dict[\"pos_\" + str(i)],\r\n key=pos_mcmc_dict[\"pos_\" + str(i)].get,\r\n )\r\n else:\r\n prob_other = self.posterior_else(\r\n sentence[i], sequence[i - 1], sequence[i - 2]\r\n )\r\n sample_other = list(\r\n np.random.choice(\r\n [keys for keys in prob_other.keys()],\r\n repetition,\r\n p=[\r\n float(prob_other[keys]) / sum(prob_other.values())\r\n for keys in prob_other.keys()\r\n ],\r\n )\r\n )\r\n sample_other = sample_other[warmup:]\r\n pos_mcmc_dict[\"pos_\" + str(i)] = {\r\n pos: (float(sample_other.count(pos)) / len(sample_other))\r\n for pos in self.position_list\r\n }\r\n sequence[i] = max(\r\n pos_mcmc_dict[\"pos_\" + str(i)],\r\n key=pos_mcmc_dict[\"pos_\" + str(i)].get,\r\n )\r\n return sequence", "def make_fitness_function(cantus_firmus):\n def fitness_function(genome, debug=False):\n \"\"\"\n Given a candidate solution will return its fitness score assuming\n the cantus_firmus in this closure. 
Caches the fitness score in the\n genome.\n \"\"\"\n cp = genome.chromosome\n\n # calculate some information for easier scoring\n ScoringInfo = namedtuple(\"ScoringInfo\", \"pitch duration measure beat vi hi_next hi_prev voice_dir\")\n melody_info = list()\n beat = 4\n measure = 0\n for i in range(1, len(cp)):\n hi_next = abs(cp[i][0] - cp[i + 1][0]) if i != len(cp) - 1 else -1 # next horizontal interval\n hi_prev = abs(cp[i][0] - cp[i - 1][0]) if i != 1 else -1 # previous horizontal interval\n vi = abs(cp[i][0] - cantus_firmus[measure]) # vertical interval\n # voice movement direction\n voice_dir = 0 if i == len(cp) - 1 or cp[i + 1][0] == cp[i][0] else copysign(1, cp[i + 1][0] - cp[i][0])\n melody_info.append(ScoringInfo(cp[i][0], cp[i][1], measure, beat, vi, hi_next, hi_prev, voice_dir))\n beat += cp[i][1]\n measure += beat / 8\n beat %= 8\n\n if debug:\n print \"MELODY INFO: \", melody_info\n\n hscores = list()\n vscores = list()\n # hscore 1: 8th notes must move in step\n amount_of_8th = 0\n amount_of_missteps = 0\n for note in melody_info:\n if note.duration == 1:\n amount_of_8th += 1\n if note.hi_next > 1:\n amount_of_missteps += 1\n if note.hi_prev > 1:\n amount_of_missteps += 1\n hscores.append(float(amount_of_missteps) / (amount_of_8th * 2))\n if debug:\n print \"HSCORE 1: 8TH - \", amount_of_8th, \", MISSTEPS - \", amount_of_missteps\n\n # hscore 2: one climax, that can be repeated only after neighboring tone\n # hscore 3: Climax should be on the strong beat\n highest_note = max([note.pitch for note in melody_info])\n climax_count = 0\n climax_on_weak_beat_count = 0\n for i, note in enumerate(melody_info):\n if note.pitch == highest_note:\n climax_count += 1\n if note.beat not in [0, 4]:\n climax_on_weak_beat_count += 1\n if i < len(melody_info) - 2 and note.pitch == melody_info[i + 2].pitch: # If next note is\n if note.hi_next == 1 and melody_info[i + 2].hi_prev == 1: # neighboring tone\n if note.vi in CONSONANCES and melody_info[i + 2].vi in CONSONANCES: # And surrounding notes are consonant\n climax_count -= 1 # we can allow 2nd climax\n if melody_info[i + 2].beat not in [0, 4]:\n climax_on_weak_beat_count -= 1 # And 2nd climax may be on weak beat\n\n hscores.append(float(climax_count - 1) / len(melody_info))\n hscores.append(float(climax_on_weak_beat_count) / climax_count)\n\n if debug:\n print \"HSCORE 2+3: CLIMAX CNT - \", climax_count, \", WEAK CLIMAX CNT - \", climax_on_weak_beat_count\n\n # hscore 4: Horizontal intervals are consonant\n unconsonant_amount = len(filter(lambda x: x.hi_next not in CONSONANCES + [1], melody_info[:-1]))\n hscores.append(float(unconsonant_amount) / (len(melody_info) - 1))\n\n if debug:\n print \"HSCORE 4: UNCONSANANT AMOUNT - \", unconsonant_amount\n\n # hscore 5: Stepwise movement should predominate\n leaps_count = len(filter(lambda x: x.hi_next != 1, melody_info[:-1]))\n sections = round(float(len(cantus_firmus)) / 16)\n if leaps_count < (2 * sections):\n hscores.append(float(leaps_count) / (len(melody_info) - 1 - 4 * sections))\n elif leaps_count > (4 * sections):\n hscores.append(float(leaps_count) / (len(melody_info) - 1 - 4 * sections))\n else:\n hscores.append(0.0)\n\n if debug:\n print \"HSCORE 5: LEAPS - \", leaps_count, \"SECTIONS - \", sections\n\n # hscore 6: After large leap - stepwise motion\n large_leaps_count = 0\n large_leaps_not_followed_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.hi_next >= 3:\n large_leaps_count += 1\n if melody_info[i + 1].hi_next != 1:\n large_leaps_not_followed_count += 1\n 
hscores.append(float(large_leaps_not_followed_count) / large_leaps_count if large_leaps_count != 0 else 0.0)\n\n if debug:\n print \"HSCORE 6: LL CNT - \", large_leaps_count, \"LL NOT FOLLOWED CNT - \", large_leaps_not_followed_count\n\n # hscore 7: change direction after each large leap\n large_leaps_not_changedir_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.hi_next >= 3 and note.voice_dir != -melody_info[i + 1].voice_dir:\n large_leaps_not_changedir_count += 1\n hscores.append(float(large_leaps_not_changedir_count) / large_leaps_count if large_leaps_count != 0 else 0.0)\n\n if debug:\n print \"HSCORE 7: LL NOT CHNGDIR CNT - \", large_leaps_not_changedir_count\n\n # hscore 8: climax should be melodically consonant with tonic\n hscores.append(1.0 if highest_note - 4 in CONSONANCES else 0.0)\n\n # hscore 9: no more than 2 consecutive leaps\n conseq_leaps = 0\n punish_score = 0\n for note in melody_info:\n conseq_leaps += 1\n if note.hi_next in [0, 1]:\n conseq_leaps = 0\n if conseq_leaps > 3:\n punish_score += 1\n hscores.append(float(punish_score) / (len(melody_info) - 3))\n\n if debug:\n print \"HSCORE 9: CONSEQ LEAPS PUNISH SCORE - \", punish_score\n\n # hscore 10: no more than 2 large leaps per section\n if large_leaps_count > 2 * sections:\n hscores.append(float(large_leaps_count - 2 * sections) / (len(melody_info) - 1 - 2 * sections))\n else:\n hscores.append(0.0)\n\n # hscore 11: not too long stepwise in same direction\n longest_stepwise_seq = 0\n current_stepwise_seq = 0\n prev_dir = 0\n num_changes = 0\n motion_vector = list()\n for note in melody_info:\n if note.hi_next <= 1:\n if note.voice_dir in [prev_dir, 0]:\n current_stepwise_seq += 1\n longest_stepwise_seq = max(longest_stepwise_seq, current_stepwise_seq)\n else:\n prev_dir = note.voice_dir\n current_stepwise_seq = 0\n num_changes += 1\n motion_vector.append(note.pitch)\n else:\n if note.voice_dir != prev_dir and note.voice_dir != 0:\n prev_dir = note.voice_dir\n num_changes += 1\n motion_vector.append(note.pitch)\n current_stepwise_seq = 0\n motion_vector.append(cp[-1][0])\n if longest_stepwise_seq < 5:\n longest_stepwise_seq = 0\n hscores.append(float(longest_stepwise_seq) / len(cp))\n\n if debug:\n print \"HSCORE 11: LONGEST STEPWISE SEQUENCE - \", longest_stepwise_seq\n\n # hscore 12: direction needs to change several times\n if num_changes < 3 * sections:\n hscores.append(1 - float(num_changes) / (3 * sections))\n else:\n hscores.append(0.0)\n\n # hscore 13: ending note is tonic\n hscores.append(0)\n\n # hscore 14: penultimate note is leading tone\n hscores.append(0)\n\n # hscore 15: the start of a motion is consonant with the end of a motion\n unconsotant_count = 0\n big_leaps_count = 0\n for i in range(1, len(motion_vector) - 1):\n if abs(motion_vector[i] - motion_vector[i + 1]) not in CONSONANCES:\n unconsotant_count += 1\n if abs(motion_vector[i] - motion_vector[i + 1]) > 6:\n big_leaps_count += 1\n hscores.append(float(unconsotant_count) / len(motion_vector))\n\n if debug:\n print \"HSCORE 15: UNCONSONANT MOTIONS - \", unconsotant_count\n\n # hscore 16: Large motion intervals (>6 tones) should be avoided\n hscores.append(float(big_leaps_count) / len(motion_vector))\n\n if debug:\n print \"HSCORE 16: LARGE MOTIONS - \", big_leaps_count\n\n # hscore 17: No frequent repetition of the same note\n rep_count = 0\n for note in melody_info:\n if note.hi_next == 0:\n rep_count += 1\n if rep_count > 2 * sections:\n rep_count -= 2 * sections\n else:\n rep_count = 0\n hscores.append(float(rep_count) / 
(len(cp) - 2 * sections))\n\n if debug:\n print \"HSCORE 17: REPETITIONS COUNT - \", rep_count\n\n # hscore 18: no repetition of sequence within a 4 measure interval\n repeated = set()\n for i in range(len(melody_info) - 2):\n j = i + 1\n while melody_info[j].measure < melody_info[i].measure + 4 and j < len(melody_info) - 1:\n if melody_info[i].pitch == melody_info[j].pitch:\n k = 1\n while j + k < len(melody_info) and melody_info[j + k].pitch == melody_info[i + k].pitch:\n if k == 1:\n repeated.add(j)\n repeated.add(j + k)\n k += 1\n j += 1\n\n hscores.append(float(len(repeated)) / len(cp))\n\n if debug:\n print \"HSCORE 18: REPEATED POSITIONS - \", repeated\n\n # hscore 19: largest allowed interval is octave\n more_than_ocatave_amount = len(filter(lambda x: x.hi_next > 7, melody_info[:-1]))\n hscores.append(float(more_than_ocatave_amount) / len(cp))\n\n if debug:\n print \"HSCORE 19: MORE THAN OCTAVES - \", more_than_ocatave_amount\n\n # vscore 1: whole notes should be consonant (ensured by generation and hscore 13)\n vscores.append(0.0)\n\n # vscores 2 and 3: halves and quarters should be consonant on first beat.\n # or can be dissonant on other beats beat, if passing tone\n amount_of_notes = 0\n amount_of_wrong_notes = 0\n for i, note in enumerate(melody_info):\n if note.duration >= 2:\n amount_of_notes += 1\n if note.beat == 0 and note.vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n if note.beat != 0 and note.vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n if note.hi_prev == 1 and note.hi_next == 1 and note.voice_dir == melody_info[i - 1].voice_dir:\n if melody_info[i - 1].vi in CONSONANCES and melody_info[i + 1].vi in CONSONANCES:\n amount_of_wrong_notes -= 1\n\n vscores.append(float(amount_of_wrong_notes) / amount_of_notes)\n vscores.append(float(amount_of_wrong_notes) / amount_of_notes)\n\n if debug:\n print \"VSCORE 2+3: NOTES > THAN 8TH - \", amount_of_notes, \",DISSONANT ONES - \", amount_of_wrong_notes\n\n # vscore 4: one of eight notes from pair should be consonant\n amount_of_wrong_notes = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.duration == 1 and melody_info[i + 1].duration == 1:\n if note.vi not in CONSONANCES and melody_info[i + 1].vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n beat += cp[i][1]\n vscores.append(float(amount_of_wrong_notes) / amount_of_8th)\n\n if debug:\n print \"VSCORE 4: 8TH NOTES - \", amount_of_8th, \",DISSONANT ONES - \", amount_of_wrong_notes\n\n # vscore 5: unisons ok if on 1st beat through suspension or if tied over (ensured by storing format)\n # else: if followed by step\n\n wrong_unsiones = len(filter(lambda x: x.vi == 0 and x.hi_next != 1, melody_info))\n vscores.append(float(wrong_unsiones) / len(cp))\n\n if debug:\n print \"VSCORE 5: WRONG UNISONES - \", wrong_unsiones\n\n # vscore 6: max allowed interval between voices is 10th, except for climax\n big_vert_intervals = len(filter(lambda x: x.vi > 9 and x.pitch != highest_note, melody_info))\n vscores.append(float(big_vert_intervals) / len(melody_info))\n\n if debug:\n print \"VSCORE 6: VERT INTERVALS > 10TH - \", big_vert_intervals\n\n\n # vscore 7: There should be no crossing (ensured by generation)\n vscores.append(0.0)\n\n # vscore 8: avoid the overlapping of parts (ensured by generation)\n vscores.append(0.0)\n\n # vscore 9: no leaps from unison to octave and vice versa\n uni_to_oct_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.vi == 7 and melody_info[i + 1].vi == 0:\n uni_to_oct_count += 1\n if note.vi == 0 and melody_info[i + 1].vi 
== 7:\n uni_to_oct_count += 1\n\n vscores.append(float(uni_to_oct_count) / len(melody_info))\n\n if debug:\n print \"VSCORE 9: UNISON-OCTAVE LEAPS - \", uni_to_oct_count\n\n # vscore 10: The ending is unison or octave (ensured by generation)\n vscores.append(0.0)\n\n # vscore 11: all perfect intervals (also perfect fourth) should be approached by contrary or oblique motion\n bad_perfect_intervals_count = 0\n battuda = 0\n for i in range(1, len(melody_info)):\n if melody_info[i].beat == 0 and melody_info[i].vi in PERFECT_INTERVALS:\n bad_perfect_intervals_count += 1\n prev_cf = cantus_firmus[melody_info[i - 1].measure]\n prev_cp = melody_info[i - 1].pitch\n cur_cf = cantus_firmus[melody_info[i].measure]\n cur_cp = melody_info[i].pitch\n if prev_cp == cur_cp and prev_cf != cur_cf: # oblique\n bad_perfect_intervals_count -= 1\n if prev_cp > cur_cp and prev_cf <= cur_cf: # contrary\n bad_perfect_intervals_count -= 1\n if melody_info[i].vi in [4, 7]:\n battuda += 1\n if prev_cp < cur_cp and prev_cf >= cur_cf: # contrary\n bad_perfect_intervals_count -= 1\n if melody_info[i].vi in [4, 7]:\n battuda += 1\n\n vscores.append(float(bad_perfect_intervals_count) / len(cantus_firmus))\n\n if debug:\n print \"VSCORE 11: PERF INTERVALS APPROACHED BADLY - \", bad_perfect_intervals_count\n\n # vscore 12: avoid simultaneous leaps in cf and cp, especially large leaps in same direction\n leaps_count = 0\n large_leaps_count = 0\n for i in range(len(melody_info) - 1):\n if melody_info[i + 1].beat == 0:\n leap_cp = melody_info[i + 1].pitch - melody_info[i].pitch\n leap_cf = cantus_firmus[melody_info[i + 1].measure] - cantus_firmus[melody_info[i].measure]\n if abs(leap_cf) > 1 and abs(leap_cp) > 1:\n leaps_count += 1\n if leap_cf > 6 and leap_cp > 6:\n large_leaps_count += 1\n if leap_cf < 6 and leap_cp < 6:\n large_leaps_count += 1\n vscores.append(float(leaps_count + large_leaps_count) / (len(cantus_firmus) * 2))\n\n if debug:\n print \"VSCORE 12: SIM LEAPS - \", leaps_count, \", LARGE SIM LEAPS - \", large_leaps_count\n\n # vscore 13: use all types of motion\n similar = 0\n contrary = 0\n oblique = 0\n parallel = 0\n for i in range(1, len(melody_info)):\n if melody_info[i].beat == 0:\n prev_cf = cantus_firmus[melody_info[i - 1].measure]\n prev_cp = melody_info[i - 1].pitch\n cur_cf = cantus_firmus[melody_info[i].measure]\n cur_cp = melody_info[i].pitch\n if prev_cp == cur_cp:\n if prev_cf != cur_cf:\n oblique = 1\n else:\n similar = 1\n if prev_cp > cur_cp:\n if prev_cf <= cur_cf:\n contrary = 1\n else:\n parallel = 1\n if prev_cp < cur_cp:\n if prev_cf >= cur_cf:\n contrary = 1\n else:\n parallel = 1\n types_of_motion = similar + oblique + contrary + parallel\n vscores.append(1 - float(types_of_motion) / 4)\n if debug:\n print \"VSCORE 13: MOTION TYPES (SOCP) - \", similar, oblique, contrary, parallel\n\n # vscore 14: climax of the CF and CP should not coincide\n cf_highest_note = max(cantus_firmus)\n coincide = 0\n for note in melody_info:\n if note.pitch == highest_note and cantus_firmus[note.measure] == cf_highest_note:\n coincide = 1\n\n vscores.append(coincide)\n if debug:\n print \"VSCORE 14: COINCIDE - \", coincide\n\n # vscore 15: Successive unisons, octaves and fifths on first beats are only valid\n # when separated by three quarter notes.\n bad_intervals_count = 0\n\n for i in range(len(melody_info) - 1):\n if melody_info[i].beat == 0 and melody_info[i].measure != len(cantus_firmus) - 1:\n if melody_info[i].vi in [0, 4, 7]:\n separated = True\n j = 1\n while melody_info[i + j].measure == 
melody_info[i].measure:\n if melody_info[i + j].duration > 2:\n separated = False\n j += 1\n if melody_info[i + j].vi in [0, 4, 7] and not separated:\n bad_intervals_count += 1\n vscores.append(float(bad_intervals_count) / (len(cantus_firmus) - 1))\n if debug:\n print \"VSCORE 15: BAD INTERVALS - \", bad_intervals_count\n\n # vscore 16: successive unisons, octaves and fifths not on first beats:\n # valid when separated by at least 2 notes, otherwise not.\n # Unless it is a consonant suspension of quarter note: ok for afterbeat fifths and octaves\n # separated only by a single quearter.\n # ***what a complex rule, we don't care about consonant suspension due to storing format >_<***\n bad_intervals_count = 0\n\n for i in range(len(melody_info) - 2):\n separated = True\n if melody_info[i].beat != 0:\n if melody_info[i].vi in [0, 4, 7]:\n if melody_info[i + 1].vi in [0, 4, 7]:\n separated = False\n if separated:\n if melody_info[i + 2].vi in [0, 4, 7]:\n separated = False\n if not separated:\n bad_intervals_count += 1\n\n vscores.append(float(bad_intervals_count) / (len(melody_info) - len(cantus_firmus)))\n\n if debug:\n print \"VSCORE 16: BAD INTERVALS - \", bad_intervals_count\n\n # vscore 17: no ottava or quinta battuda, whatever it means\n vscores.append(float(battuda) / len(cantus_firmus))\n\n if debug:\n print \"VSCORE 17: BATTUDAS - \", battuda\n\n # vscore 18: best ending: dissonant suspension into the leading tone (ensured by generation)\n vscores.append(0)\n\n # vscore 19: Thirds, sixths and tenths should predominate.\n good_interval_count = len(filter(lambda x: x.vi in [2, 5, 9], melody_info))\n if good_interval_count * 2 > len(melody_info):\n vscores.append(0.0)\n else:\n vscores.append(1.0 - float(2 * good_interval_count) / len(melody_info))\n\n if debug:\n print \"VSCORE 19: 3RDS 6THS 10THS - \", good_interval_count\n\n genome.fitness = sum([x * y for x, y in zip(hscores, HSCORE_WEIGHTS)]) + \\\n sum([x * y for x, y in zip(vscores, VSCORE_WEIGHTS)])\n if debug:\n print \"HSCORES: \", hscores\n print \"VSCORES: \", vscores\n print \"FINAL SCORE: \", genome.fitness\n print \"FINAL SCORE UNSCALED: \", sum(hscores) + sum(vscores)\n return genome.fitness\n\n return fitness_function", "def get_likelihood(\n self,\n qb,\n inv_fish,\n map_tag=None,\n null_first_cmb=False,\n lmin=33,\n lmax=250,\n mcmc=True,\n alpha_tags=[\"95\", \"150\"],\n beam_tags=[\"95\", \"150\"],\n r_prior=[0, np.inf],\n alpha_prior=[0, np.inf],\n res_prior=None,\n beam_prior=[0, 1],\n betad_prior=[0, 1],\n dust_amp_prior=[0, np.inf],\n dust_ellind_prior=[0, 1],\n num_walkers=50,\n num_steps=20000,\n converge_criteria=0.01,\n reset_backend=None,\n file_tag=None,\n ):\n\n for x in [\n r_prior,\n alpha_prior,\n res_prior,\n beam_prior,\n betad_prior,\n dust_amp_prior,\n dust_ellind_prior,\n ]:\n if x is not None:\n x[:] = [float(x[0]), float(x[1])]\n\n save_name = \"like_mcmc\"\n if not mcmc:\n alpha_prior = None\n res_prior = None\n beam_prior = None\n betad_prior = None\n dust_amp_prior = None\n dust_ellind_prior = None\n\n # no template cleaning if there aren't any templates specified\n if not getattr(self, \"template_cleaned\", False):\n alpha_prior = None\n\n # null out unused priors\n self.template_alpha = getattr(self, \"template_alpha\", None)\n if self.template_alpha is None or all(\n [x is None for x in self.template_alpha.values()]\n ):\n alpha_prior = None\n\n # count alpha parameters to fit\n alpha_tags = [x for x in alpha_tags if x in self.map_tags_orig]\n if not len(alpha_tags):\n alpha_prior = 
None\n\n num_alpha = 0\n if alpha_prior is not None:\n num_alpha = len(alpha_tags)\n\n # count beam parameters to fit\n beam_tags = [x for x in beam_tags if x in self.map_tags_orig]\n if not len(beam_tags):\n beam_prior = None\n\n num_beam = 0\n if beam_prior is not None:\n num_beam = len(beam_tags)\n\n if not any([k.startswith(\"res_\") for k in qb]):\n res_prior = None\n\n if np.any(\n [\n betad_prior is not None,\n dust_amp_prior is not None,\n dust_ellind_prior is not None,\n ]\n ):\n dust_ell_fit = True\n else:\n dust_ell_fit = False\n\n # bookkeeping: ordered priors\n priors = {\n \"r_prior\": r_prior,\n \"alpha_prior\": alpha_prior,\n \"res_prior\": res_prior,\n \"beam_prior\": beam_prior,\n \"betad_prior\": betad_prior,\n \"dust_amp_prior\": dust_amp_prior,\n \"dust_ellind_prior\": dust_ellind_prior,\n }\n # priors on quantities that affect Dmat_obs or gmat (precalculated)\n obs_priors = [alpha_prior]\n\n # check parameter space\n if all([x is None for x in priors.values()]):\n raise RuntimeError(\"Empty parameter space\")\n\n out = dict(\n r_prior=r_prior,\n alpha_prior=alpha_prior,\n res_prior=res_prior,\n beam_prior=beam_prior,\n betad_prior=betad_prior,\n dust_amp_prior=dust_amp_prior,\n dust_ellind_prior=dust_ellind_prior,\n alpha_tags=alpha_tags,\n num_walkers=num_walkers,\n null_first_cmb=null_first_cmb,\n apply_gcorr=self.apply_gcorr,\n weighted_bins=self.weighted_bins,\n lmin=lmin,\n lmax=lmax,\n )\n\n if mcmc and reset_backend is None:\n ret = self.load_data(\n save_name,\n \"likelihood\",\n bp_opts=True,\n to_attrs=False,\n map_tag=map_tag,\n value_ref=out,\n extra_tag=file_tag,\n )\n if ret is not None and ret.get(\"converged\", False):\n if converge_criteria >= ret.get(\"converge_criteria\", 0.01):\n return ret\n if ret is not None:\n for pname, pval in priors.items():\n if np.all(pval != ret.get(pname, None)):\n ret = None\n # clear chain cache if rerunning, otherwise append to chain by default\n reset_backend = ret is None\n\n out.update(converge_criteria=converge_criteria)\n\n # save state\n if mcmc and reset_backend:\n self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )\n\n # clear pre-computed quantities\n self.clear_precalc()\n use_precalc = all([x is None for x in obs_priors])\n\n cls_input, cls_noise, cls_debias = self.get_data_spectra()\n\n # extract residual bins, ignoring bins outside of lmin/lmax\n if res_prior is not None:\n bin_def_orig = copy.deepcopy(self.bin_def)\n nbins_res_orig = self.nbins_res\n qb_res = OrderedDict()\n num_res = 0\n for k in list(qb):\n if k.startswith(\"res_\"):\n bd = self.bin_def[k]\n good = np.where((bd[:, 1] > lmin) & (bd[:, 0] < lmax))[0]\n # use all qb res in range lmin, lmax\n self.bin_def[k] = bd[good]\n v = qb.pop(k)[good]\n num_res += len(v)\n\n # use average qb res in good range per map\n # self.bin_def[k] = np.array([[lmin, lmax + 1]])\n # v = np.array([(qb.pop(k)[good]).mean()])\n # num_res += 1\n qb_res[k] = v\n self.nbins_res = num_res\n\n # set CMB model bandpowers to unity, since we are computing\n # the likelihood of this model given the data\n if r_prior is None:\n self.log(\"Computing model spectrum\", \"debug\")\n self.warn(\"Beam variation not implemented for case of no r fit\")\n cbl = self.bin_cl_template(map_tag=map_tag)\n cls_model = self.get_model_spectra(qb, cbl, delta=True, cls_noise=cls_noise)\n else:\n qb = copy.deepcopy(qb)\n for spec in self.specs:\n stags = [\"cmb_{}\".format(spec), \"fg_{}\".format(spec)]\n for stag in stags:\n if stag not in qb:\n continue\n 
qb[stag] = np.ones_like(qb[stag])\n\n self.log(\"Computing r model spectrum\", \"debug\")\n cls_shape_scalar = self.get_signal_shape(\n r=1.0, save=False, component=\"scalar\"\n )\n\n cls_shape_tensor = self.get_signal_shape(\n r=1.0, save=False, component=\"tensor\"\n )\n\n # load tensor and scalar terms separately\n cbl_scalar = self.bin_cl_template(cls_shape_scalar, map_tag)\n cls_model_scalar = self.get_model_spectra(\n qb, cbl_scalar, delta=True, cls_noise=cls_noise\n )\n cbl_tensor = self.bin_cl_template(cls_shape_tensor, map_tag)\n cls_model_tensor = self.get_model_spectra(\n qb, cbl_tensor, delta=False, res=False\n )\n if beam_prior is not None:\n # load beam error term for tensor and scalar\n cbl_scalar_beam = self.bin_cl_template(\n cls_shape_scalar, map_tag, beam_error=True\n )\n cls_mod_scal_beam = self.get_model_spectra(\n qb, cbl_scalar_beam, delta=True, res=False\n )\n cbl_tensor_beam = self.bin_cl_template(\n cls_shape_tensor, map_tag, beam_error=True\n )\n cls_mod_tens_beam = self.get_model_spectra(\n qb, cbl_tensor_beam, delta=False, res=False\n )\n\n # load foreground shape\n if dust_ell_fit:\n cls_shape_dust = self.get_signal_shape(save=False, component=\"fg\")\n # if dust_ellind_prior is None:\n # # can preload shape since not varying ell index\n cbl_fg = self.bin_cl_template(cls_shape_dust, map_tag=map_tag)\n if beam_prior is not None:\n cbl_fg_beam = self.bin_cl_template(\n cls_shape_dust, map_tag, beam_error=True\n )\n\n cbl = copy.deepcopy(cbl_scalar)\n cls_model = copy.deepcopy(cls_model_scalar)\n\n # XXX TODO\n # how to marginalize over the garbage bin?\n\n def parse_params(theta):\n \"\"\"\n Parse array of parameters into a dict\n \"\"\"\n params = {}\n if r_prior is not None:\n params[\"r\"] = theta[0]\n theta = theta[1:]\n if alpha_prior is not None:\n params[\"alpha\"] = theta[:num_alpha]\n theta = theta[num_alpha:]\n if res_prior is not None:\n params[\"res\"] = theta[:num_res]\n theta = theta[num_res:]\n if beam_prior is not None:\n params[\"beam\"] = theta[:num_beam]\n theta = theta[num_beam:]\n if betad_prior is not None:\n params[\"betad\"] = theta[0]\n theta = theta[1:]\n if dust_amp_prior is not None:\n # param for ee and bb\n params[\"dust_amp\"] = theta[:2]\n theta = theta[2:]\n if dust_ellind_prior is not None:\n params[\"dust_ellind\"] = theta[0]\n theta = theta[1:]\n if len(theta):\n raise ValueError(\"Too many parameters to parse\")\n return params\n\n def log_prior(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log prior function constructed from input options\n \"\"\"\n values = {\n \"r_prior\": r,\n \"alpha_prior\": alpha,\n \"res_prior\": res,\n \"dust_amp_prior\": dust_amp,\n }\n for v, pval in values.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n if np.any(pval < prior[0]) or np.any(pval > prior[1]):\n return -np.inf\n\n values_gauss = {\n \"beam_prior\": beam,\n \"betad_prior\": betad,\n \"dust_ellind_prior\": dust_ellind,\n }\n # for beam and betad, use gaussian prior\n log_prob = 0.0\n for v, pval in values_gauss.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n pval = np.atleast_1d(pval)\n norm = np.log(1.0 / (prior[1] * np.sqrt(2 * np.pi)))\n chi = (pval - prior[0]) / prior[1]\n log_prob += np.sum(norm - chi ** 2 / 2.0)\n\n return log_prob\n\n def log_like(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log likelihood function constructed from 
input options\n \"\"\"\n cls_model0 = copy.deepcopy(cls_model)\n\n # compute new template subtracted data spectra\n if alpha is None:\n clsi = cls_input\n else:\n self.get_masked_data(template_alpha=OrderedDict(zip(alpha_tags, alpha)))\n clsi = self.get_data_spectra(do_noise=False)\n\n if beam is not None:\n beam = dict(zip(beam_tags, beam))\n beam_coeffs = dict()\n for xname, (m0, m1) in self.map_pairs_orig.items():\n d = {}\n b0, b1 = [beam.get(m, None) for m in (m0, m1)]\n if b0 is not None:\n d[\"b1\"] = b0\n if b1 is not None:\n d[\"b2\"] = b1\n if b0 is not None:\n d[\"b3\"] = b0 * b1\n beam_coeffs[xname] = d\n\n # compute new signal shape by scaling tensor component by r\n if r is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n dd[:] = (\n cls_model_scalar[stag][xname]\n + r * cls_model_tensor[ctag][xname]\n )\n\n if beam is None:\n continue\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * (\n cls_mod_scal_beam[ctag][xname][bn]\n + r * cls_mod_tens_beam[ctag][xname][bn]\n )\n dd[:] += beam_term\n\n elif beam is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_scal_beam[ctag][xname][bn]\n dd[:] = cls_model_scalar[stag][xname] + beam_term\n\n # fg term, including beam modifications. Because mix terms are\n # dependent on dust amp, get model specs here.\n if dust_ell_fit:\n if dust_amp is None:\n qb[\"fg_ee\"][:] = 1\n qb[\"fg_bb\"][:] = 1\n else:\n qb[\"fg_ee\"][:] = dust_amp[0]\n qb[\"fg_bb\"][:] = dust_amp[1]\n if betad is None:\n qb[\"delta_beta\"][:] = 0\n else:\n qb[\"delta_beta\"][:] = betad\n if dust_ellind is not None:\n cbl_fg0 = self.bin_cl_template(\n cls_shape_dust, map_tag=map_tag, fg_ell_ind=dust_ellind\n )\n if beam is not None:\n cbl_fg_beam0 = self.bin_cl_template(\n cls_shape_dust,\n map_tag,\n fg_ell_ind=dust_ellind,\n beam_error=True,\n )\n else:\n cbl_fg0 = cbl_fg\n if beam is not None:\n cbl_fg_beam0 = cbl_fg_beam\n\n cls_model_fg = self.get_model_spectra(\n qb, cbl_fg0, delta=True, res=False\n )\n if beam is not None:\n cls_mod_fg_beam = self.get_model_spectra(\n qb, cbl_fg_beam0, delta=True, res=False\n )\n # add fg field to model, and add fg to total model\n for stag, d in cls_model_fg.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"fg\", \"total\"]:\n continue\n ftag = \"fg_{}\".format(spec)\n if stag not in cls_model0:\n cls_model0[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in cls_model0[stag]:\n cls_model0[stag][xname] = cls_model_fg[ftag][xname]\n else:\n cls_model0[stag][xname] += cls_model_fg[ftag][xname]\n\n # add beam terms to fg and total fields\n if beam is not None:\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_fg_beam[ftag][xname][bn]\n cls_model0[stag][xname] += beam_term\n\n # compute noise model terms\n if res is None:\n clsm = cls_model0\n else:\n res = pt.arr_to_dict(res, qb_res)\n clsm = copy.deepcopy(cls_model0)\n cls_res = self.get_model_spectra(res, cbl)\n for stag, d in cls_res.items():\n if stag not in clsm:\n clsm[stag] = OrderedDict()\n for xname, dd 
in d.items():\n if xname not in clsm[stag]:\n clsm[stag][xname] = dd\n else:\n clsm[stag][xname] += dd\n\n # compute likelihood\n like = self.fisher_calc(\n qb,\n cbl,\n clsi,\n cls_noise=cls_noise,\n cls_debias=cls_debias,\n cls_model=clsm,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n use_precalc=use_precalc,\n like_lmin=lmin,\n like_lmax=lmax,\n )\n return like\n\n def log_prob(theta):\n \"\"\"\n Log posterior probability from prior and likelihood\n\n Returns log_prior with each step\n \"\"\"\n params = parse_params(theta)\n prior = log_prior(**params)\n if not np.isfinite(prior):\n return -np.inf, -np.inf\n like = log_like(**params)\n if not np.isfinite(like):\n return -np.inf, prior\n return prior + like, prior\n\n # initial values\n x0 = []\n brute_force = True if not mcmc else False # only vary r\n if r_prior is not None:\n x0 += [0.01]\n if alpha_prior is not None:\n alphas = [self.template_alpha[tag] for tag in alpha_tags]\n x0 += [0.01 if a == 0 else a for a in alphas]\n brute_force = False\n if res_prior is not None:\n x0 += list(pt.dict_to_arr(qb_res, flatten=True))\n brute_force = False\n if beam_prior is not None:\n # add a beam term for each frequency\n x0 += [0.01] * len(beam_tags)\n brute_force = False\n if betad_prior is not None:\n x0 += [0.01]\n brute_force = False\n if dust_amp_prior is not None:\n x0 += [1, 1]\n brute_force = False\n if dust_ellind_prior is not None:\n x0 += [0.01]\n brute_force = False\n\n ndim = len(x0)\n if ndim * 2 > num_walkers:\n num_walkers = int(np.round(ndim / float(num_walkers)) * num_walkers * 2)\n self.warn(\n \"Found {} parameters, increasing number of MCMC walkers to {}\".format(\n ndim, num_walkers\n )\n )\n x0 = np.array(x0)[None, :] * (1 + 1e-4 * np.random.randn(num_walkers, len(x0)))\n\n if brute_force or (r_prior is not None and ndim == 1):\n self.log(\"Computing brute-force r profile likelihood\", \"info\")\n likefile = self.get_filename(\n save_name, ext=\".txt\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n rs = np.linspace(0, 3, 500)\n likes = np.zeros_like(rs)\n for idx, r in enumerate(rs):\n like = log_like(r=r)\n if idx % 20 == 0:\n self.log(\"r = {:.3f}, loglike = {:.2f}\".format(r, like), \"debug\")\n likes[idx] = like\n header = \"{} r likelihood\\nColumns: r, loglike\".format(\n \"Multi-map\" if map_tag is None else \"Map {}\".format(map_tag)\n )\n np.savetxt(likefile, np.column_stack((rs, likes)), header=header)\n\n if not mcmc:\n return [rs, likes]\n\n # run chains!\n import emcee\n\n # setup sampler output file\n filename = self.get_filename(\n save_name, ext=\".h5\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n backend_exists = os.path.exists(filename)\n backend = emcee.backends.HDFBackend(filename)\n if backend_exists and backend.shape != (num_walkers, ndim):\n self.warn(\n \"Expected backend of shape ({}, {}), found {}. 
Resetting\".format(\n num_walkers, ndim, backend.shape\n )\n )\n reset_backend = True\n if reset_backend:\n backend.reset(num_walkers, ndim)\n\n # initialize sampler\n self.log(\"Initializing sampler\", \"info\")\n sampler = emcee.EnsembleSampler(num_walkers, ndim, log_prob, backend=backend)\n if not reset_backend and backend_exists:\n # grab the last sample if appending to an existing run\n x0 = sampler.run_mcmc(None, 1)\n\n # track autocorrelation time\n old_tau = np.inf\n converged = False\n\n self.log(\n \"Starting {} iterations with {} parameters\".format(num_steps, ndim), \"info\"\n )\n for sample in sampler.sample(x0, iterations=num_steps):\n if not sampler.iteration % 10:\n self.log(\"MCMC iteration {}\".format(sampler.iteration), \"debug\")\n # check convergence every 100 steps\n if sampler.iteration % 100:\n continue\n\n # compute autocorrelation time\n tau = sampler.get_autocorr_time(tol=0)\n\n # check convergence\n converged = np.all(tau / converge_criteria < sampler.iteration)\n converged &= np.all(np.abs(old_tau - tau) / tau < converge_criteria)\n self.log(\n \"MCMC iteration {} autocorr time: mean {:.1f} min {:.1f} max {:.1f}\".format(\n sampler.iteration, np.mean(tau), np.min(tau), np.max(tau)\n ),\n \"info\",\n )\n if converged:\n break\n old_tau = tau\n\n out.update(converged=converged, num_steps=sampler.iteration)\n\n # converged posterior distribution\n if converged:\n self.log(\n \"MCMC converged in {} iterations\".format(sampler.iteration), \"info\"\n )\n tau = sampler.get_autocorr_time()\n burnin = int(2 * np.max(tau))\n thin = int(0.5 * np.min(tau))\n samples = sampler.get_chain(discard=burnin, thin=thin, flat=True)\n out.update(tau=tau, burnin=burnin, thin=thin, samples=samples)\n else:\n self.warn(\"MCMC not converged in {} iterations\".format(num_steps))\n\n if res_prior is not None:\n self.bin_def = bin_def_orig\n self.nbins_res = nbins_res_orig\n\n # save and return\n return self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )", "def __logprob__(self, cv, vsense):\n return 1.0 / (1.0 + np.exp(-np.dot(cv, vsense)))", "def generate_log(nexp, clpeaks, timepeaks, remap, gap=0):\n # Select the index of the experiment\n peakini = 0\n i = 0\n while i < nexp:\n exp = timepeaks[i]\n peakini += exp.shape[0]\n i += 1\n\n exp = timepeaks[nexp]\n peakend = peakini + exp.shape[0]\n\n # Build the sequence string\n peakstr = []\n peakset = []\n\n for i in range(peakini, peakend):\n peakset.append(voc[remap[clpeaks[i][0] - 1]])\n if i < peakend - 1 and gap != 0:\n if (timepeaks[nexp][i - peakini + 1] - timepeaks[nexp][i - peakini]) > gap:\n peakstr.append(peakset)\n peakset = []\n\n return peakstr", "def log_likelihood(self, state, obs, act):\n indices = np.array([self.Gittins[state['successes'][i], state['failures'][i]] for i in range(self.env.n_arms)])\n greedy_arms = np.where(np.isclose(indices,indices.max()))[0]\n return np.log(1/len(greedy_arms)) if act in greedy_arms else -1e8", "def fitness_function(genome, debug=False):\n cp = genome.chromosome\n\n # calculate some information for easier scoring\n ScoringInfo = namedtuple(\"ScoringInfo\", \"pitch duration measure beat vi hi_next hi_prev voice_dir\")\n melody_info = list()\n beat = 4\n measure = 0\n for i in range(1, len(cp)):\n hi_next = abs(cp[i][0] - cp[i + 1][0]) if i != len(cp) - 1 else -1 # next horizontal interval\n hi_prev = abs(cp[i][0] - cp[i - 1][0]) if i != 1 else -1 # previous horizontal interval\n vi = abs(cp[i][0] - cantus_firmus[measure]) # vertical interval\n # voice 
movement direction\n voice_dir = 0 if i == len(cp) - 1 or cp[i + 1][0] == cp[i][0] else copysign(1, cp[i + 1][0] - cp[i][0])\n melody_info.append(ScoringInfo(cp[i][0], cp[i][1], measure, beat, vi, hi_next, hi_prev, voice_dir))\n beat += cp[i][1]\n measure += beat / 8\n beat %= 8\n\n if debug:\n print \"MELODY INFO: \", melody_info\n\n hscores = list()\n vscores = list()\n # hscore 1: 8th notes must move in step\n amount_of_8th = 0\n amount_of_missteps = 0\n for note in melody_info:\n if note.duration == 1:\n amount_of_8th += 1\n if note.hi_next > 1:\n amount_of_missteps += 1\n if note.hi_prev > 1:\n amount_of_missteps += 1\n hscores.append(float(amount_of_missteps) / (amount_of_8th * 2))\n if debug:\n print \"HSCORE 1: 8TH - \", amount_of_8th, \", MISSTEPS - \", amount_of_missteps\n\n # hscore 2: one climax, that can be repeated only after neighboring tone\n # hscore 3: Climax should be on the strong beat\n highest_note = max([note.pitch for note in melody_info])\n climax_count = 0\n climax_on_weak_beat_count = 0\n for i, note in enumerate(melody_info):\n if note.pitch == highest_note:\n climax_count += 1\n if note.beat not in [0, 4]:\n climax_on_weak_beat_count += 1\n if i < len(melody_info) - 2 and note.pitch == melody_info[i + 2].pitch: # If next note is\n if note.hi_next == 1 and melody_info[i + 2].hi_prev == 1: # neighboring tone\n if note.vi in CONSONANCES and melody_info[i + 2].vi in CONSONANCES: # And surrounding notes are consonant\n climax_count -= 1 # we can allow 2nd climax\n if melody_info[i + 2].beat not in [0, 4]:\n climax_on_weak_beat_count -= 1 # And 2nd climax may be on weak beat\n\n hscores.append(float(climax_count - 1) / len(melody_info))\n hscores.append(float(climax_on_weak_beat_count) / climax_count)\n\n if debug:\n print \"HSCORE 2+3: CLIMAX CNT - \", climax_count, \", WEAK CLIMAX CNT - \", climax_on_weak_beat_count\n\n # hscore 4: Horizontal intervals are consonant\n unconsonant_amount = len(filter(lambda x: x.hi_next not in CONSONANCES + [1], melody_info[:-1]))\n hscores.append(float(unconsonant_amount) / (len(melody_info) - 1))\n\n if debug:\n print \"HSCORE 4: UNCONSANANT AMOUNT - \", unconsonant_amount\n\n # hscore 5: Stepwise movement should predominate\n leaps_count = len(filter(lambda x: x.hi_next != 1, melody_info[:-1]))\n sections = round(float(len(cantus_firmus)) / 16)\n if leaps_count < (2 * sections):\n hscores.append(float(leaps_count) / (len(melody_info) - 1 - 4 * sections))\n elif leaps_count > (4 * sections):\n hscores.append(float(leaps_count) / (len(melody_info) - 1 - 4 * sections))\n else:\n hscores.append(0.0)\n\n if debug:\n print \"HSCORE 5: LEAPS - \", leaps_count, \"SECTIONS - \", sections\n\n # hscore 6: After large leap - stepwise motion\n large_leaps_count = 0\n large_leaps_not_followed_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.hi_next >= 3:\n large_leaps_count += 1\n if melody_info[i + 1].hi_next != 1:\n large_leaps_not_followed_count += 1\n hscores.append(float(large_leaps_not_followed_count) / large_leaps_count if large_leaps_count != 0 else 0.0)\n\n if debug:\n print \"HSCORE 6: LL CNT - \", large_leaps_count, \"LL NOT FOLLOWED CNT - \", large_leaps_not_followed_count\n\n # hscore 7: change direction after each large leap\n large_leaps_not_changedir_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.hi_next >= 3 and note.voice_dir != -melody_info[i + 1].voice_dir:\n large_leaps_not_changedir_count += 1\n hscores.append(float(large_leaps_not_changedir_count) / large_leaps_count if 
large_leaps_count != 0 else 0.0)\n\n if debug:\n print \"HSCORE 7: LL NOT CHNGDIR CNT - \", large_leaps_not_changedir_count\n\n # hscore 8: climax should be melodically consonant with tonic\n hscores.append(1.0 if highest_note - 4 in CONSONANCES else 0.0)\n\n # hscore 9: no more than 2 consecutive leaps\n conseq_leaps = 0\n punish_score = 0\n for note in melody_info:\n conseq_leaps += 1\n if note.hi_next in [0, 1]:\n conseq_leaps = 0\n if conseq_leaps > 3:\n punish_score += 1\n hscores.append(float(punish_score) / (len(melody_info) - 3))\n\n if debug:\n print \"HSCORE 9: CONSEQ LEAPS PUNISH SCORE - \", punish_score\n\n # hscore 10: no more than 2 large leaps per section\n if large_leaps_count > 2 * sections:\n hscores.append(float(large_leaps_count - 2 * sections) / (len(melody_info) - 1 - 2 * sections))\n else:\n hscores.append(0.0)\n\n # hscore 11: not too long stepwise in same direction\n longest_stepwise_seq = 0\n current_stepwise_seq = 0\n prev_dir = 0\n num_changes = 0\n motion_vector = list()\n for note in melody_info:\n if note.hi_next <= 1:\n if note.voice_dir in [prev_dir, 0]:\n current_stepwise_seq += 1\n longest_stepwise_seq = max(longest_stepwise_seq, current_stepwise_seq)\n else:\n prev_dir = note.voice_dir\n current_stepwise_seq = 0\n num_changes += 1\n motion_vector.append(note.pitch)\n else:\n if note.voice_dir != prev_dir and note.voice_dir != 0:\n prev_dir = note.voice_dir\n num_changes += 1\n motion_vector.append(note.pitch)\n current_stepwise_seq = 0\n motion_vector.append(cp[-1][0])\n if longest_stepwise_seq < 5:\n longest_stepwise_seq = 0\n hscores.append(float(longest_stepwise_seq) / len(cp))\n\n if debug:\n print \"HSCORE 11: LONGEST STEPWISE SEQUENCE - \", longest_stepwise_seq\n\n # hscore 12: direction needs to change several times\n if num_changes < 3 * sections:\n hscores.append(1 - float(num_changes) / (3 * sections))\n else:\n hscores.append(0.0)\n\n # hscore 13: ending note is tonic\n hscores.append(0)\n\n # hscore 14: penultimate note is leading tone\n hscores.append(0)\n\n # hscore 15: the start of a motion is consonant with the end of a motion\n unconsotant_count = 0\n big_leaps_count = 0\n for i in range(1, len(motion_vector) - 1):\n if abs(motion_vector[i] - motion_vector[i + 1]) not in CONSONANCES:\n unconsotant_count += 1\n if abs(motion_vector[i] - motion_vector[i + 1]) > 6:\n big_leaps_count += 1\n hscores.append(float(unconsotant_count) / len(motion_vector))\n\n if debug:\n print \"HSCORE 15: UNCONSONANT MOTIONS - \", unconsotant_count\n\n # hscore 16: Large motion intervals (>6 tones) should be avoided\n hscores.append(float(big_leaps_count) / len(motion_vector))\n\n if debug:\n print \"HSCORE 16: LARGE MOTIONS - \", big_leaps_count\n\n # hscore 17: No frequent repetition of the same note\n rep_count = 0\n for note in melody_info:\n if note.hi_next == 0:\n rep_count += 1\n if rep_count > 2 * sections:\n rep_count -= 2 * sections\n else:\n rep_count = 0\n hscores.append(float(rep_count) / (len(cp) - 2 * sections))\n\n if debug:\n print \"HSCORE 17: REPETITIONS COUNT - \", rep_count\n\n # hscore 18: no repetition of sequence within a 4 measure interval\n repeated = set()\n for i in range(len(melody_info) - 2):\n j = i + 1\n while melody_info[j].measure < melody_info[i].measure + 4 and j < len(melody_info) - 1:\n if melody_info[i].pitch == melody_info[j].pitch:\n k = 1\n while j + k < len(melody_info) and melody_info[j + k].pitch == melody_info[i + k].pitch:\n if k == 1:\n repeated.add(j)\n repeated.add(j + k)\n k += 1\n j += 1\n\n 
hscores.append(float(len(repeated)) / len(cp))\n\n if debug:\n print \"HSCORE 18: REPEATED POSITIONS - \", repeated\n\n # hscore 19: largest allowed interval is octave\n more_than_ocatave_amount = len(filter(lambda x: x.hi_next > 7, melody_info[:-1]))\n hscores.append(float(more_than_ocatave_amount) / len(cp))\n\n if debug:\n print \"HSCORE 19: MORE THAN OCTAVES - \", more_than_ocatave_amount\n\n # vscore 1: whole notes should be consonant (ensured by generation and hscore 13)\n vscores.append(0.0)\n\n # vscores 2 and 3: halves and quarters should be consonant on first beat.\n # or can be dissonant on other beats beat, if passing tone\n amount_of_notes = 0\n amount_of_wrong_notes = 0\n for i, note in enumerate(melody_info):\n if note.duration >= 2:\n amount_of_notes += 1\n if note.beat == 0 and note.vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n if note.beat != 0 and note.vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n if note.hi_prev == 1 and note.hi_next == 1 and note.voice_dir == melody_info[i - 1].voice_dir:\n if melody_info[i - 1].vi in CONSONANCES and melody_info[i + 1].vi in CONSONANCES:\n amount_of_wrong_notes -= 1\n\n vscores.append(float(amount_of_wrong_notes) / amount_of_notes)\n vscores.append(float(amount_of_wrong_notes) / amount_of_notes)\n\n if debug:\n print \"VSCORE 2+3: NOTES > THAN 8TH - \", amount_of_notes, \",DISSONANT ONES - \", amount_of_wrong_notes\n\n # vscore 4: one of eight notes from pair should be consonant\n amount_of_wrong_notes = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.duration == 1 and melody_info[i + 1].duration == 1:\n if note.vi not in CONSONANCES and melody_info[i + 1].vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n beat += cp[i][1]\n vscores.append(float(amount_of_wrong_notes) / amount_of_8th)\n\n if debug:\n print \"VSCORE 4: 8TH NOTES - \", amount_of_8th, \",DISSONANT ONES - \", amount_of_wrong_notes\n\n # vscore 5: unisons ok if on 1st beat through suspension or if tied over (ensured by storing format)\n # else: if followed by step\n\n wrong_unsiones = len(filter(lambda x: x.vi == 0 and x.hi_next != 1, melody_info))\n vscores.append(float(wrong_unsiones) / len(cp))\n\n if debug:\n print \"VSCORE 5: WRONG UNISONES - \", wrong_unsiones\n\n # vscore 6: max allowed interval between voices is 10th, except for climax\n big_vert_intervals = len(filter(lambda x: x.vi > 9 and x.pitch != highest_note, melody_info))\n vscores.append(float(big_vert_intervals) / len(melody_info))\n\n if debug:\n print \"VSCORE 6: VERT INTERVALS > 10TH - \", big_vert_intervals\n\n\n # vscore 7: There should be no crossing (ensured by generation)\n vscores.append(0.0)\n\n # vscore 8: avoid the overlapping of parts (ensured by generation)\n vscores.append(0.0)\n\n # vscore 9: no leaps from unison to octave and vice versa\n uni_to_oct_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.vi == 7 and melody_info[i + 1].vi == 0:\n uni_to_oct_count += 1\n if note.vi == 0 and melody_info[i + 1].vi == 7:\n uni_to_oct_count += 1\n\n vscores.append(float(uni_to_oct_count) / len(melody_info))\n\n if debug:\n print \"VSCORE 9: UNISON-OCTAVE LEAPS - \", uni_to_oct_count\n\n # vscore 10: The ending is unison or octave (ensured by generation)\n vscores.append(0.0)\n\n # vscore 11: all perfect intervals (also perfect fourth) should be approached by contrary or oblique motion\n bad_perfect_intervals_count = 0\n battuda = 0\n for i in range(1, len(melody_info)):\n if melody_info[i].beat == 0 and melody_info[i].vi in PERFECT_INTERVALS:\n 
bad_perfect_intervals_count += 1\n prev_cf = cantus_firmus[melody_info[i - 1].measure]\n prev_cp = melody_info[i - 1].pitch\n cur_cf = cantus_firmus[melody_info[i].measure]\n cur_cp = melody_info[i].pitch\n if prev_cp == cur_cp and prev_cf != cur_cf: # oblique\n bad_perfect_intervals_count -= 1\n if prev_cp > cur_cp and prev_cf <= cur_cf: # contrary\n bad_perfect_intervals_count -= 1\n if melody_info[i].vi in [4, 7]:\n battuda += 1\n if prev_cp < cur_cp and prev_cf >= cur_cf: # contrary\n bad_perfect_intervals_count -= 1\n if melody_info[i].vi in [4, 7]:\n battuda += 1\n\n vscores.append(float(bad_perfect_intervals_count) / len(cantus_firmus))\n\n if debug:\n print \"VSCORE 11: PERF INTERVALS APPROACHED BADLY - \", bad_perfect_intervals_count\n\n # vscore 12: avoid simultaneous leaps in cf and cp, especially large leaps in same direction\n leaps_count = 0\n large_leaps_count = 0\n for i in range(len(melody_info) - 1):\n if melody_info[i + 1].beat == 0:\n leap_cp = melody_info[i + 1].pitch - melody_info[i].pitch\n leap_cf = cantus_firmus[melody_info[i + 1].measure] - cantus_firmus[melody_info[i].measure]\n if abs(leap_cf) > 1 and abs(leap_cp) > 1:\n leaps_count += 1\n if leap_cf > 6 and leap_cp > 6:\n large_leaps_count += 1\n if leap_cf < 6 and leap_cp < 6:\n large_leaps_count += 1\n vscores.append(float(leaps_count + large_leaps_count) / (len(cantus_firmus) * 2))\n\n if debug:\n print \"VSCORE 12: SIM LEAPS - \", leaps_count, \", LARGE SIM LEAPS - \", large_leaps_count\n\n # vscore 13: use all types of motion\n similar = 0\n contrary = 0\n oblique = 0\n parallel = 0\n for i in range(1, len(melody_info)):\n if melody_info[i].beat == 0:\n prev_cf = cantus_firmus[melody_info[i - 1].measure]\n prev_cp = melody_info[i - 1].pitch\n cur_cf = cantus_firmus[melody_info[i].measure]\n cur_cp = melody_info[i].pitch\n if prev_cp == cur_cp:\n if prev_cf != cur_cf:\n oblique = 1\n else:\n similar = 1\n if prev_cp > cur_cp:\n if prev_cf <= cur_cf:\n contrary = 1\n else:\n parallel = 1\n if prev_cp < cur_cp:\n if prev_cf >= cur_cf:\n contrary = 1\n else:\n parallel = 1\n types_of_motion = similar + oblique + contrary + parallel\n vscores.append(1 - float(types_of_motion) / 4)\n if debug:\n print \"VSCORE 13: MOTION TYPES (SOCP) - \", similar, oblique, contrary, parallel\n\n # vscore 14: climax of the CF and CP should not coincide\n cf_highest_note = max(cantus_firmus)\n coincide = 0\n for note in melody_info:\n if note.pitch == highest_note and cantus_firmus[note.measure] == cf_highest_note:\n coincide = 1\n\n vscores.append(coincide)\n if debug:\n print \"VSCORE 14: COINCIDE - \", coincide\n\n # vscore 15: Successive unisons, octaves and fifths on first beats are only valid\n # when separated by three quarter notes.\n bad_intervals_count = 0\n\n for i in range(len(melody_info) - 1):\n if melody_info[i].beat == 0 and melody_info[i].measure != len(cantus_firmus) - 1:\n if melody_info[i].vi in [0, 4, 7]:\n separated = True\n j = 1\n while melody_info[i + j].measure == melody_info[i].measure:\n if melody_info[i + j].duration > 2:\n separated = False\n j += 1\n if melody_info[i + j].vi in [0, 4, 7] and not separated:\n bad_intervals_count += 1\n vscores.append(float(bad_intervals_count) / (len(cantus_firmus) - 1))\n if debug:\n print \"VSCORE 15: BAD INTERVALS - \", bad_intervals_count\n\n # vscore 16: successive unisons, octaves and fifths not on first beats:\n # valid when separated by at least 2 notes, otherwise not.\n # Unless it is a consonant suspension of quarter note: ok for afterbeat fifths and 
octaves\n # separated only by a single quearter.\n # ***what a complex rule, we don't care about consonant suspension due to storing format >_<***\n bad_intervals_count = 0\n\n for i in range(len(melody_info) - 2):\n separated = True\n if melody_info[i].beat != 0:\n if melody_info[i].vi in [0, 4, 7]:\n if melody_info[i + 1].vi in [0, 4, 7]:\n separated = False\n if separated:\n if melody_info[i + 2].vi in [0, 4, 7]:\n separated = False\n if not separated:\n bad_intervals_count += 1\n\n vscores.append(float(bad_intervals_count) / (len(melody_info) - len(cantus_firmus)))\n\n if debug:\n print \"VSCORE 16: BAD INTERVALS - \", bad_intervals_count\n\n # vscore 17: no ottava or quinta battuda, whatever it means\n vscores.append(float(battuda) / len(cantus_firmus))\n\n if debug:\n print \"VSCORE 17: BATTUDAS - \", battuda\n\n # vscore 18: best ending: dissonant suspension into the leading tone (ensured by generation)\n vscores.append(0)\n\n # vscore 19: Thirds, sixths and tenths should predominate.\n good_interval_count = len(filter(lambda x: x.vi in [2, 5, 9], melody_info))\n if good_interval_count * 2 > len(melody_info):\n vscores.append(0.0)\n else:\n vscores.append(1.0 - float(2 * good_interval_count) / len(melody_info))\n\n if debug:\n print \"VSCORE 19: 3RDS 6THS 10THS - \", good_interval_count\n\n genome.fitness = sum([x * y for x, y in zip(hscores, HSCORE_WEIGHTS)]) + \\\n sum([x * y for x, y in zip(vscores, VSCORE_WEIGHTS)])\n if debug:\n print \"HSCORES: \", hscores\n print \"VSCORES: \", vscores\n print \"FINAL SCORE: \", genome.fitness\n print \"FINAL SCORE UNSCALED: \", sum(hscores) + sum(vscores)\n return genome.fitness", "def _compute_likelihood(self, mus, pmfs):\n expected_counts = pmfs.copy()\n for mu, _p_bin_source in zip(mus, expected_counts):\n _p_bin_source *= mu # Works because of numpy view magic...\n expected_total = np.sum(expected_counts, axis=0)\n\n observed_counts = self.data_events_per_bin.histogram\n\n ret = observed_counts * np.log(expected_total) - expected_total - gammaln(observed_counts + 1.).real\n return np.sum(ret)", "def compute_movie_rating_likelihood(M):\n\n # define the size to begin with\n likelihood = np.zeros((M, M))\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE FOR PART (c)\n #\n # Remember to normalize the likelihood, so that each column is a\n # probability distribution.\n \n for i in range(M):\n for j in range(M):\n if i == j:\n likelihood[i][j] = 2\n else:\n likelihood[i][j] = 1/abs(j-i)\n \n likelihood = likelihood / likelihood.sum(axis = 1)\n \n #\n # END OF YOUR CODE FOR PART (c)\n # -------------------------------------------------------------------------\n\n return likelihood", "def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, 
start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein", "def outputGenerationProbability(self):\n self.b = zeros((self.noOfEmmittingStates, self.T))\n for row in range(self.noOfEmmittingStates):\n for col in range(self.T):\n self.b[row, col] = self.gaussianDist(self.observationSequence[0, col],\n self.outputProbabilities[row, 0],\n self.outputProbabilities[row, 1])", "def boost_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n Lib_c.set_target(possible_target_location)\n probabilities[possible_target_location] = integrate.quad(\n Lib_c.function,\n -np.inf, np.inf,\n epsabs=0,\n limit=50,\n full_output=1\n )[0]\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def get_ngramlogprobs_fromcorpus(tokenizedseqs, n):\n return", "def design_guide_and_donor_for_genes(input_design_table_filename, input_annot_gff_filename, input_genome_fasta_filename,\n PAM_seq = 'GA', excluded_seqs = ['TTTT'], BOWTIE_exe = '',\n off_targets_min_mismatch = 10,\n min_azimuth_score = 0.5,\n sort_by = '5to3', \n min_dist_cut_to_donor_edge = 30,\n filter_out_donors_containing_excluded_seqs = False,\n max_guide_pos_frac_in_gene = 1.1,\n output_guides_df_filename = '', output_donors_df_filename = ''):\n \n ##################\n # processing input\n ##################\n\n # fixing the mapping command to be the default bowtie\n if not BOWTIE_exe or len(BOWTIE_exe) < 3:\n BOWTIE_exe = '/Users/eilon/software/bowtie2-2.2.8/bowtie2'\n \n # defining the bowtie mapping cmd\n bowtie_cmd = shlex.split(BOWTIE_exe + ' -x ' + os.path.splitext(input_genome_fasta_filename)[0] + ' -U - -f -D 20 -R 3 -N 1 -L 10 -i S,1,0.50 --gbar 3 --end-to-end -k 30 --no-head -t --rdg 10,6 --rfg 10,6')\n mapping_cmd = bowtie_cmd\n\n \n # loading design matrix \n # fields: gene_id\tguide_num\tdonor_mut_type\tnum_donor_variants\tmut_pos_in_guide\tdonor_length\tdonor_seq_offsets\n\n design_df = pd.read_table(input_design_table_filename, sep='\\t', na_values = \"\")\n design_df['donor_seq_offsets'] = design_df['donor_seq_offsets'].apply(ast.literal_eval)\n design_df['donor_seq_offsets'] = design_df['donor_seq_offsets'].apply(np.array,dtype=np.float)\n \n design_df['gene_id'] = design_df['gene_id'].str.strip()\n design_df['donor_mut_type'] = design_df['donor_mut_type'].str.strip()\n \n \n # loading gene gff matrix\n print \"loading genes gff file: \" + input_annot_gff_filename\n genes_gff_df = sgd_gff2dataframe(input_annot_gff_filename, ['CDS'])\n \n # loading genome fasta file\n print \"loading genome fasta file: \" + input_genome_fasta_filename\n genome_seq = SeqIO.to_dict(SeqIO.parse(open(input_genome_fasta_filename),'fasta', alphabet=generic_dna))\n \n\n \n # init output dataframes\n out_guide_df = pd.DataFrame(data=None)\n out_guide_donor_df = pd.DataFrame(data=None)\n \n ####################\n # running gene by gene such that the donor sequences will use the same guides\n # if differnet number of guides are specified for each donor desing the guides will be selected from best to worst\n ####################\n \n # grouping by 
gene\n design_grouped = design_df.groupby('gene_id')\n \n\n # iterating over the genes\n for cur_gene_id, cur_gene_df in design_grouped:\n\n print \"Designing guides for gene:\" + cur_gene_id\n \n # the current gene object\n cur_gene = CRISPR_Gene(cur_gene_id, genes_gff_df, genome_seq)\n \n # if differnet number of guides are specified for each donor desing the guides will be selected from best to worst\n cur_gene_max_guide_num = max(cur_gene_df['guide_num'])\n \n # if even one row requires the guide to be entirely in the CDS - find only guides that are entirely in the CDS\n #cur_require_entire_guide_to_be_in_CDS = np.any(~ np.isnan(cur_gene_df['mut_pos_in_guide'].values))\n # np.any(cur_gene_df['require_entire_guide_in_CDS'])\n #\n cur_min_mut_pos_in_guide = min(int(-cur_gene.CRISPR_CUT_INDEX - 1*(cur_gene.is_neg_strand())),np.nanmin(cur_gene_df['mut_pos_in_guide'].values))\n cur_max_mut_pos_in_guide = max(2,np.nanmax(cur_gene_df['mut_pos_in_guide'].values))\n \n \n # max half region length to look for excluded sequences (should be at least ~20 to test the guide)\n \n\n cur_max_donor_seq_offset = np.nanmax(np.abs(cur_gene_df['donor_seq_offsets'].apply(np.nanmax).values))\n if np.isnan(cur_max_donor_seq_offset):\n cur_max_donor_seq_offset = 0\n \n \n \n cur_max_seq_len_around_cut = int( np.ceil(max(cur_gene_df['donor_length']) / 2) + \\\n cur_max_donor_seq_offset)\n \n #print \"Extracting all guides\"\n \n # get all guides (computing filter guides that contain excluded sequences in the sequences around them)\n cur_all_gene_guides_df = cur_gene.get_all_guides_that_cut_in_cds(pam = PAM_seq, seq_len_around_cut = cur_max_seq_len_around_cut, \n min_mut_pos_in_guide = cur_min_mut_pos_in_guide,\n max_mut_pos_in_guide = cur_max_mut_pos_in_guide,\n excluded_seqs = excluded_seqs, mapping_cmd = mapping_cmd,\n sort_by = sort_by)\n \n #DEBUG\n #print \"before get_K_best_guide_ids\" #DEBUG\n #print sum(cur_all_gene_guides_df['guide_id']== 'YAL001C_pS_33') #DEBUG\n \n #print cur_all_gene_guides_df[ cur_all_gene_guides_df['guide_id']== 'YAL001C_pS_33' ]\n #print cur_all_gene_guides_df[ cur_all_gene_guides_df['guide_id']== 'YAL001C_pS_20' ]\n #print cur_gene_max_guide_num\n #print off_targets_min_mismatch\n #print min_azimuth_score\n #print max_guide_pos_frac_in_gene\n\n # select top 'best' guides\n selected_guide_ids = cur_gene.get_K_best_guide_ids(guides_df = cur_all_gene_guides_df, K = cur_gene_max_guide_num, \n off_targets_min_mismatch = off_targets_min_mismatch,\n min_azimuth_score = min_azimuth_score,\n max_guide_pos_frac_in_gene = max_guide_pos_frac_in_gene,\n sort_by = sort_by)\n #print \"after get_K_best_guide_ids\" #DEBUG\n #for gi in selected_guide_ids:\n # print gi\n #print ('YAL001C_pS_33' in list(selected_guide_ids) ) \n #print 'XXX-------------------' #DEBUG\n \n # selected guides (each guide should have at least one donor designed)\n cur_selected_guide_df = cur_all_gene_guides_df[ cur_all_gene_guides_df['guide_id'].isin( list(selected_guide_ids) ) ]\n \n print \"--- Designing donor sequences for gene:\" + cur_gene_id + \", for \" + str(len(selected_guide_ids)) + \" guides (out of \" + str(cur_gene_max_guide_num) + \" requested)\"\n #print cur_selected_guide_df[ cur_selected_guide_df['guide_id'] == 'YAL001C_pS_33' ]\n \n # concating with the output dataframe\n out_guide_df = pd.concat([out_guide_df,cur_selected_guide_df],ignore_index=True)\n\n \n # desinging the donor sequences\n for idx,row in cur_gene_df.iterrows():\n \n if len(selected_guide_ids) < int(row['guide_num']):\n 
warnings.warn(\"There are NOT enough guides in --- %s --- for the design. There are %d guides and the design if for %d\" % (str(row[\"gene_id\"]),len(selected_guide_ids), row['guide_num'] ))\n \n if len(selected_guide_ids) <= 0:\n continue\n \n cur_selected_guide_ids = selected_guide_ids.iloc[range( min(int(row['guide_num']),len(selected_guide_ids)) )]\n \n \n # do reverse complement for donor sequences\n if 'do_revcomp_donor' in cur_gene_df.columns:\n do_revcomp_donor = bool(row['do_revcomp_donor']==True)\n else:\n do_revcomp_donor = False\n \n \n \n \n # do reverse complement for donor sequences\n if 'do_scramble_guide_and_donor' in cur_gene_df.columns:\n scramble_guide_and_donor = bool(row['do_scramble_guide_and_donor']==True)\n else:\n scramble_guide_and_donor = False\n \n #print \"do_revcomp_donor %d\" % (do_revcomp_donor)\n #print \"scramble_guide_and_donor %d\" % (scramble_guide_and_donor)\n \n cur_all_gene_guides_df_fordonors = cur_all_gene_guides_df\n \n # permuting the guides and adding it to the guide df\n if scramble_guide_and_donor:\n print 'Scramble donor guides...'\n cur_all_gene_guides_df_scramble = cur_all_gene_guides_df[ cur_all_gene_guides_df['guide_id'].isin(cur_selected_guide_ids) ].copy()\n # scrambling the guides and updating their ids\n cur_all_gene_guides_df_scramble['guide_id'] = cur_all_gene_guides_df_scramble['guide_id'] + '_scramble'\n scramble_func = lambda x: ''.join(random.sample(str(x),len(str(x))))\n cur_all_gene_guides_df_scramble['guide'] = cur_all_gene_guides_df_scramble['guide'].apply(scramble_func)\n cur_all_gene_guides_df_scramble['guide_noPAM'] = cur_all_gene_guides_df_scramble['guide_noPAM'].apply(scramble_func)\n cur_all_gene_guides_df_scramble['guide_PAM_p7'] = cur_all_gene_guides_df_scramble['guide_PAM_p7'].apply(scramble_func)\n cur_all_gene_guides_df_scramble['guide_PAM_m4p3'] = cur_all_gene_guides_df_scramble['guide_PAM_m4p3'].apply(scramble_func)\n # adding the scrambeles guides to the guides table\n out_guide_df = pd.concat([out_guide_df,cur_all_gene_guides_df_scramble],ignore_index=True)\n\n \n # for donors design\n cur_all_gene_guides_df_fordonors = cur_all_gene_guides_df_scramble\n cur_selected_guide_ids = cur_selected_guide_ids + '_scramble'\n \n \n cur_gene_donor_df = cur_gene.get_donor_mut_for_guides(cur_all_gene_guides_df_fordonors, cur_selected_guide_ids, \n donor_mut_type=str(row['donor_mut_type']), \n num_donor_variants = int(row['num_donor_variants']), \n mut_pos_in_guide = row['mut_pos_in_guide'], \n donor_length=int(row['donor_length']), \n donor_seq_offsets = row['donor_seq_offsets'],\n set_name = row['set_name'],\n min_dist_cut_to_donor_edge = min_dist_cut_to_donor_edge,\n excluded_seqs = excluded_seqs,\n do_revcomp_donor = do_revcomp_donor,\n scramble_guide_and_donor = scramble_guide_and_donor)\n \n out_guide_donor_df = pd.concat([out_guide_donor_df,cur_gene_donor_df],ignore_index=True)\n\n #print \"---------------------------- Finished designing guide for the above gene\"\n\n\n # adding for each guide tis location in the gene (now added in )\n #out_guide_df[\"guide_cut_gene_pos_frac\"] = out_guide_df[\"guide_cut_gene_nt_pos\"] / out_guide_df[\"CDS_len_nts\"]\n\n # filtering out donor sequences with excluded sequences\n if filter_out_donors_containing_excluded_seqs:\n if out_guide_donor_df.shape[1] > 1: # if not null df\n out_guide_donor_df = out_guide_donor_df[ (~ out_guide_donor_df[\"contain_excluded_sequences\"]).values ]\n \n \n if len(output_guides_df_filename) > 3:\n print \"saving guides to: \" + 
output_guides_df_filename\n out_guide_df.to_csv(output_guides_df_filename, sep='\\t', index = False)\n \n \n \n if len(output_donors_df_filename) > 3:\n print \"saving donor sequences to: \" + output_donors_df_filename\n out_guide_donor_df.to_csv(output_donors_df_filename, sep='\\t', index = False)\n \n \n return( (out_guide_df, out_guide_donor_df) )", "def generate_rbpdb_experimental_to_pwm(letter_strength, n_repeat_req):\n rbpdb_experiment_file_path = (\n \"./website/data/RBPDB_v1.3.1_experiments_human_2012-11-21.tdt\"\n )\n rbpdb_pfm_file_directory = \"./website/data/rbpdb-human-pfm-matrices/\"\n experimental_to_pwm_dict = {}\n with open(rbpdb_experiment_file_path) as handle:\n line = handle.readline()\n while line:\n columns = line.split(\"\\t\")\n # Here we expect the columns to be:\n # experimental_id, PUBMED_ID, exp_type, notes, seq_motif,\n # selex_file, aligned_selex_file,\n # aligned_motif_file, PWM_file, PFM_file, logo_file,\n # secondary_structure, in_vivo_notes, in_vivo_file, flag\n if columns[14] == \"1\":\n # The flag means this data is unreliable, according to the RBPDB\n # Readme files\n line = handle.readline()\n continue\n\n experimental_id = columns[0]\n\n assert len(experimental_id) > 0\n pfm_file = columns[9]\n seq_motifs = columns[4]\n pwms = []\n if pfm_file != \"\\\\N\":\n pfm_file_path = rbpdb_pfm_file_directory + pfm_file\n with open(pfm_file_path) as pfm_file_handle:\n raw_pwm_str = pfm_file_handle.read()\n pwm = str_to_pwm(raw_pwm_str, is_transpose=True)\n pwms += [pwm]\n elif seq_motifs not in (\"\\\\N\", \"\"):\n # This experiment still generated some useful data\n seq_motifs = seq_motifs.split(\";\")\n i = 0\n while i != len(seq_motifs):\n seq_motif = seq_motifs[i]\n while \")(\" in seq_motif:\n repeat_end = seq_motif.find(\")(\")\n assert seq_motif[repeat_end] == \")\"\n repeat_start = repeat_end\n while seq_motif[repeat_start] != \"(\":\n repeat_start -= 1\n\n number_start = repeat_end + 2\n assert (\n seq_motif[number_start].isdigit()\n or seq_motif[number_start] == \"n\"\n )\n number_end = number_start\n while seq_motif[number_end] != \")\":\n number_end += 1\n\n # deal with cases where the number of repeats is\n # \"15-30\". 
Take minimum to be conservative.\n # Note that most cases would be a single number like\n # \"15\".\n num_of_repeats = (\n min(\n [\n int(s) if s != \"n\" else\n math.ceil(\n n_repeat_req\n / (repeat_end - repeat_start - 1)\n )\n for s in\n seq_motif[number_start: number_end]\n .split(\"-\")\n ]\n )\n )\n\n seq_motif = (\n seq_motif.replace(\n seq_motif[repeat_start: number_end + 1],\n seq_motif[repeat_start + 1: repeat_end]\n * num_of_repeats\n )\n )\n\n\n maketrans = str.maketrans\n all_letters = 'wruysn'\n upper_map = maketrans(all_letters, all_letters.upper())\n seq_motif = seq_motif.translate(upper_map)\n if \"/\" in seq_motif:\n bracket_start = bracket_end = middle = (\n seq_motif.find(\"/\")\n )\n while seq_motif[bracket_start] != \"(\":\n bracket_start -= 1\n while seq_motif[bracket_end] != \")\":\n bracket_end += 1\n seq_motif_1 = (\n seq_motif.replace(\n seq_motif[bracket_start: bracket_end + 1],\n seq_motif[bracket_start + 1: middle]\n )\n )\n seq_motif_2 = (\n seq_motif.replace(\n seq_motif[bracket_start: bracket_end + 1],\n seq_motif[middle + 1: bracket_end]\n )\n )\n seq_motifs += [seq_motif_1, seq_motif_2]\n else:\n pwm = motif_to_pwm(\n seq_motif, letter_strength=letter_strength\n )\n pwms += [pwm]\n i += 1\n\n # Now we have the raw text, we convert it to pwm and add to\n # dictionary\n experimental_to_pwm_dict[experimental_id] = pwms\n line = handle.readline()\n\n return experimental_to_pwm_dict", "def get_ngramlogprobs(freqdict):\n return", "def model_likelihood(\n self, obs: Tensor, actions: Tensor, next_obs: Tensor\n ) -> Tensor:\n return self.model.log_prob(obs, actions, next_obs)", "def _logp(self, trace, **inputs):\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = norm.logpdf(x=inputs['gwas_phen'],\n loc=phen_pred,\n scale=step['phenotype_sigma'])\n return phen_prob\n\n phen_probs = [calc_log(trace[idx])\n for idx in np.random.randint(0, len(self.trace), 500)]\n phen_probs = np.asmatrix(phen_probs)\n mc_logp = phen_probs.sum(axis=1).mean()\n return mc_logp", "def calc_prob_prior(iterations, lam):\n return list(map(lambda x: math.exp(-lam * x), range(iterations)))", "def evaluate_ngrams(eval_dataset, trigram_counts, bigram_counts, unigram_counts, train_token_count, lambda1, lambda2):\n perplexity = 0\n\n ### YOUR CODE HERE\n def calc_prob(sentense, i, word, trigram_counts, bigram_counts, unigram_counts, train_token_count, model):\n prob = 0.0\n prev_word = sentense[i - 1]\n prev_to_prev_word = sentense[i - 2]\n\n if model == \"unigram\":\n if word in unigram_counts:\n prob = (unigram_counts[word] + 0.0) / train_token_count\n else:\n prob = (unigram_counts[word_to_num['UUUNKKK']] + 0.0) / \\\n train_token_count\n\n if model == \"bigram\":\n if (prev_word, word) in bigram_counts:\n prob = (bigram_counts[(prev_word, word)] + 0.0) / \\\n unigram_counts[prev_word]\n # print(num_to_word[prev_word] ,num_to_word[word])\n # print(bigram_counts[(prev_word, word)])\n # print(unigram_counts[prev_word])\n # print(\"---------------------------\")\n else:\n prob = 0.0\n\n if model == \"trigram\":\n if (prev_to_prev_word, prev_word, word) in trigram_counts:\n prob = (trigram_counts[(prev_to_prev_word, prev_word, word)] + 0.0) \\\n / bigram_counts[(prev_to_prev_word, prev_word)]\n # / bigram_counts[(prev_word, word)] #this according to lecture notes slide 27\n else:\n prob = 0.0\n\n return prob\n\n l = 0\n num_of_words = 0\n\n ##########3\n better_than_chance = 0\n ###########\n\n for 
sentense in eval_dataset:\n for i, word in enumerate(sentense[2:]):\n num_of_words += 1\n prob = lambda1 * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts, unigram_counts,\n train_token_count, \"trigram\") + \\\n lambda2 * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts, unigram_counts,\n train_token_count, \"bigram\") + \\\n (1 - lambda1 - lambda2) * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts,\n unigram_counts, train_token_count, \"unigram\")\n ######################################\n if prob > (1.0 / vocabsize):\n better_than_chance += 1\n #########################\n l += np.log2(prob)\n l /= num_of_words\n perplexity = 2 ** -l\n\n print(\"better_than_chance:\", (better_than_chance + 0.0) / num_of_words)\n\n ### END YOUR CODE\n return perplexity", "def compute_AttentionLRP(self, cell_state, relevance_a, output_states):\r\n\r\n\r\n\t\t#Reconstructing the concatenated encoder states\r\n\t\tmax_encoding_len = len(output_states)\r\n\t\tu = np.zeros(output_states[0].shape)\r\n\t\tfor i in range(max_encoding_len):\r\n\t\t\tu += cell_state[\"alpha\"][i, 0] * output_states[i]\r\n\t\ta = np.matmul(self.attentonLayer, np.concatenate([cell_state[\"lstm\"]['last_h'], u]))\r\n\r\n\t\t# LRP for the attention layer\r\n\t\tinp_a_rel = layerLRP(np.concatenate([cell_state[\"lstm\"]['last_h'], u]), self.attentonLayer, np.zeros((self.attentionSize, 1)), a, relevance_a)\r\n\r\n\t\th_relevance= inp_a_rel[:self.cellSize]\r\n\t\tu_relevance = inp_a_rel[self.cellSize:]\r\n\r\n\t\tforward_encoder_relevance = []\r\n\t\tbackward_decoder_relevance = []\r\n\r\n\r\n\t\tinput_lrp_vector = np.concatenate([cell_state[\"alpha\"][i, 0] * output_states[i] for i in range(max_encoding_len)])\r\n\t\tinput_lrp_matrix = np.concatenate([np.identity(2*self.cellSize) for i in range(max_encoding_len)], 1)\r\n\t\t#for i in range(max_encoding_len):\r\n\t\t\t#inp_c_rel = layerLRP(cell_state[\"alpha\"][i, 0] * output_states[i], np.identity(2*self.cellSize), np.zeros((2*self.cellSize, 1)), u, u_relevance, epsilon = 0.001, delta=1.0)\r\n\t\t\t#forward_encoder_relevance.append(inp_c_rel[:self.cellSize])\r\n\t\t\t#backward_decoder_relevance.append(inp_c_rel[self.cellSize:])\r\n\t\tinp_c_rel = layerLRP(input_lrp_vector, input_lrp_matrix, np.zeros((2*self.cellSize, 1)), u, u_relevance)\r\n\t\tfor i in range(max_encoding_len):\r\n\t\t\tforward_encoder_relevance.append(inp_c_rel[2*i*self.cellSize:(2*i+1)*self.cellSize])\r\n\t\t\tbackward_decoder_relevance.append(inp_c_rel[(2*i+1)*self.cellSize:(2*(i+1))*self.cellSize])\r\n\r\n\t\treturn h_relevance, forward_encoder_relevance, backward_decoder_relevance", "def detectionProb(efficiency, trigger=1):\n if trigger > 1.:\n raise ValueError('Trigger > 1 not implemented yet\\n')\n q = 1.0 - np.asarray(efficiency)\n\n # probability of 0 detections\n logq = np.log(q)\n logpiq = logq.sum()\n piq = np.exp(logpiq)\n\n return 1.0 - piq", "def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])", "def updateResidueProbAnnotation(residueProb):\n\n for resonance in residueProb.resonanceGroup.resonances:\n updateResonanceAnnotation(resonance)", "def figure_of_merit_from(self, parameter_list):\r\n return self.log_likelihood_from(parameter_list=parameter_list)", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def prob_to_llre(meth_prob):\n return math.log((meth_prob + EPSLONG) / (1 - meth_prob + 
EPSLONG))", "def compute_log_probability_of_text(text, char_to_ix, frequency_statistics, transition_matrix):\n t = text\n cix = char_to_ix\n fr = frequency_statistics\n tm = transition_matrix\n \n i0 = cix[t[0]]\n p = np.log(fr[i0])\n i = 0\n while i < len(t)-1:\n i1 = cix[t[i+1]]\n p += np.log(tm[i0, i1])\n i0 = i1\n i += 1\n \n return p", "def main(rand,mu,lamb,cxpb,mutpb,ngen,param):\n \n random.seed(rand)\n NGEN = ngen\n MU = mu\n LAMBDA = lamb\n CXPB = cxpb\n MUTPB = mutpb\n \n # Used for printing the results. It is the parameter that is changed one run from another\n if param==\"rand\" or param==\"optimal\":\n list_results=[rand]\n elif param==\"mu\":\n list_results=[mu]\n elif param==\"lamb\":\n list_results=[lamb]\n elif param==\"cross\":\n list_results=[cxpb]\n elif param==\"mutate\":\n list_results=[mutpb]\n elif param==\"ngen\":\n list_results=[ngen]\n elif param==\"original\":\n list_results=[0]\n \n # Initialization of the objects for the GA\n pop = toolbox.population(n=MU)\n hof = tools.ParetoFront()\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean, axis=0)\n stats.register(\"std\", np.std, axis=0)\n stats.register(\"min\", np.min, axis=0)\n stats.register(\"max\", np.max, axis=0)\n\n # Run of the GA\n p,logbook=algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,\n halloffame=hof,verbose=0)\n with open(results_path+param+'_logbook.csv', 'a',newline='') as f:\n w = csv.DictWriter(f, logbook[0].keys())\n w.writeheader()\n for el in logbook:\n w.writerow(el)\n w.writerow({})\n \n # Takes the max fitness of the population from all of the runs\n max_fit=0\n max_gen=0\n for elt in logbook:\n if elt['max'][0]>max_fit:\n max_fit=elt['max'][0]\n max_gen=elt['gen']\n list_results.append(max_fit)\n list_results.append(max_gen)\n \n #TODO\n# for ind in hof:\n# dist = numpy.linalg.norm(a-b)\n\n print (\"{0} {1} {2} {3}\".format(round(list_results[1],3),round(list_results[2],3),round(list_results[0],3),hof[0]))\n current_out_writer.writerow([list_results[0],list_results[1],list_results[2],hof[0]])\n \n return pop, stats, hof", "def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans 
* y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans", "def prob4():\n #get Omega, F, and initialize\n Omega = np.array([[-3/2,3/4],[0,1],[0,1/2],[0,1]])\n f = lambda x: np.exp(np.sum(-x**2/2,axis=0))/(2*np.pi)**(2)\n means, cov = np.zeros(4), np.eye(4)\n truth = scipy.stats.mvn.mvnun(list(Omega[:,0]),list(Omega[:,1]), means, cov)[0]\n domain = np.logspace(1,5,20)\n approxs = []\n error = []\n for N in domain:\n #calculate approx for various sizes of samples\n approx = mc_integrate(f,Omega[:,0],Omega[:,1],N)\n approxs.append(approx)\n #calculate relative err.\n error.append(np.abs((truth-approx)/truth))\n #PLOT it all\n plt.title(\"Error vs Sample Size\")\n plt.plot(domain,1/np.sqrt(domain),label = \"1/sqrt(N)\")\n plt.plot(domain,error,label = \"Error\")\n plt.loglog()\n plt.xlabel(\"N\")\n plt.ylabel(\"Relative Error\")\n plt.legend()\n plt.show()", "def gene_heritability(\n input_snp_filename: \"Data Input, use the SNPs file from dataParse\",\n output_genes_filename: 'output file for gene-level results, use .csv',\n output_summary_filename: 'output file for the genomewide results summary, use .csv',\n logger_filename: 'file for the logger, use a txt',\n sweeps: \"number of samples for each chain\" = 1000,\n burnin: \"number of burnin samples\" = 1000,\n n_chains: \"number of chains of the sampler\" = 4,\n n_cores: \"number of parallel cores to use\" = 4,\n N_1kG: \"number of SNPs onwhich the LD-score is calculated\" = 1290028,\n chromosome: \"chromosome on which the analysis is run\" = \"all\",\n snp_thr: \"threshold for the minimum number of SNPs in a gene\" = 10,\n sep: \"separator for the input files, use t for tab separated (not \\t)\" = \",\",\n model: 'specify the model for the regression, one betwenn normal/gamma' = 'normal',\n fix_intercept = False,\n ):\n\n # Initialisation of the logger\n output_logger = log.setup_logger(\"output_logger\", logger_filename)\n log.initialise_log(output_logger,\n 'gene level regression, model: %s' %model,\n [input_snp_filename],\n [output_genes_filename,output_summary_filename],\n sweeps,\n burnin,\n chromosome = str(chromosome),\n other_params_diz = {'chains': n_chains, 'cores': n_cores, 'SNP threshold': snp_thr})\n\n # Initialisation function, it reads the summary stats file, filters the SNPs,\n # creates the output files\n\n logging.info(\"Start Analysis\")\n\n snps = s.Snps()\n # read table\n logging.info(\"Reading SNP file: %s,\\n\\t with %s delimiter\"%(input_snp_filename, sep))\n snps.read_table(input_snp_filename, separator=sep)\n # generate chi squared stats\n snps.generate_stats()\n # update the summary stats\n snps.update_summary()\n output_logger.info(\" Sample size \" + str(snps.n_patients) + \"\\n\")\n\n\n\n snps.apply_filter_table(s.baghera_filter)\n snps.update_summary()\n output_logger.info(\"After baghera init filter.\\n\\t Number of SNPs: %s\\n\\t Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Non coding SNPs are assigned to a dummy gene, such that the regression is done on the entire SNPs' set\n snps.rename_non_annotated(name='NonCoding')\n\n if chromosome != \"all\":\n snps.apply_filter_table(snps.cut_single_chrom, **{'chromosome': chromosome})\n output_logger.info(\n \"Analysis restricted to chr %s\" %str(chromosome) )\n\n snps.update_summary()\n output_logger.info(\"Analysis. 
Number of SNPs: %s\\n, Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Creates the genes table with the number of SNPs for each gene and the basic stats values\n genes=g.Genes()\n genes.initialise_genes(snps.table.copy(), snps_thr=snp_thr)\n\n output_logger.info(\"Output gene table initialised:\\nNumber of genes: %s\\n\" \\\n %(str(genes.n_genes)) )\n\n snps.set_non_annotated(genes.cut_genes, 'NonCoding')\n\n if model == 'gamma':\n result = gr.analyse_gamma(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept,\n )\n else:\n result = gr.analyse_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept,\n )\n\n logging.info(\"Saving genes table\")\n genes.table = genes.table.merge(\n result, left_index=False, left_on=\"name\", right_on=\"name\")\n\n k = genes.table.n_snps / float(N_1kG)\n genes.table[\"h2g\"] = genes.table.bg_mean.astype(\"float\") * k\n\n genes.table = genes.table.sort_values(by=[\"P\", \"bg_median\"])\n\n genes.save_table(output_genes_filename)\n\n non_coding = genes.table[genes.table.name == \"NonCoding\"]\n h2g_tot = np.sum(genes.table[\"h2g\"].values) - non_coding[\"h2g\"].values\n\n output_logger.info(\" Non coding heritability : \" +\n str(non_coding[\"h2g\"].values) + \"\\n\")\n output_logger.info(\" Coding heritability : \" + str(h2g_tot) + \"\\n\")", "def plot_likelihood(expected_posts_per_month, probability):\n bar_amount = max(10, int(5 * expected_posts_per_month * probability)) # at least 10 bars, not too long of a tail\n print(\"Generating likelihood plot\")\n distribution = [binom.pmf(option, expected_posts_per_month, probability) for option in range(bar_amount)]\n plt.bar(range(bar_amount), distribution)\n plt.xlabel(\"occurrences\")\n plt.ylabel(\"likelihood\")\n plt.title(\"Likelihood of word occurences next month\")\n plt.show()", "def annotate_ISM(data_df, REFERENCE, position_list, reference_genbank_name=\"data/covid-19-genbank.gb\"):\n seq_list = data_df['sequence'].values.tolist()\n \n seq_index = []\n index = 0\n for base in REFERENCE[1]:\n if base == '-':\n seq_index.append(index)\n else:\n index += 1\n seq_index.append(index)\n reference_local_index_map = np.array(seq_index)\n mapped_reference_index = []\n for index, entropy in position_list:\n mapped_reference_index.append((index, reference_local_index_map[index], entropy))\n REFERENCE_ISM = ''.join([REFERENCE[1][item[0]] for item in position_list])\n logging.info('Reference ISM: {}.'.format(REFERENCE_ISM))\n \n gene_dict = load_gene_dict(reference_genbank_name)\n reference_raw = REFERENCE[1].replace('-', '')\n res = OrderedDict()\n res['Ref position'] = []\n res['Entropy'] = []\n res['Gene'] = []\n res['Is silent'] = []\n res['AA position'] = []\n for align_index, ref_index, entropy in mapped_reference_index:\n codon, codon_idx, name, codon_pos = find_SNP(ref_index, gene_dict, reference_raw)\n base_freq = Counter([item[align_index] for item in seq_list]).most_common()\n for alt_base, count in base_freq:\n if alt_base != reference_raw[ref_index-1]:\n break\n if codon is None:\n if_silence = True\n else:\n alt_codon = list(codon)\n alt_codon[codon_idx] = alt_base\n alt_codon = ''.join(alt_codon)\n ref_aa = translate(codon)\n ism_aa = translate(alt_codon)\n if ref_aa == ism_aa:\n if_silence = True\n else:\n if_silence = False\n res['Ref position'].append(ref_index)\n res['Entropy'].append(entropy)\n if name is None:\n name = 'Non-coding'\n res['Gene'].append(name)\n 
res['Is silent'].append(if_silence)\n if codon_pos is None:\n res['AA position'].append('NaN')\n else:\n res['AA position'].append('{}{}{}'.format(ref_aa, codon_pos, ism_aa))\n annotation_df = pd.DataFrame.from_dict(res)\n return annotation_df", "def gw_heritability(\n input_snp_filename: \"Data Input, use the SNPs file from dataParse\",\n output_summary_filename: 'output file for the genomewide results summary, use .csv',\n logger_filename: 'file for the logger, use a txt',\n sweeps: \"number of samples for each chain\" = 1000,\n burnin: \"number of burnin samples\" = 1000,\n n_chains: \"number of chains of the sampler\" = 4,\n n_cores: \"number of parallel cores to use\" = 4,\n N_1kG: \"number of SNPs onwhich the LD-score is calculates\" = 1290028,\n chromosome: \"chromosome on which the analysis is run\" = \"all\",\n sep: \"separator for the input files, use t for tab separated (not \\t)\" = \",\",\n model: 'regression model'='normal',\n fix_intercept = False,\n ):\n\n # Initialisation of the logger\n output_logger = log.setup_logger(\"output_logger\", logger_filename)\n log.initialise_log(output_logger,\n 'genome-wide regression, model: %s' %model,\n [input_snp_filename],\n [output_summary_filename],\n sweeps,\n burnin,\n chromosome = str(chromosome),\n other_params_diz = {'chains': n_chains, 'cores': n_cores})\n\n # Initialisation function, it reads the summary stats file, filters the SNPs,\n # creates the output files\n\n logging.info(\"Start Analysis\")\n\n snps = s.Snps()\n # read table\n snps.read_table(input_snp_filename, separator=sep)\n # generate chi squared stats\n snps.generate_stats()\n # update the summary stats\n snps.update_summary()\n output_logger.info(\" Sample size \" + str(snps.n_patients) + \"\\n\")\n\n\n snps.apply_filter_table(s.baghera_filter)\n snps.update_summary()\n output_logger.info(\"After baghera init filter.\\nNumber of SNPs: %s\\nNumber of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Non coding SNPs are assigned to a dummy gene, such that the regression is done on the entire SNPs' set\n snps.rename_non_annotated(name='NonCoding')\n\n if chromosome != \"all\":\n snps.apply_filter_table(snps.cut_single_chrom, **{'chromosome': chromosome})\n output_logger.info(\n \"Analysis restricted to chr %s\" %str(chromosome) )\n\n snps.update_summary()\n output_logger.info(\"Analysis. 
Number of SNPs: %s\\n, Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n\n if model =='normal':\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n elif model=='gamma':\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n else:\n logging.info('Normal model by default')\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n logging.info(\"Analysis complete\")", "def log_likelihood_grad_rew(self, data, reward_model, bias_params):", "def gene_finder(dna, threshold):\n\n # YOUR IMPLEMENTATION HERE", "def process_observations(message, agent):\n if not message:\n print(\"Message is empty\");\n # return None\n else:\n # # Check if joint values are in the expected order and size.\n if message.joint_names != agent['joint_order']:\n # Check that the message is of same size as the expected message.\n if len(message.joint_names) != len(agent['joint_order']):\n raise MSG_INVALID_JOINT_NAMES_DIFFER\n\n # Check that all the expected joint values are present in a message.\n if not all(map(lambda x,y: x in y, message.joint_names,\n raise MSG_INVALID_JOINT_NAMES_DIFFER\n print(\"Joints differ\")\n return np.array(message.actual.positions) # + message.actual.velocities\n\ndef get_jacobians(state, scara_chain, jac_solver):\n \"\"\"\n Produce a Jacobian from the urdf that maps from joint angles to x, y, z.\n This makes a 6x6 matrix from 6 joint angles to x, y, z and 3 angles.\n The angles are roll, pitch, and yaw (not Euler angles) and are not needed.\n Returns a repackaged Jacobian that is 3x6.\n \"\"\"\n # Initialize a Jacobian for scara_chain.getNrOfJoints() joint angles by 3 cartesian coords and 3 orientation angles\n jacobian = Jacobian(scara_chain.getNrOfJoints())\n # Initialize a joint array for the present self.scara_chain.getNrOfJoints() joint angles.\n angles = JntArray(scara_chain.getNrOfJoints())\n # Construct the joint array from the most recent joint angles.\n for i in range(scara_chain.getNrOfJoints()):\n angles[i] = state[i]\n # Update the jacobian by solving for the given angles.observation_callback\n jac_solver.JntToJac(angles, jacobian)\n # Initialize a numpy array to store the Jacobian.\n J = np.array([[jacobian[i, j] for j in range(jacobian.columns())] for i in range(jacobian.rows())])\n # Only want the cartesian position, not Roll, Pitch, Yaw (RPY) Angles\n ee_jacobians = J\n return ee_jacobians", "def compute_probabilities():\n global total_spam_words, total_ham_words\n total_words = total_spam_words+total_ham_words\n unique_words = len(all_dict)\n print(\"Training Set Description: \")\n len_ham = len(ham_file_list)\n len_spam = len(spam_file_list)\n print(\"SPAM EMAILS: \",len_spam)\n print(\"HAM EMAILS: \",len_ham)\n print(\"Total words: \",total_words)\n print(\"Training...\")\n \n spam_probability = math.log((len_spam)/(len_spam+len_ham))\n ham_probability = math.log((len_ham)/(len_spam+len_ham))\n \n \n \n output_file = open(\"nbmodel.txt\", \"w+\", encoding=\"latin-1\")\n output_file.write(\"model_params \"+str(spam_probability)+\" \"+str(ham_probability)+\"\\n\")\n \n nbmodel = {}\n nbmodel[\"model_params\"] = (spam_probability,ham_probability)\n for word in all_dict.keys():\n spam_count = 1\n if word in spam_dict:\n spam_count+= spam_dict[word]\n \n 
word_spam_probability = math.log(spam_count / (total_spam_words+unique_words))\n \n ham_count = 1\n if word in ham_dict:\n ham_count+= ham_dict[word]\n \n word_ham_probability = math.log(ham_count / (total_ham_words+unique_words))\n \n output_file.write(word+\" \"+str(word_spam_probability)+\" \"+str(word_ham_probability)+\"\\n\")\n nbmodel[word] = (word_spam_probability, word_ham_probability) \n \n print(\"nbmodel.txt generated successfully...\")\n print(\"SPAM Probability: \",spam_probability)\n print(\"HAM Probability: \",ham_probability)\n output_file.close()", "def compute_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n probabilities[possible_target_location] = integrate.quad(\n integral_function,\n -np.inf, np.inf,\n args=(possible_target_location,Dprime_map[fixation]),\n epsabs=0,\n limit=100,\n full_output=1\n )[0] #MOD Dprime_map deleted\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted", "def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob", "def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")", "def giveMotevoParamFile(genome, wmlen, inter_dir, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior, bgorder, bgprior):\n\n ##UFE_models from genome_dict are not used anymore\n #UFEmodel_hg19 is UFE model for mammal species\n genome_dict = {}\n genome_dict['hg19'] = ['((((hg19:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau6:0.186713,(equCab2:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom5:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_hg19']\n genome_dict['hg18'] = ['((((hg18:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau3:0.186713,(equCab1:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom4:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFE_mammals']\n #genome_dict['dm3'] = ['((((((dm3:0.059,droSim1:0.075):0.041,(droYak2:0.104,droEre2:0.107):0.054):0.120,droAna3:0.377):0.072,dp4:0.397):0.061,droWil1:0.536):0.020,((droVir3:0.196,droMoj3:0.255):0.073,droGri2:0.291):0.337);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_dm3']\n genome_dict['dm3'] = ['((((((dm3:0.059,droSim1:0.075):0.041,(droYak2:0.104,droEre2:0.107):0.054):0.120,droAna3:0.377):0.072,dp4:0.397):0.061,droWil1:0.536):0.020,((droVir3:0.196,droMoj3:0.255):0.073,droGri2:0.291):0.337);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/dm3UFEparallel/UFEmodel_dm3']\n genome_dict['mm9'] = ['((((hg19:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau7:0.186713,(equCab2:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom5:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_mm9']\n\n\n sitefilepath = os.path.join(inter_dir, 'sites_' + tag)\n priorfilepath = os.path.join(inter_dir, 'priors_' + tag)\n loglikfile = os.path.join(inter_dir, 'loglik_' + tag)\n\n\n print '\\nCreate motevo parameter file %s' %tag\n print 'aligned', aligned\n if aligned:\n motevo_params = '\\n'.join(['refspecies %s' %genome,\n 'TREE %s' %genome_dict[genome][0],\n 'Mode TFBS',\n 'EMprior %s' %emprior,\n 
'priordiff %s' %0.05,\n 'UFEwmprior %s' %200,\n 'UFEwmfile %s' %ufemodel_path,\n 'UFEwmlen %s' %wmlen,\n 'UFEprint %s' %0,\n 'markovorderBG %s' %bgorder,\n 'bgprior %s' %bgprior,\n 'bg A %s' %ATfreq,\n 'bg T %s' %ATfreq,\n 'bg G %s' %GCfreq,\n 'bg C %s' %GCfreq,\n 'restrictparses %s' %0,\n 'sitefile %s' %sitefilepath,\n 'priorfile %s' %priorfilepath,\n 'printsiteals %s' %0,\n 'minposterior %f' %0.0,\n 'loglikfile %s' %loglikfile])\n else:\n motevo_params = '\\n'.join(['refspecies %s' %genome,\n 'TREE (%s: 1)' %genome,\n 'Mode TFBS',\n 'EMprior %s' %emprior,\n 'priordiff %s' %0.05,\n 'markovorderBG %s' %bgorder,\n 'bgprior %s' %bgprior,\n 'bg A %s' %ATfreq,\n 'bg T %s' %ATfreq,\n 'bg G %s' %GCfreq,\n 'bg C %s' %GCfreq,\n 'restrictparses %s' %0,\n 'sitefile %s' %sitefilepath,\n 'priorfile %s' %priorfilepath,\n 'printsiteals %s' %0,\n 'minposterior %f' %0.0,\n 'loglikfile %s' %loglikfile]) \n\n params_path = os.path.join(inter_dir, 'motevo_TFBS_params_' + tag)\n pf = open(params_path, 'w')\n pf.write(motevo_params)\n return (params_path, sitefilepath, priorfilepath, loglikfile)", "def log_prob_of_file(filepath, model):\n vocab = set(counts_un.keys())\n tot = 0\n count = 0\n prev_prev = \"<s>\\n\"\n prev = \"<s>\\n\"\n with open(filepath) as f:\n for line in f:\n count += 2\n line = line.strip()+\"\\n\"\n tri_prob = model.get_trigram_prob(prev_prev, prev, line)\n tot += math.log(tri_prob)\n prev_prev = prev\n prev = line \n for line in [\"</s>\\n\", \"</s>\\n\"]:\n tri_prob = model.get_trigram_prob(prev_prev, prev, line)\n tot += math.log(tri_prob)\n prev_prev = prev\n prev = line \n return tot, count", "def __compute_log_likelihood(self, outputs, data, boolean):\n end_loc, pi_loc, mu_loc, sigma_loc, rho_loc = outputs\n\n mu1_loc, mu2_loc = mu_loc[:, :, :20], mu_loc[:, :, 20:]\n sig1_loc, sig2_loc = (\n sigma_loc[:, :, :20] + 10e-10,\n sigma_loc[:, :, 20:] + 10e-10,\n )\n\n x1_loc = data[:, 1:, 1].unsqueeze(2).to(self.__device)\n x2_loc = data[:, 1:, 2].unsqueeze(2).to(self.__device)\n x3_loc = data[:, 1:, 0].to(self.__device)\n\n end_loc = end_loc[:, :-1, -1].to(self.__device)\n pi_loc = pi_loc[:, :-1, :].to(self.__device)\n mu1_loc = mu1_loc[:, :-1, :].to(self.__device)\n mu2_loc = mu2_loc[:, :-1, :].to(self.__device)\n sig1_loc = sig1_loc[:, :-1, :].to(self.__device)\n sig2_loc = sig2_loc[:, :-1, :].to(self.__device)\n rho_loc = rho_loc[:, :-1, :].to(self.__device)\n\n boolean = boolean[:, :-1].to(self.__device)\n\n gauss = pi_loc * self.__bivariate_gaussian(\n sig1_loc, sig2_loc, mu1_loc, mu2_loc, x1_loc, x2_loc, rho_loc\n )\n gauss = torch.sum(gauss, dim=2).to(self.__device)\n\n log_lik = torch.sum(\n -boolean * torch.log(gauss + 10e-10)\n - boolean * torch.log(end_loc + 10e-10) * (x3_loc)\n - boolean * torch.log(1 - end_loc + 10e-10) * (1 - x3_loc)\n )\n\n return log_lik", "def __compute_log_likelihood(self, outputs, data, boolean):\n end_loc, pi_loc, mu_loc, sigma_loc, rho_loc = outputs\n\n mu1_loc, mu2_loc = mu_loc[:, :, :20], mu_loc[:, :, 20:]\n sig1_loc, sig2_loc = (\n sigma_loc[:, :, :20] + 10e-10,\n sigma_loc[:, :, 20:] + 10e-10,\n )\n\n x1_loc = data[:, 1:, 1].unsqueeze(2).to(self.__device)\n x2_loc = data[:, 1:, 2].unsqueeze(2).to(self.__device)\n x3_loc = data[:, 1:, 0].to(self.__device)\n\n end_loc = end_loc[:, :-1, -1].to(self.__device)\n pi_loc = pi_loc[:, :-1, :].to(self.__device)\n mu1_loc = mu1_loc[:, :-1, :].to(self.__device)\n mu2_loc = mu2_loc[:, :-1, :].to(self.__device)\n sig1_loc = sig1_loc[:, :-1, :].to(self.__device)\n sig2_loc = sig2_loc[:, :-1, 
:].to(self.__device)\n rho_loc = rho_loc[:, :-1, :].to(self.__device)\n\n boolean = boolean[:, :-1].to(self.__device)\n\n gauss = pi_loc * self.__bivariate_gaussian(\n sig1_loc, sig2_loc, mu1_loc, mu2_loc, x1_loc, x2_loc, rho_loc\n )\n gauss = torch.sum(gauss, dim=2).to(self.__device)\n\n log_lik = torch.sum(\n -boolean * torch.log(gauss + 10e-10)\n - boolean * torch.log(end_loc + 10e-10) * (x3_loc)\n - boolean * torch.log(1 - end_loc + 10e-10) * (1 - x3_loc)\n )\n\n return log_lik", "def backward(log_emlik, log_startprob, log_transmat):\n N, M = log_emlik.shape\n backward_prob = np.zeros((N,M))\n\n backward_prob[N-1, :] = 0.0\n\n for i in range(N-2,-1,-1):\n for k in range(M):\n # probability of transitioning from k to state l * probability of emitting symbol at state l at ts i+1 * recursive backward probability\n backward_prob[i,k] = logsumexp(log_transmat[k,:] + log_emlik[i+1,:] + backward_prob[i+1,:])\n\n return backward_prob", "def sentence_logprob(self, sentence):\n grams = get_ngrams(sentence, 3)\n p = 1\n\n for gram in grams:\n p *= np.longfloat(self.smoothed_trigram_probability(gram))\n\n return np.log2(p)", "def dna_probability(dna:str, gc:float, return_log=False) -> float:\n at = (1 - gc) / 2.0\n gc /= 2.0\n\n p = 1\n for l in dna:\n if l in \"AT\":\n p *= at\n elif l in \"CG\":\n p *= gc\n else:\n raise ValueError(\"You should use dna string.\")\n if return_log:\n return math.log(p, 10)\n else:\n return p", "def likelihood(params,data):\n spec, isnflux, igalflux = data\n chi2=0\n modflux = (params[0]*isnflux + params[1]*igalflux)\n chi2 += sum((spec.flux - modflux)**2)/((0.05*sum(spec.var)**2)/2.0)\n return np.exp(-chi2/2.0)", "def test_GA():\n\tgenerationSize = 150\n\tmutationProb = 0.01\n\tgenerations = 500\n\tX = []\n\tT = []\n\tY = [] \n\tfitnesses = [0]*generationSize\n\tfor i in range(DATA_POINTS_NUM):\n\t\tX.append((i - DATA_POINTS_NUM/2)*0.1)\n\t\tT.append(polynomi_3N(REFERENCE, X[-1]))\n\t\tY.append(0)\n\t\n\tga = GA.GA(generationSize, 4, mutationProb)\n\tgenomes = ga.seedGenomes()\n\t#plot initial genomes\n\tplt.figure(1)\n\tplt.title('Initial genomes')\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tprint Genome\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome, X[j]))\n\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t\n\t#live and learn\n\tfor k in range(generations):\n\t\tprint \".\",\n\t\tfor i in range(len(genomes)):\n\t\t\tGenome = prescale(genomes[i])\n\t\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tga.fitnessUpdate(fitnesses)\n\t\tgenomes = ga.createNextGeneration()\n\t\t\n\t#plot final genomes\n\tplt.figure(2)\n\tplt.title('Final genomes')\n\tprint \"\\nfinal Genomes\"\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\tprint \"fit:%5.1f [%7.4f, %7.4f, %7.4f, %7.4f]\"%\\\n\t\t (calculate_fitness(T, Y), Genome[0],\n\t\t Genome[1], Genome[2], Genome[3])\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t#plot progress\n\tP = []\n\thistory = ga.generations[:]\n\tfor f in history:\n\t\t#f[1].sort()\n\t\tP.append(max(f[1]))\n\tplt.figure(3)\n\tplt.title('progress')\n\tplt.plot(P)\n\tplt.show()\n\t\n\t#print the result:\t\n\tbestGene = fitnesses.index(max(fitnesses))\n\tG = prescale(genomes[bestGene])\n print \"\"\n\tprint \"And the result is:\"\n\tprint \"%.4f => %.4f (%.4f)\"%(A, 
G[0], abs(A - G[0]))\n\tprint \"%.4f => %.4f (%.4f)\"%(B, G[1], abs(B - G[1]))\n\tprint \"%.4f => %.4f (%.4f)\"%(C, G[2], abs(C - G[2]))\n\tprint \"%.4f => %.4f (%.4f)\"%(D, G[3], abs(D - G[3]))", "def return_likelihood(self, params):\n\n self._y_pred = self._int_spec(self._x, **params) * self._atten\n\n # Add the cascade\n if self._casc is not None:\n params_casc = deepcopy(params)\n\n # apply the weights\n if self._cr_spec is not None:\n # add units to the parameters where neccessary\n params_casc['Prefactor_CR'] *= u.Unit(\"TeV-1 cm-2 s-1\")\n params_casc['Scale_CR'] *= u.Unit(\"eV\").to(\"eV\") * u.eV\n params_casc['Emin_CR'] *= u.Unit(\"eV\").to(\"eV\") * u.eV\n params_casc['Emax_CR'] *= u.Unit(\"eV\").to(\"eV\") * u.eV\n self._casc.apply_spectral_weights(lambda x: self._cr_spec(x, **params_casc),\n smooth=True)\n\n else:\n # add units to the parameters where neccessary\n params_casc['Prefactor'] *= u.Unit(\"TeV-1 cm-2 s-1\")\n params_casc['Scale'] *= u.Unit(\"TeV\").to(\"eV\") * u.eV\n params_casc['Cutoff'] *= u.Unit(\"TeV\").to(\"eV\") * u.eV\n self._casc.apply_spectral_weights(lambda x: self._int_spec(x, **params_casc),\n smooth=True)\n\n # and get the flux in the ON region\n spec_halo = self._casc.get_obs_spectrum(\n region=self._on_region\n )\n # convert the units back\n flux_unit_conversion = (spec_halo.quantity.unit).to(\"TeV-1 cm-2 s-1\")\n\n # either add directly if energy bins are the same or use 1D interpolation\n if self._interp_casc:\n m = spec_halo.data[:, 0, 0] > 0.\n if not np.sum(m):\n raise ValueError(\"Predicted cascade flux is zero!\")\n interp = interp1d(np.log(spec_halo.geom.get_axis_by_name('energy').center.to(\"TeV\").value[m]),\n np.log(spec_halo.data[:, 0, 0][m] * flux_unit_conversion),\n fill_value='extrapolate', kind='cubic'\n )\n self._y_pred += np.exp(interp(np.log(self._x)))\n\n else:\n self._y_pred += spec_halo.data[:, 0, 0] * flux_unit_conversion\n\n if self._cov_inv is None:\n self._llh = -1. * ((self._y - self._y_pred) ** 2. / self._dy ** 2.).sum()\n else:\n self._llh = -1. * np.dot(self._y - self._y_pred, np.dot(self._cov_inv, self._y - self._y_pred))\n\n # add contribution from profile likelihood\n if self._llh_fermi_interp is not None:\n # change parameters to the values over which grid was interpolated\n params_llh = deepcopy(params)\n params_llh['Prefactor'] *= u.Unit(\"TeV-1 cm-2 s-1\").to(\"MeV-1 cm-2 s-1\")\n params_llh['Index'] *= -1.\n self._llh_fermi = 2. * self._llh_fermi_interp([params_llh['Cutoff'],\n -1. * params_llh['Index'],\n np.log10(params_llh['Prefactor'])])[0]\n else:\n self._llh_fermi = 0\n\n return -1. 
* (self._llh + self._llh_fermi)", "def get_log_likelihood(response_probability, observed_response):\n \n return np.log(response_probability[observed_response])", "def test_relevance_with_itself():\n state = gen_state_cgpm(get_data_separated)\n assert np.allclose(state.relevance_probability(2, [2], 1), 1.0)", "def p(self) -> Probability:\n ...", "def _trajectory_prob(self, trajectory):\n\n # Accumulate probabilities in log space for numerical stability\n logprob = np.log(self._starting_prob(trajectory[0][0]))\n logprob += np.sum([\n np.log(self._transition_prob(s1, a, s2))\n for (s1, a), (s2, _) in zip(trajectory[:-1], trajectory[1:])\n ])\n\n return np.exp(logprob)", "def log_probability(self, sequence):\n sequence = self._transform(sequence)\n\n T = len(sequence)\n\n if T > 0 and sequence[0][_TAG]:\n last_state = sequence[0][_TAG]\n p = self._priors.logprob(last_state) + self._output_logprob(\n last_state, sequence[0][_TEXT]\n )\n for t in range(1, T):\n state = sequence[t][_TAG]\n p += self._transitions[last_state].logprob(\n state\n ) + self._output_logprob(state, sequence[t][_TEXT])\n last_state = state\n return p\n else:\n alpha = self._forward_probability(sequence)\n p = logsumexp2(alpha[T - 1])\n return p", "def _create_log_likelihood(self, individual):\n # Get individuals data\n times = []\n observations = []\n mask = self._data[self._id_key] == individual\n data = self._data[mask][\n [self._time_key, self._obs_key, self._value_key]]\n for output in self._mechanistic_model.outputs():\n # Mask data for observable\n observable = self._output_observable_dict[output]\n mask = data[self._obs_key] == observable\n temp_df = data[mask]\n\n # Filter times and observations for non-NaN entries\n mask = temp_df[self._value_key].notnull()\n temp_df = temp_df[[self._time_key, self._value_key]][mask]\n mask = temp_df[self._time_key].notnull()\n temp_df = temp_df[mask]\n\n # Collect data for output\n times.append(temp_df[self._time_key].to_numpy())\n observations.append(temp_df[self._value_key].to_numpy())\n\n # # Count outputs that were measured\n # # TODO: copy mechanistic model and update model outputs.\n # # (Useful for e.g. 
control group and dose group training)\n # n_measured_outputs = 0\n # for output_measurements in observations:\n # if len(output_measurements) > 0:\n # n_measured_outputs += 1\n\n # Create log-likelihood and set ID to individual\n log_likelihood = chi.LogLikelihood(\n self._mechanistic_model, self._error_models, observations, times)\n log_likelihood.set_id(individual)\n\n return log_likelihood", "def probabilities_of_structures(sequence, structure_list, react=None):\n ensemble_energy = get_ens_energy(sequence, react = react)\n energies = [get_stru_energy(x, sequence, react = react) for x in structure_list]\n probabilities = [energy_to_proba(ensemble_energy, x) for x in energies]\n #probabilities = normalize(probabilities, norm='l1').tolist()[0]\n return [(stru,proba) for stru,proba in zip(structure_list,probabilities)]", "def sentence_logprob(self, sentence):\n line = get_ngrams(sentence,3)\n log_por = 0.0\n for item in line:\n raw_por = self.smoothed_trigram_probability(item)\n log_por = log_por+math.log2(raw_por)\n\n return float(log_por)", "def _logprob(self, sample):\n return 0, 0", "def log_probability(self, samples):\n pass", "def calc_likelihood(par_num, par_rng):\n\n likelihoods = np.zeros(np.size(par_rng))\n\n trivial_prior = trivial_prior_class()\n\n pipe = pipeline(observables_generator=hammu12,\n likelihood=likelihood,\n prior=trivial_prior,\n optimizer_class=Hamiltonian_Monte_Carlo)\n\n parameters = [0]*hammu12.get_parameter_dimension()\n for par_val in par_rng:\n parameters[par_num] = par_val\n likelihoods[par_val-par_rng[0]] = pipe._calc_posterior(parameters)\n\n np.save('data%s_RM' % (par_num), likelihoods)", "def compute_joint_probability(token_list, token_probabilities, use_log_prob=False):\n\n log_prob = 0\n\n for word in token_list:\n\n # do not allow zero probabilites\n assert word in token_probabilities\n\n if use_log_prob:\n log_prob += token_probabilities[word]\n else:\n log_prob += log10(token_probabilities[word])\n\n if use_log_prob:\n return log_prob\n\n return 10**log_prob", "def MAP(cpts, obs, terms):\r\n\r\n # a list to store the computed probabilities\r\n all_sums = []\r\n # initialize all terms to false\r\n for value in range(len(terms)):\r\n terms[value] = [terms[value], '0']\r\n search_array = terms + obs\r\n # if all terms are being watched, just call MPE\r\n if len(search_array) == len(cpts):\r\n return MPE(cpts, obs)\r\n # we need to know what terms we aren't interested in so we start with \r\n # or terms and observations and note the variables that appear in CPT but\r\n # not in those\r\n dont_count = []\r\n for var in cpts:\r\n if [var[0], '0'] not in search_array and [var[0], '1'] not in search_array:\r\n dont_count.append(var[0])\r\n terms.append([var[0],'1'])\r\n # sort the terms to ensure correct ordering\r\n terms.sort()\r\n # creates a list of all possible bit strings\r\n # just an easy way to create all possible truth assignments\r\n seq = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=len(terms))]\r\n # loop through all possible truth assignments\r\n for j in range(len(seq)):\r\n # we initialize at probability = 100%\r\n chance = 1\r\n # assign the truth values\r\n for k in range(len(seq[j])):\r\n terms[k][1] = seq[j][k]\r\n # this computes the probability using the chaining rule\r\n for i in range(len(terms)):\r\n new_terms = terms[:-i-1] + obs\r\n new_terms.sort()\r\n chance *= probability(cpts,terms[-i-1], new_terms)\r\n # add the probabilities to our list\r\n all_sums.append(chance)\r\n combine = []\r\n # note all variables 
which weren't in obs or Vs\r\n for i in dont_count:\r\n combine.append(terms.index([i,'1']))\r\n # this will store the final probabilities\r\n final_array = [0] * len(seq)\r\n # another complicated looking loop, it just serves to combine probabilities\r\n # for example, if we have a CPT with x_1, x_2, x_3, x_4 and we observe \r\n # x_1 to be true and have Vs = [x_3, x_4] then we need to combine the \r\n # probabilities that are the same except for x_2 = true vs false\r\n for loc in combine:\r\n for sequence in range(len(seq)):\r\n for alt_sequence in range(sequence+1,len(seq)):\r\n if (seq[sequence][:loc] + seq[sequence][loc+1:]) == (seq[alt_sequence][:loc] + seq[alt_sequence][loc+1:]):\r\n final_array[sequence] = all_sums[sequence] + all_sums[alt_sequence]\r\n\r\n # get the truth assignment for the highest probability\r\n location = seq[final_array.index(max(final_array))]\r\n truth_assignment = []\r\n # place the truth assignment in a more readable fashion\r\n for value in range(len(terms)):\r\n if terms[value] in search_array:\r\n if location[value] == '0':\r\n truth_assignment.append(terms[value][0]+ ' = False')\r\n else:\r\n truth_assignment.append(terms[value][0]+ ' = True')\r\n return (truth_assignment)", "def random_strings(sequence, GC_array):\r\n\r\n AT = 0\r\n GC = 0\r\n\r\n for nt in sequence:\r\n if nt == \"A\" or nt == \"T\":\r\n AT += 1\r\n elif nt == \"G\" or nt == \"C\":\r\n GC += 1\r\n\r\n probabilities = []\r\n\r\n #Calculate probability of G = probability of C = %GC / 2\r\n #Calculate probability of A = probability of T = (1 - %GC) / 2\r\n\r\n #For each consecutive base in provided sequence:\r\n #1. Convert total probability to logarithm using math.log(probability, base=10)\r\n #2. Total probability to be multiplied by probability of specifically that base\r\n\r\n for i in range(len(GC_array)):\r\n prob = (AT * math.log10((1 - GC_array[i])/2)) + (GC * math.log10(GC_array[i]/2))\r\n\r\n probabilities.append('%0.3f' % prob)\r\n\r\n print(*probabilities, sep= \" \")", "def log_marg_likelihood(self):\n self.A = np.linalg.inv(self.Sn)\n term1 = self.t - [email protected]\n self.Evidence_mN = (self.beta/2)*np.linalg.norm(term1)+ (self.alpha/2)*[email protected]\n A_abs = np.linalg.eigvals(self.A)\n A_abs = np.prod(A_abs)\n\n self.marg_lik = ((self.p)/2)*np.log(self.alpha) + (self.n/2)*np.log(self.beta) - self.Evidence_mN - (1/2)*np.log(A_abs) - (self.n/2)*np.log(2*np.pi)\n\n return self.marg_lik", "def Log_OB_S1(xref,x):\n\n nX = np.shape(x)\n\n m = nX[0]\n n = nX[1]\n t = nX[2]\n\n G = np.zeros((n,t))\n Gv = np.zeros((m,n,t))\n\n for r in range(t):\n\n # Correct for permuations\n\n #Xout,PiA= CorrectPerm(xref,x[:,:,r])\n Xout = dp(x[:,:,r])\n\n for q in range(n):\n\n a = np.sum(Xout[:,q]*xref[:,q])/np.sqrt(np.sum(xref[:,q]**2)*np.sum(Xout[:,q]**2)) # Should have unit L2 norm\n if a > 1:\n a = 1\n if a < -1:\n a = -1\n G[q,r] = np.arccos(a) # Computing the angles\n v = Xout[:,q] - a*xref[:,q]\n Gv[:,q,r] = v / (1e-12 + np.linalg.norm(v)) # Unit vector in the tangent subspace\n\n return G,Gv", "def _log_likelihood_poisson(self, df, dfo, n_bins=10):\n cond = df[\"selected_jig\"].values == 1\n range = parameter_ranges['uae'], parameter_ranges['rec']\n\n uae_obs = dfo[\"mueff_av\"].values\n rec_obs = dfo[\"rec_arcsec\"].values\n obs, xedges, yedges = np.histogram2d(uae_obs, rec_obs, range=range, bins=n_bins)\n\n uae_mod = df[\"uae_obs_jig\"].values[cond]\n rec_mod = df[\"rec_obs_jig\"].values[cond]\n model, _, _ = np.histogram2d(uae_mod, rec_mod, range=range, bins=n_bins, 
density=True)\n\n # Rescale model by number of observations\n model = model.astype(\"float\") * dfo.shape[0]\n\n # Calculate Poisson probability for each bin\n obs = obs.reshape(-1).astype(\"float\")\n model = model.reshape(-1)\n probs = stats.poisson(mu=model).pmf(obs)\n\n # Return overall log likelihood\n return np.log(probs).sum()", "def GoAnnot(prots, gos, onlyProts=False):\r\n with resources.open_text(\"autoprot.data\",\"Homo_sapiens.gene_info\") as d:\r\n geneInfo = pd.read_csv(d, sep='\\t')\r\n with resources.open_text(\"autoprot.data\",\"gene2go_alt\") as d:\r\n gene2go = pd.read_csv(d, sep='\\t')\r\n prots = pd.DataFrame(pd.Series([str(i).upper().split(';')[0] for i in prots]), columns=[\"Gene names\"])\r\n prots = prots.merge(geneInfo[[\"Symbol\", \"GeneID\"]], left_on=\"Gene names\", right_on=\"Symbol\", how='inner')\r\n \r\n prots = prots.merge(gene2go[[\"GeneID\", \"GO_ID\", \"GO_term\"]], on=\"GeneID\", how='inner')\r\n if onlyProts == True:\r\n for idx, go in enumerate(gos):\r\n if idx == 0:\r\n redProts = prots[\"Symbol\"][prots[\"GO_term\"].str.contains(go)]\r\n else:\r\n redProts = redProts.append(prots[\"Symbol\"][prots[\"GO_term\"].str.contains(go)])\r\n return redProts.drop_duplicates()\r\n else: \r\n for idx, go in enumerate(gos):\r\n if idx == 0:\r\n redProts = prots[prots[\"GO_term\"]==go]\r\n else:\r\n redProts = redProts.append(prots[prots[\"GO_term\"]==go])\r\n return redProts.drop_duplicates()", "def calculate_likelihood_probability(measurement, predicted_measurement, covariance):\n \n return None", "def compute_correspondence_likelihoods(self, measurement,\r\n number_of_landmarks,\r\n Qt_measurement_covariance):\r\n likelihoods = []\r\n for i in range(number_of_landmarks):\r\n likelihoods.append(\r\n self.compute_weight(measurement, i, Qt_measurement_covariance))\r\n \r\n return likelihoods", "def label_propagate_probabilistic(embeddings, seeds_map, normalize=True, **kwargs):\n words = embeddings.iw\n M = transition_matrix(embeddings, **kwargs)\n teleport_set_map={}\n for seed_key, seed_list in seeds_map.items():\n teleport_set_map[seed_key]=teleport_set(words, seed_list)\n def update_seeds(r):\n idm= np.eye(len(seeds_map))\n for seed_key, w_indices in teleport_set_map.items():\n r[w_indices] = idm[seed_key]\n r /= np.sum(r, axis=1)[:, np.newaxis]\n r = run_iterative(M, np.random.random((M.shape[0], len(seeds_map))), update_seeds, **kwargs)\n polarities={}\n for i, w in enumerate(words):\n polarities[w]=Counter()\n for seed_key in seeds_map:\n polarities[w][seed_key]=r[i][seed_key]\n if normalize:\n polarities[w]=normalize_counter(polarities[w])\n return polarities", "def build_mm_df(sralist):\n\n def convert_to_codon(nts_array):\n \"\"\"\n pysam output is in nucleotides resolution, but scikit_curated_df uses codon resolution.\n This function converts nucleotide arrays to codon length (nts to codon resolution):\n \"\"\"\n \n nts_array = np.array(nts_array)\n codon_array = np.sum( np.reshape(A, (int(np.floor(nts_array[1]/3)),3) ), 1)/3.\n\n return codon_array\n\n\n def compute_mm(mmdata):\n \"\"\"\n get per gene average multi-mapping score\n \"\"\"\n\n mm_df = pd.DataFrame(columns=['ORF', 'MM'])\n counter = 0\n\n for gene in mmdata.keys():\n current_matrix = mmdata[gene]\n current_avrg = np.mean( np.sum(current_matrix, 1) / current_matrix.shape[1] )\n mm_df.loc[counter] = [gene, current_avrg]\n counter += 1\n\n return mm_df\n\n\n mm_mat = {}\n mm_pct = {}\n\n N = len(sralist)\n\n for ix, dataset in enumerate(sralist):\n samfile = 
pysam.AlignmentFile(TMP_DIR+'/ambiguous_reads/'+dataset+'_STAR_transcriptome_multi_mapped_sorted.bam', 'rb')\n genes_list = list(samfile.references)\n print(ix, dataset)\n\n for geneID in genes_list:\n # count the coverage of genomic positions by reads in region.\n # Returns: four array.arrays of the same length in order A C G T\n # The coverage is computed per-base [ACGT]\n cov = samfile.count_coverage(geneID, read_callback='nofilter')\n # Summ all 4 arrays\n cov_sum = np.sum(cov, axis=0)\n #print(geneID, cov_sum)\n codon_cov = convert_to_codon(cov_sum)\n codon_bool = np.asarray([1 if i > 0 else 0 for i in codon_cov])\n \n M = len(codon_bool)\n\n if ix == 0:\n \tmm_mat[geneID] = np.zeros((N,M)) * np.nan\n \n current_matrix = mm_mat[geneID]\n current_matrix[ix,:] = np.copy(codon_bool)\n mm_mat[geneID] = current_matrix\n\n\n mm_avrg = compute_mm(mm_mat)\n #mm_avrg.to_json('yeast_mm.json')\n #mm_avrg.to_csv('yeast_mm.txt', header=True, index=False, sep='\\t')\n\n \n mm_profile = {}\n theta_mm = 5\n for orf in mm_mat.keys():\n current_mat = mm_mat[orf]\n current_bool = np.sum(current_mat, 0) <= theta_mm\n mm_profile[orf] = current_bool\n\n with open('../data/processed/mm_consensus.pkl', 'wb') as f_mm:\n pickle.dump(mm_profile, f_mm)\n\n\n return mm_mat, mm_avrg, mm_profile", "def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE", "def ref_lamanno(\n fasta_path,\n gtf_path,\n cdna_path,\n intron_path,\n index_path,\n t2g_path,\n cdna_t2c_path,\n intron_t2c_path,\n temp_dir='tmp',\n overwrite=False,\n):\n results = {}\n if not os.path.exists(index_path) or overwrite:\n fasta_path = decompress_file(fasta_path, temp_dir=temp_dir)\n sorted_fasta_path, fasta_chromosomes = sort_fasta(\n fasta_path, os.path.join(temp_dir, SORTED_FASTA_FILENAME)\n )\n gtf_path = decompress_file(gtf_path, temp_dir=temp_dir)\n sorted_gtf_path, gtf_chromosomes = sort_gtf(\n gtf_path, os.path.join(temp_dir, SORTED_GTF_FILENAME)\n )\n logger.info('Splitting genome into cDNA at {}'.format(cdna_path))\n chromosomes = check_chromosomes(fasta_chromosomes, gtf_chromosomes)\n cdna_fasta_path = generate_cdna_fasta(\n sorted_fasta_path,\n sorted_gtf_path,\n cdna_path,\n chromosomes=chromosomes\n )\n results.update({'cdna_fasta': cdna_fasta_path})\n logger.info(\n 'Creating cDNA transcripts-to-capture at {}'.format(cdna_t2c_path)\n )\n cdna_t2c_result = create_t2c(cdna_fasta_path, cdna_t2c_path)\n results.update({'cdna_t2c': cdna_t2c_result['t2c']})\n logger.info('Splitting genome into introns at {}'.format(intron_path))\n intron_fasta_path = generate_intron_fasta(\n sorted_fasta_path,\n sorted_gtf_path,\n intron_path,\n chromosomes=chromosomes\n )\n results.update({'intron_fasta': intron_fasta_path})\n logger.info(\n 'Creating intron transcripts-to-capture at {}'.\n format(cdna_t2c_path)\n )\n intron_t2c_result = create_t2c(intron_fasta_path, intron_t2c_path)\n results.update({'intron_t2c': intron_t2c_result['t2c']})\n logger.info('Concatenating cDNA and intron FASTAs')\n combined_path = concatenate_files(\n cdna_fasta_path,\n intron_fasta_path,\n out_path=os.path.join(temp_dir, COMBINED_FILENAME),\n temp_dir=temp_dir\n )\n t2g_result = create_t2g_from_fasta(combined_path, t2g_path)\n results.update(t2g_result)\n index_result = kallisto_index(combined_path, index_path)\n results.update(index_result)\n else:\n logger.info(\n 'Skipping kallisto index because {} already exists. 
Use the --overwrite flag to overwrite.'\n .format(index_path)\n )\n\n return results", "def calories_protein(og, fg):\n\n return 0.994 * fg * real_extract(og, fg)", "def bayesian_info_criterion(log_likelihood, n_params, n_samples):\n return n_params * np.log(n_samples) - 2.0 * log_likelihood" ]
[ "0.64158255", "0.61762154", "0.6171898", "0.61255485", "0.6108559", "0.60751355", "0.5888883", "0.581115", "0.57994837", "0.5773769", "0.57539636", "0.5743196", "0.57412934", "0.57062215", "0.5637121", "0.5604803", "0.56041193", "0.55022335", "0.5498393", "0.543617", "0.53924245", "0.5376682", "0.5366281", "0.53575003", "0.5351058", "0.5326567", "0.53247386", "0.532451", "0.5315191", "0.53046423", "0.53007615", "0.52895355", "0.5286242", "0.5279394", "0.5272649", "0.5256475", "0.52495486", "0.5248279", "0.5241967", "0.523703", "0.52352494", "0.5231489", "0.5223748", "0.522235", "0.52157533", "0.52153933", "0.5211854", "0.52102554", "0.52069914", "0.52068955", "0.52005404", "0.51990205", "0.5191093", "0.5184872", "0.5178004", "0.515485", "0.5150197", "0.5142383", "0.51392275", "0.5138804", "0.5128439", "0.51066357", "0.51057214", "0.5103622", "0.5102116", "0.5100729", "0.5100422", "0.5100422", "0.5098312", "0.50956213", "0.50889736", "0.5086658", "0.50860214", "0.50852585", "0.50837", "0.5080683", "0.5075263", "0.5074123", "0.50736403", "0.50516945", "0.5051418", "0.5050595", "0.50469905", "0.5041392", "0.50409913", "0.50378263", "0.5036027", "0.50352263", "0.50314987", "0.5027727", "0.50216466", "0.50213647", "0.50196993", "0.5014496", "0.50070405", "0.49968448", "0.49965647", "0.49908167", "0.49891657", "0.49828002" ]
0.6003137
6
Fit an ellipse to an object. It returns the rotated rectangle in which the ellipse is inscribed.
def __CalculateEllipse(self, contour):
    if len(contour) > 5:
        return cv2.fitEllipse(contour)
    return cv2.minAreaRect(contour)
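A minimal, hedged usage sketch of the rotated-rectangle value returned above: both cv2.fitEllipse and cv2.minAreaRect yield the same tuple ((cx, cy), (width, height), angle), which cv2.ellipse can draw directly. The synthetic test image, the OpenCV 4.x findContours signature, and the drawing step are illustrative assumptions, not taken from the snippet itself.

import cv2
import numpy as np

# Hypothetical binary image; any single-channel thresholded image works here.
img = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(img, (100, 100), 40, 255, -1)

# OpenCV 4.x signature: returns (contours, hierarchy).
contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contour = contours[0]

# cv2.fitEllipse needs at least 5 contour points; fall back to the minimum-area
# rectangle otherwise, mirroring the method above. Both calls return the same
# rotated-rect tuple: ((cx, cy), (width, height), angle_in_degrees).
if len(contour) > 5:
    box = cv2.fitEllipse(contour)
else:
    box = cv2.minAreaRect(contour)

(cx, cy), (w, h), angle = box
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.ellipse(vis, box, (0, 0, 255), 2)  # draws the ellipse inscribed in that rotated rectangle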
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_ellipse(x,y):\r\n \r\n def fit(x,y):\r\n x = x[:,np.newaxis]\r\n y = y[:,np.newaxis]\r\n D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))\r\n S = np.dot(D.T,D)\r\n C = np.zeros([6,6])\r\n C[0,2] = C[2,0] = 2; C[1,1] = -1\r\n E, V = np.linalg.eig(np.dot(np.linalg.inv(S), C))\r\n n = np.argmax(np.abs(E))\r\n a = V[:,n]\r\n return a\r\n \r\n def ellipse_center(a):\r\n b,c,d,f,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[0]\r\n num = b*b-a*c\r\n x0=(c*d-b*f)/num\r\n y0=(a*f-b*d)/num\r\n return np.array([x0,y0])\r\n \r\n def ellipse_angle_of_rotation(a):\r\n b,c,a = a[1]/2, a[2], a[0]\r\n return 0.5*np.arctan(2*b/(a-c))\r\n \r\n def ellipse_axis_length(a):\r\n b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]\r\n up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\r\n down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\r\n down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\r\n res1=np.sqrt(up/down1)\r\n res2=np.sqrt(up/down2)\r\n return np.array([res1, res2])\r\n \r\n a = fit(x,y)\r\n center = ellipse_center(a)\r\n theta = ellipse_angle_of_rotation(a)\r\n [R1,R2] = ellipse_axis_length(a)\r\n\r\n return R1, R2, center, theta", "def fitEllipse(x,y):\r\n x = x[:,np.newaxis]\r\n y = y[:,np.newaxis]\r\n D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))\r\n S = np.dot(D.T,D)\r\n C = np.zeros([6,6])\r\n C[0,2] = C[2,0] = 2; C[1,1] = -1\r\n E, V = eig(np.dot(inv(S), C))\r\n n = np.argmax(np.abs(E))\r\n a = V[:,n]\r\n return a", "def create_ellipse(self, ratio):\n circ = Point(self.center).buffer(1.0)\n ell = affinity.scale(circ, float(\n self.lengths[0]*ratio), float(self.lengths[1]*ratio))\n ellr = affinity.rotate(ell, self.angle)\n return ellr", "def ellipse(self):\n f = self.img\n x = self.x\n y = self.y\n x2 = self.x2\n y2 = self.y2\n xy = self.xy\n self.a2 = (x2+y2) + sqrt(((x2-y2)/2.)**2 + xy**2)\n self.b2 = (x2+y2) - sqrt(((x2-y2)/2.)**2 + xy**2)\n self.a = sqrt(self.a2)\n self.b = sqrt(self.b2)\n tan2theta = 2* (xy/(x2-y2))\n self.theta = arctan(tan2theta)/2.\n denominator = sqrt(((x2-y2)/2)**2+xy**2)\n self.cxx = y2/denominator\n self.cyy = x2/denominator\n self.cxy = -2*xy/denominator", "def to_ellipse(self, factor=1.0):\n self._check_initialized()\n vals, vecs = sp.linalg.eigh(self.covariance)\n order = vals.argsort()[::-1]\n vals, vecs = vals[order], vecs[:, order]\n angle = np.arctan2(*vecs[:, 0][::-1])\n width, height = factor * np.sqrt(vals)\n return angle, width, height", "def fitEllipsetoContour(poly):\n from skimage.measure import EllipseModel\n\n model = EllipseModel()\n success=model.estimate(poly)\n\n if success:\n yc, xc, a, b, theta=model.params # xc, yc, a, b, theta <- in radiand\n # convert theta to degree\n theta=np.degrees(theta)\n theta=90.-theta\n # make sure major is larger than minor and theta is for major\n if a<b:\n [a,b]=[b,a]\n theta=theta-90.\n if theta<0.: theta=theta+180.\n params=(xc, yc, a, b, theta)\n return params\n else:\n return (0, 0, 0, 0, 0)", "def polarization_ellipse(self):\n self.ellipse = {}\n self.ellipse['d_lin'] = sqrt(self.Q**2 + self.U**2)/self.I\n self.ellipse['d_cir'] = abs(self.V)/self.I\n self.ellipse['d'] = sqrt(self.Q**2 + self.U**2 + self.V**2)/self.I\n if self.Q:\n self.ellipse['theta'] = 0.5*atan(self.U/self.Q)\n else:\n self.ellipse['theta'] = float('NaN')\n self.logger.debug(\"polarization_ellipse: theta = %f\",\n self.ellipse['theta'])\n\n if (self.Q**2 + self.U**2):\n self.ellipse['beta'] = 0.5*atan(self.V/sqrt(self.Q**2 + self.U**2))\n if self.V:\n self.ellipse['eccen'] = tan(self.ellipse['beta'])\n else:\n 
self.ellipse['eccen'] = 0.\n else:\n self.ellipse['beta'] = pi/4\n self.ellipse['eccen'] = 1.\n self.logger.debug(\"polarization_ellipse: beta = %f\",\n self.ellipse['beta'])\n self.logger.debug(\"polarization_ellipse: eccen = %f\",\n self.ellipse['eccen'])", "def drawFitEllipse(img, cnt):\n\td = fitEllipse(cnt)\n\tcv2.ellipse(img, d[\"center\"], (d[\"major\"], d[\"minor\"]), d[\"angle\"])", "def _proc_ellipse(self, tokens, filled):\n\n component = Ellipse(pen=self.pen,\n x_origin=tokens[\"x0\"],\n y_origin=tokens[\"y0\"],\n e_width=tokens[\"w\"],\n e_height=tokens[\"h\"],\n filled=filled)\n\n return component", "def ellipseDesc(lps):\r\n unit = 100 #units in QualiTree are in [mm], hence Pgl is in [dm] ?\r\n\r\n if isinstance(lps, pgl.Translated):\r\n cx, cy, cz = lps.translation\r\n else:\r\n print\"missing Translated from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n\r\n ori = lps.geometry\r\n\r\n if isinstance(ori, pgl.Oriented):\r\n rotMat = ori.transformation().getMatrix3()\r\n az, el, roll = rotMat.eulerAnglesZYX()\r\n else:\r\n print\"missing Oriented from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n az = 0\r\n \r\n scal = ori.geometry\r\n\r\n if isinstance(scal, pgl.Scaled):\r\n scMat = scal.transformation().getMatrix()\r\n rx, ry, rz, rt = scMat.getDiagonal()\r\n else:\r\n print\"missing Scaled from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n rx=ry=rz=1\r\n\r\n #x1, y1, z1 #Conversion repère MappleT (m) à reprère Qualitree (q) : Xq=Xm Yq=Zm Zq=-Ym. \r\n #Due to change of coordinate axis, rotation needs - pi <-- apparently not !\r\n #return cx*unit, cz*unit, -cy*unit, rx*unit, rz*unit, ry*unit, az-3.1415927\r\n\r\n return cx*unit, cz*unit, -cy*unit, rx*unit, rz*unit, ry*unit, az", "def _draw_ellipse(data, obj, draw_options):\n if isinstance(obj, mpl.patches.Circle):\n # circle specialization\n return _draw_circle(data, obj, draw_options)\n x, y = obj.center\n ff = data[\"float format\"]\n\n if obj.angle != 0:\n draw_options.append(\n f\"rotate around={{{obj.angle:{ff}}:(axis cs:{x:{ff}},{y:{ff}})}}\"\n )\n\n do = \",\".join(draw_options)\n content = (\n f\"\\\\draw[{do}] (axis cs:{x:{ff}},{y:{ff}}) ellipse \"\n f\"({0.5 * obj.width:{ff}} and {0.5 * obj.height:{ff}});\\n\"\n )\n content += _patch_legend(obj, draw_options, \"area legend\")\n\n return data, content", "def drawEllipse(img, center, axes, angle, startAngle=0, endAngle=360, color = (0,0,255), fill = -1):\n\tcv2.ellipse(img, center, axes, angle, startAngle, endAngle, color, fill)", "def DrawEllipseRect(*args, **kwargs):\n return _gdi_.DC_DrawEllipseRect(*args, **kwargs)", "def ellipse_dist_ratio_poly(self, theta, lwr):\n\n \"\"\"\n\n Params for FWD fit\n array([ 9.99999989e-01, 8.10852195e+07, 1.95444928e+00, 7.96543026e-02])\n this one is un-needed, since it's approximation y = 1\n\n Params for FWD_DIAG fit\n array([-0.00650758, 0.57761793, 0.35369061, 1.87834152])\n\n Params for ORTHOG fit\n array([-0.02014989, 5.7007518 , -0.83345416, 0.97711175])\n\n Params for BCK_DIAG fit\n array([-0.01608705, 9.44079769, -0.92071169, 0.89094967])\n\n Params for BCK fit\n array([ -0.01451187, 10.92674105, -0.93514904, 0.87868538])\n \"\"\"\n\n #fitting function\n def f(x,params):\n return params[0] + (1.0 / (params[1]*(x+params[2])**params[3]))\n\n #force float math, in case theta is an integer\n theta = float(theta)\n\n #into an angle index form:\n t = abs(int(4.0*theta/np.pi))\n\n if (t == 0) or (t == 8):\n return 1.0\n elif (t == 1) or (t == 7):\n #forward 
diagonal\n return f(lwr, [-0.00650758, 0.57761793, 0.35369061, 1.87834152])\n elif (t == 2) or (t == 6):\n #orthogonal\n return f(lwr, [-0.02014989, 5.7007518 , -0.83345416, 0.97711175])\n elif (t == 3) or (t == 5):\n #backward diagonal\n return f(lwr, [-0.01608705, 9.44079769, -0.92071169, 0.89094967])\n elif t == 4:\n #backward\n return f(lwr, [ -0.01451187, 10.92674105, -0.93514904, 0.87868538])\n else:\n #hmmm... TODO\n return 0.0", "def plot_ellipse(center, covariance, alpha, color):\n # eigenvalues and eigenvector of matrix covariance\n eigenvalues, eigenvector = np.linalg.eigh(covariance)\n order = eigenvalues.argsort()[::-1]\n eigenvector = eigenvector[:, order]\n\n # Calculate Angle of ellipse\n angle = np.degrees(np.arctan2(*eigenvector[:, 0][::-1]))\n\n # Calculate with, height\n width, height = 4 * np.sqrt(eigenvalues[order])\n\n # Ellipse Object\n ellipse = Ellipse(xy=center, width=width, height=height, angle=angle,\n alpha=alpha, color=color)\n\n ax = plt.gca()\n ax.add_artist(ellipse)\n\n return ellipse", "def add_ellipse(self, x, y, w, h, fill_style=None, outline_style=None):\n if w < 1 or h < 1:\n return\n fill_style = self.__prepare_style(fill_style, self.style.char)\n outline_style = self.__prepare_style(outline_style, 'o')\n # Bresenham's algorithm to plot ellipse is used\n a = w\n b = h - 1\n eight_a_square = 8 * a * a\n eight_b_square = 8 * b * b\n x_change = 4 * b * b * (1.0 - a)\n y_change = 4 * a * a * ((b & 1) + 1)\n ellipse_error = x_change + y_change + (b & 1) * a * a\n x0 = x\n x1 = x0 + w - 1\n y0 = y + h / 2\n y1 = y0 - (b & 1)\n outline_points = []\n while x0 <= x1:\n # add fill\n if x0 > x and x0 < x + w - 1:\n self.add_line(int(x0), int(y0), int(x0), int(y1), fill_style)\n self.add_line(int(x1), int(y0), int(x1), int(y1), fill_style)\n outline_points.append((int(x1), int(y0)))\n outline_points.append((int(x0), int(y0)))\n outline_points.append((int(x0), int(y1)))\n outline_points.append((int(x1), int(y1)))\n two_ellipse_error = 2 * ellipse_error\n if two_ellipse_error <= y_change:\n y0 += 1\n y1 -= 1\n y_change += eight_a_square\n ellipse_error += y_change\n if two_ellipse_error >= x_change or 2 * ellipse_error > y_change:\n x0 += 1\n x1 -= 1\n x_change += eight_b_square\n ellipse_error += x_change\n while y0 - y1 <= b:\n self.add_point(int(x0 - 1), int(y0), outline_style)\n self.add_point(int(x1 + 1), int(y0), outline_style)\n self.add_point(int(x0 - 1), int(y1), outline_style)\n self.add_point(int(x1 + 1), int(y1), outline_style)\n y0 += 1\n y1 -= 1\n # draw outline over fill\n for outline_point in outline_points:\n px, py = outline_point\n self.add_point(px, py, outline_style)", "def _getEllipseSize(self, pointInEllipse):\n x = abs(self.center[0] - pointInEllipse[0])\n y = abs(self.center[1] - pointInEllipse[1])\n if x == 0 or y == 0:\n return x, y\n # Ellipse definitions\n # e: eccentricity\n # a: length fron center to bounding box width\n # b: length fron center to bounding box height\n # Equations\n # (1) b < a\n # (2) For x,y a point in the ellipse: x^2/a^2 + y^2/b^2 = 1\n # (3) b = a * sqrt(1-e^2)\n # (4) e = sqrt(a^2 - b^2) / a\n\n # The eccentricity of the ellipse defined by a,b=x,y is the same\n # as the one we are searching for.\n swap = x < y\n if swap:\n x, y = y, x\n e = math.sqrt(x**2 - y**2) / x\n # From (2) using (3) to replace b\n # a^2 = x^2 + y^2 / (1-e^2)\n a = math.sqrt(x**2 + y**2 / (1.0 - e**2))\n b = a * math.sqrt(1 - e**2)\n if swap:\n a, b = b, a\n return a, b", "def tilted_ellipse(s, pos1, pos2, size_x, size_y, color, angle):\n 
surface = pygame.Surface((150, 150), pygame.SRCALPHA, 32).convert_alpha()\n ellipse(surface, color, (0, 0, size_x, size_y))\n surface2 = pygame.transform.rotate(surface, angle)\n return s.blit(surface2, (pos1, pos2))", "def ellipse(x,y,a,b):\n return ((x/float(a))**2 + (y/float(b))**2)", "def draw_ellipse_outline(center_x, center_y, width, height, color,\n border_width=1, tilt_angle=0):\n\n num_segments = 128\n\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n GL.glRotatef(tilt_angle, 0, 0, 1)\n GL.glLineWidth(border_width)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_LINE_LOOP)\n for segment in range(num_segments):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n GL.glVertex3f(x, y, 0.5)\n\n GL.glEnd()\n GL.glLoadIdentity()", "def fit_ellipse(*args, equatorial_radius, dequatorial_radius=0, center_f=0, dcenter_f=0, center_g=0,\n dcenter_g=0, oblateness=0, doblateness=0, position_angle=0, dposition_angle=0,\n loop=10000000, number_chi=10000, dchi_min=None, verbose=False, ellipse_error=0, sigma_result=1):\n from sora.extra import ChiSquare\n from sora.config.visuals import progressbar_show\n from astropy.coordinates import Angle\n from .core import Occultation\n\n v = {'dcenter_f': dcenter_f, 'dcenter_g': dcenter_g, 'doblateness': doblateness, 'dposition_angle': dposition_angle,\n 'dequatorial_radius': dequatorial_radius, 'ellipse_error': ellipse_error, 'sigma_result': sigma_result,\n 'dchi_min': dchi_min}\n for key, item in v.items():\n if item is not None and item < 0:\n raise ValueError(\"{} must be a positive number.\".format(key))\n\n values = []\n chord_name = []\n if len(args) == 0:\n raise ValueError('No occultation have been given as input.')\n for occ in args:\n if not isinstance(occ, Occultation):\n raise TypeError('Given argument must be an Occultation object.')\n for name, chord in occ.chords.items():\n if chord.status() == 'positive':\n if chord.is_able['immersion']:\n f, g, vf, vg = chord.get_fg(time='immersion', vel=True)\n err = np.linalg.norm([vf, vg])*chord.lightcurve.immersion_err\n values.append([f, g, err])\n chord_name.append(name + '_immersion')\n if chord.is_able['emersion']:\n f, g, vf, vg = chord.get_fg(time='emersion', vel=True)\n err = np.linalg.norm([vf, vg])*chord.lightcurve.emersion_err\n values.append([f, g, err])\n chord_name.append(name + '_emersion')\n\n controle_f0 = Time.now()\n f0_chi = np.array([])\n g0_chi = np.array([])\n a_chi = np.array([])\n obla_chi = np.array([])\n posang_chi = np.array([])\n chi2_best = np.array([])\n\n while len(f0_chi) < number_chi:\n progressbar_show(len(f0_chi), number_chi, prefix='Ellipse fit:')\n chi2 = np.zeros(loop)\n f0 = center_f + dcenter_f*(2*np.random.random(loop) - 1)\n g0 = center_g + dcenter_g*(2*np.random.random(loop) - 1)\n a = equatorial_radius + dequatorial_radius*(2*np.random.random(loop) - 1)\n obla = oblateness + doblateness*(2*np.random.random(loop) - 1)\n obla[obla < 0], obla[obla > 1] = 0, 1\n phi_deg = position_angle + dposition_angle*(2*np.random.random(loop) - 1)\n controle_f1 = Time.now()\n\n for fi, gi, si in values:\n b = a - a*obla\n phi = 
phi_deg*(np.pi/180.0)\n dfi = fi-f0\n dgi = gi-g0\n theta = np.arctan2(dgi, dfi)\n ang = theta+phi\n r_model = (a*b)/np.sqrt((a*np.sin(ang))**2 + (b*np.cos(ang))**2)\n f_model = f0 + r_model*np.cos(theta)\n g_model = g0 + r_model*np.sin(theta)\n chi2 += ((fi - f_model)**2 + (gi - g_model)**2)/(si**2 + ellipse_error**2)\n\n controle_f2 = Time.now()\n if dchi_min is not None:\n region = np.where(chi2 < chi2.min() + dchi_min)[0]\n else:\n region = np.arange(len(chi2))\n chi2_best = np.append(chi2_best, chi2[region])\n if verbose:\n print('Elapsed time: {:.3f} seconds.'.format((controle_f2 - controle_f1).sec))\n print(len(chi2[region]), len(chi2_best))\n f0_chi = np.append(f0_chi, f0[region])\n g0_chi = np.append(g0_chi, g0[region])\n a_chi = np.append(a_chi, a[region])\n obla_chi = np.append(obla_chi, obla[region])\n posang_chi = np.append(posang_chi, phi_deg[region])\n\n progressbar_show(number_chi, number_chi, prefix='Ellipse fit:')\n chisquare = ChiSquare(chi2_best, len(values), center_f=f0_chi, center_g=g0_chi, equatorial_radius=a_chi,\n oblateness=obla_chi, position_angle=posang_chi)\n controle_f4 = Time.now()\n if verbose:\n print('Total elapsed time: {:.3f} seconds.'.format((controle_f4 - controle_f0).sec))\n\n result_sigma = chisquare.get_nsigma(sigma=sigma_result)\n a = result_sigma['equatorial_radius'][0]\n f0 = result_sigma['center_f'][0]\n g0 = result_sigma['center_g'][0]\n obla = result_sigma['oblateness'][0]\n phi_deg = result_sigma['position_angle'][0]\n radial_dispersion = np.array([])\n error_bar = np.array([])\n position_angle_point = np.array([])\n\n for fi, gi, si in values:\n b = a - a*obla\n phi = phi_deg*(np.pi/180.0)\n dfi = fi-f0\n dgi = gi-g0\n r = np.sqrt(dfi**2 + dgi**2)\n theta = np.arctan2(dgi, dfi)\n ang = theta+phi\n r_model = (a*b)/np.sqrt((a*np.sin(ang))**2 + (b*np.cos(ang))**2)\n radial_dispersion = np.append(radial_dispersion, r - r_model)\n error_bar = np.append(error_bar, si)\n position_angle_point = np.append(position_angle_point, Angle(90*u.deg - theta*u.rad).wrap_at(360 * u.deg).degree)\n\n for occ in args:\n if isinstance(occ, Occultation):\n occ.fitted_params = {i: result_sigma[i] for i in ['equatorial_radius', 'center_f', 'center_g',\n 'oblateness', 'position_angle']}\n occ.chi2_params = {'chord_name': chord_name, 'radial_dispersion': radial_dispersion,\n 'position_angle': position_angle_point, 'radial_error': error_bar,\n 'chi2_min': chisquare.get_nsigma(sigma=sigma_result)['chi2_min'],\n 'nparam': chisquare.nparam, 'npts': chisquare.npts}\n return chisquare", "def ellipse(self, arg, fill='', outline=''):\n pass", "def fitRectangle(self):\n \n #TODO MAKE SOMETHING MORE GENERIC!!\n \n fA, (fXg, fYg) = self.getArea_and_CenterOfMass()\n \n x1,y1, x2,y2 = self.getBoundingBox()\n #build a rectangle with same \"width\" as the polygon... 
is-it good enough??\n w = x2 - x1\n \n #but this width should not lead to go out of the bounding box!\n fW = min(w, (x2-fXg)*2, (fXg-x1)*2)\n \n #same area\n fH = fA / fW\n \n x1,y1, x2,y2 = [ int(round(v)) for v in [ fXg - fW/2.0, fYg - fH/2\n , fXg + fW/2.0, fYg + fH/2 ]]\n \n return x1,y1, x2,y2", "def get_ellipse_mask(ellipse, img_shape, offset=0):\n # create image\n mask = np.zeros((img_shape[0], img_shape[1]), dtype=np.uint8)\n\n try:\n # fill ellipse\n draw_ellipse(mask, ellipse, (255, 255, 255), -1)\n except Exception as ex:\n logging.getLogger(\"StrainDetection\").warning(\"Unable to create ellipse mask: {}\".format(ex))\n mask += 255 # make mask white to include everything\n return mask\n\n # dilate/erode by given offset, if necessary\n if offset != 0:\n operation = cv.MORPH_DILATE\n if offset < 0:\n operation = cv.MORPH_ERODE # if offset is negative --> erode\n\n # create kernel\n n = 2 * abs(offset) + 1\n kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (n, n))\n\n # perform morphological operation\n mask = cv.morphologyEx(mask, operation, kernel)\n\n return mask", "def ellipsoid(center, radii, rotation, scales=None, shape=None, minarea=0):\n center = np.array(center)\n radii = np.array(radii)\n rotation = np.array(rotation)\n assert center.shape == (3,)\n assert radii.shape == (3,)\n assert 0 < radii.max(), \"radii should contain at least one positive value\"\n assert rotation.shape == (3, 3)\n if scales is None:\n scales = (1.,) * 3\n scales = np.array(scales)\n assert scales.shape == (3,)\n\n scaled_center = center / scales\n\n # The upper_left_bottom and lower_right_top corners of the smallest cuboid\n # containing the ellipsoid.\n factor = np.array([\n [i, j, k] for k in (-1, 1) for j in (-1, 1) for i in (-1, 1)]).T\n while True:\n radii_rot = np.abs(\n np.diag(1. 
/ scales).dot(rotation.dot(np.diag(radii).dot(factor)))\n ).max(axis=1)\n # In the original scikit-image code, ceil and floor were replaced.\n # https://github.com/scikit-image/scikit-image/blob/master/skimage/draw/draw.py#L127\n upper_left_bottom = np.floor(scaled_center - radii_rot).astype(int)\n lower_right_top = np.ceil(scaled_center + radii_rot).astype(int)\n\n if shape is not None:\n # Constrain upper_left and lower_ight by shape boundary.\n upper_left_bottom = np.maximum(\n upper_left_bottom, np.array([0, 0, 0]))\n lower_right_top = np.minimum(\n lower_right_top, np.array(shape[:3]) - 1)\n\n bounding_shape = lower_right_top - upper_left_bottom + 1\n\n d_lim, r_lim, c_lim = np.ogrid[0:float(bounding_shape[0]),\n 0:float(bounding_shape[1]),\n 0:float(bounding_shape[2])]\n d_org, r_org, c_org = scaled_center - upper_left_bottom\n d_rad, r_rad, c_rad = radii\n rotation_inv = np.linalg.inv(rotation)\n conversion_matrix = rotation_inv.dot(np.diag(scales))\n d, r, c = (d_lim - d_org), (r_lim - r_org), (c_lim - c_org)\n distances = (\n ((d * conversion_matrix[0, 0] +\n r * conversion_matrix[0, 1] +\n c * conversion_matrix[0, 2]) / d_rad) ** 2 +\n ((d * conversion_matrix[1, 0] +\n r * conversion_matrix[1, 1] +\n c * conversion_matrix[1, 2]) / r_rad) ** 2 +\n ((d * conversion_matrix[2, 0] +\n r * conversion_matrix[2, 1] +\n c * conversion_matrix[2, 2]) / c_rad) ** 2\n )\n if distances.size < minarea:\n old_radii = radii.copy()\n radii *= 1.1\n print('Increase radii from ({}) to ({})'.format(old_radii, radii))\n else:\n break\n distance_thresh = 1\n while True:\n dd, rr, cc = np.nonzero(distances < distance_thresh)\n if len(dd) < minarea:\n distance_thresh *= 1.1\n else:\n break\n dd.flags.writeable = True\n rr.flags.writeable = True\n cc.flags.writeable = True\n dd += upper_left_bottom[0]\n rr += upper_left_bottom[1]\n cc += upper_left_bottom[2]\n return dd, rr, cc", "def ci95_ellipse(data, type=\"pop\"):\n\n # Build and fit PCA model\n pca = PCA()\n pca.fit(data)\n coeff = pca.components_\n score = pca.transform(data)\n eigvals = pca.explained_variance_\n\n # Calculate rotation angle\n phi = math.atan2(coeff[0, 1], coeff[0, 0])\n\n # This angle is between -pi and pi.\n # Let's shift it such that the angle is between 0 and 2pi\n if phi < 0:\n phi += 2 * math.pi\n\n # Get the coordinates of the data mean\n n = len(data)\n m = np.mean(data, axis=0)\n x0 = m[0]\n y0 = m[1]\n\n # Get the 95% confidence interval error ellipse\n # inverse of the chi-square cumulative distribution for p = 0.05 & 2 d.f. 
= 5.9915\n chisquare_val = 5.9915\n if type is \"pop\":\n a = math.sqrt(chisquare_val * eigvals[0])\n b = math.sqrt(chisquare_val * eigvals[1])\n elif type is \"mean\":\n a = math.sqrt(chisquare_val * eigvals[0] / n)\n b = math.sqrt(chisquare_val * eigvals[1] / n)\n else:\n raise ValueError(\"type has to be 'pop' or 'mean'.\")\n\n # the ellipse in x and y coordinates\n theta_grid = np.linspace(0, 2 * math.pi, num=100)\n ellipse_x_r = a * np.cos(theta_grid)\n ellipse_y_r = b * np.sin(theta_grid)\n\n # Define a rotation matrix\n R = np.array([[np.cos(phi), np.sin(phi)], [-np.sin(phi), np.cos(phi)]])\n # let's rotate the ellipse to some angle phi\n r_ellipse = np.dot(np.vstack((ellipse_x_r, ellipse_y_r)).T, R)\n\n # Draw the error ellipse\n x = r_ellipse[:, 0] + x0\n y = r_ellipse[:, 1] + y0\n ellipse = np.stack((x, y), axis=1)\n\n outside = []\n for i in range(len(score)):\n metric = (score[i, 0] / a) ** 2 + (score[i, 1] / b) ** 2\n if metric > 1:\n outside.append(1)\n else:\n outside.append(0)\n\n return ellipse, outside", "def ellipse(radii = (10,5), angle_resolution = 2.5, layer = 0):\n D = Device(name = 'ellipse')\n a = radii[0]\n b = radii[1]\n t = np.linspace(0, 360, int(np.ceil(360/angle_resolution) + 1)) * pi/180\n r = a*b / (sqrt((b*cos(t))**2 + (a*sin(t))**2))\n xpts = r*cos(t)\n ypts = r*sin(t)\n D.add_polygon(points = (xpts, ypts), layer = layer)\n return D", "def ellipse2pathd(ellipse):\n\n cx = ellipse.get('cx', 0)\n cy = ellipse.get('cy', 0)\n rx = ellipse.get('rx', None)\n ry = ellipse.get('ry', None)\n r = ellipse.get('r', None)\n\n if r is not None:\n rx = ry = float(r)\n else:\n rx = float(rx)\n ry = float(ry)\n\n cx = float(cx)\n cy = float(cy)\n\n d = ''\n d += 'M' + str(cx - rx) + ',' + str(cy)\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(2 * rx) + ',0'\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(-2 * rx) + ',0'\n\n return d", "def ellipse(self, x, y, radiusx, radiusy, rotation=0, startangle=0, endangle=2 * pi, anticlockwise=False):\n self._impl.ellipse(x, y, radiusx, radiusy, rotation, startangle, endangle, anticlockwise)", "def fitEllipse(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn {\"center\":(x,y), \"major\":MA, \"minor\":ma, \"angle\":angle}", "def ellipse2pathd(ellipse):\n\n cx = ellipse.get('cx', None)\n cy = ellipse.get('cy', None)\n rx = ellipse.get('rx', None)\n ry = ellipse.get('ry', None)\n r = ellipse.get('r', None)\n\n if r is not None:\n rx = ry = float(r)\n else:\n rx = float(rx)\n ry = float(ry)\n\n cx = float(cx)\n cy = float(cy)\n\n d = ''\n d += 'M' + str(cx - rx) + ',' + str(cy)\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(2 * rx) + ',0'\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(-2 * rx) + ',0'\n\n return d", "def ellipse2pathd(ellipse):\n\n cx = ellipse.get('cx', None)\n cy = ellipse.get('cy', None)\n rx = ellipse.get('rx', None)\n ry = ellipse.get('ry', None)\n r = ellipse.get('r', None)\n\n if r is not None:\n rx = ry = float(r)\n else:\n rx = float(rx)\n ry = float(ry)\n\n cx = float(cx)\n cy = float(cy)\n\n d = ''\n d += 'M' + str(cx - rx) + ',' + str(cy)\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(2 * rx) + ',0'\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(-2 * rx) + ',0'\n\n return d", "def DrawEllipseRect(*args, **kwargs):\n return _gdi_.PseudoDC_DrawEllipseRect(*args, **kwargs)", "def draw_ellipse(self, color, position, size=None,\n border_width=0, anchor='topleft'):\n if size is None:\n rect = spyral.Rect(position)\n else:\n rect = spyral.Rect(position, 
size)\n offset = self._calculate_offset(anchor, rect.size)\n pygame.draw.ellipse(self._surf, color,\n (rect.pos + offset, rect.size), border_width)\n self._version += 1\n spyral.util.scale_surface.clear(self._surf)\n return self", "def get_eccentricity(self, ellipse):\r\n a = ellipse.get_width()\r\n b = ellipse.get_height()\r\n if b > a:\r\n a, b = b, a\r\n c = np.sqrt(a**2 - b**2)\r\n return fdiv(c, a)", "def r_ellipse(self,xc=None,yc=None):\n x = self.x\n y = self.y\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n self.rel = sqrt(self.cxx*(x-xc)**2 +\n\t\t self.cyy*(y-yc)**2 +\n\t\t self.cxy*(x-xc)*(y-yc)\n\t\t )", "def draw_ellipse_filled(center_x, center_y,\n width, height, color, tilt_angle=0):\n\n num_segments = 128\n\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n GL.glRotatef(tilt_angle, 0, 0, 1)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_TRIANGLE_FAN)\n\n GL.glVertex3f(0, 0, 0.5)\n\n for segment in range(num_segments + 1):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n GL.glVertex3f(x, y, 0.5)\n\n GL.glEnd()\n GL.glLoadIdentity()", "def cartesian_to_ellipse(center, angle, lengths):\n xInd, yInd = np.mgrid[:512, :512]\n major = max(lengths)/np.mean(lengths)\n minor = min(lengths)/np.mean(lengths)\n xInd, yInd = xInd - center[0], yInd - center[1]\n xInd, yInd = rotate(xInd, yInd, angle=-angle)\n xInd, yInd = xInd*minor, yInd*major\n xInd, yInd = rotate(xInd, yInd, angle=angle)\n return xInd, yInd", "def add_ellipse(self, centroid, length, width, angle, asymmetry=0.0,\n **kwargs):\n ellipse = Ellipse(xy=centroid, width=length, height=width,\n angle=np.degrees(angle), fill=False, **kwargs)\n self.axes.add_patch(ellipse)\n self.update()\n return ellipse", "def render_ellipse_filled(shape, center_x, center_y, color, angle=0):\n # Set color\n if len(color) == 4:\n GL.glColor4ub(shape.color[0], shape.color[1], shape.color[2],\n shape.color[3])\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n elif len(color) == 3:\n GL.glDisable(GL.GL_BLEND)\n GL.glColor4ub(shape.color[0], shape.color[1], shape.color[2], 255)\n\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, shape.vbo_id)\n GL.glVertexPointer(2, GL.GL_FLOAT, 0, 0)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n if angle:\n GL.glRotatef(angle, 0, 0, 1)\n\n GL.glDrawArrays(GL.GL_TRIANGLE_FAN, 0, shape.size)", "def draw_ellipse(mat, radius_x, radius_y, angle, color=(0, 0, 255), thickness=1):\n cv2.ellipse(mat, (radius_x, radius_y), angle, 0, 0, color, thickness=thickness)", "def box_ellipse(A, r):\n A = np.array(A)\n A = A.transpose().dot(A)\n size = len(A)\n widths = []\n for i in range(size):\n setperp = [k for k in range(i)] + [k for k in range(i + 1, size)]\n v = A[i, setperp]\n A22 = A[setperp][:, setperp]\n try:\n Aperpinv = np.linalg.inv(A22)\n gamma = Aperpinv.dot(v)\n gamma = gamma.dot(v)\n widths.append(r / np.sqrt(A[i, i] - gamma))\n except np.linalg.linalg.LinAlgError:\n widths.append(1.0e300)\n return widths", "def draw_ellipse(image, bounds, width=1, outline: Color = 'white', antialias=4):\n\n # Use a single 
channel image (mode='L') as mask.\n # The size of the mask can be increased relative to the imput image\n # to get smoother looking results.\n mask = Image.new(size=[int(dim * antialias) for dim in image.size], mode='L', color='black')\n draw = ImageDraw.Draw(mask)\n\n # draw outer shape in white (color) and inner shape in black (transparent)\n for offset, fill in (-7, 'white'), (width, 'black'):\n left, top = [(value + offset) * antialias for value in bounds[:2]]\n right, bottom = [(value - offset) * antialias for value in bounds[2:]]\n draw.ellipse([left, top, right, bottom], fill=fill)\n\n # downsample the mask using PIL.Image.LANCZOS\n # (a high-quality downsampling filter).\n mask = mask.resize(image.size, Image.LANCZOS)\n # paste outline color to input image through the mask\n image.paste(outline, mask=mask)", "def ellipse_pt(th, x_c, y_c, a, b, rot):\n x = x_c + (a * cos(th) * cos(rot) - b * sin(th) * sin(rot))\n y = y_c + (a * cos(th) * sin(rot) - b * sin(th) * cos(rot))\n return x, y", "def r_ellipse(radius=5, xc=[0., 0.], q=0.5, pa=0, re=1., gf_header=None, comp='2', verbose=True, nstep=256, psf_offset=[1, 1], **kwargs):\n if gf_header is not None:\n xx = gf_header_key(gf_header, comp+'_XC')\n yy = gf_header_key(gf_header, comp+'_YC')\n xc = np.array([xx, yy])\n mag = gf_header_key(gf_header, comp+'_MAG')\n\n if comp+'_N' in gf_header:\n n = gf_header_key(gf_header, comp+'_N')\n q = gf_header_key(gf_header, comp+'_AR')\n pa = gf_header_key(gf_header, comp+'_PA')\n re = gf_header_key(gf_header, comp+'_RE')\n else:\n n = 1.\n q = 1.\n pa = 0.\n re = 0.01\n\n if verbose:\n print(f'xc:{xc}, q:{q}, pa:{pa}')\n\n phi = np.linspace(0, 2*np.pi, nstep)\n xp = np.array([np.cos(phi), q*np.sin(phi)]).T*radius\n\n theta = -(np.pi/2 + pa/180*np.pi) # + np.pi\n\n _rot = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n\n x0 = xp.dot(_rot) + np.atleast_1d(xc)\n xell, yell = (x0 - 1 - np.atleast_1d(psf_offset)).T\n\n return xell, yell", "def drawEllipse(self,id,x,y,rx,ry):\n if id in self.elements.keys():\n print(\"The id for the ellipse has been registered! 
Please use another one\")\n return\n try:\n self.checkInBound(x,0); self.checkInBound(x+rx,0); self.checkInBound(x-rx,0)\n except AssertionError as e:\n print(\"The ellipse set would be longer than the x axis of canvas!\")\n return\n try:\n self.checkInBound(y,0); self.checkInBound(y+ry,0); self.checkInBound(y-ry,0)\n except AssertionError as e:\n print(\"The ellipse set would be longer than the y axis of canvas!\")\n return\n ellipseEle=Ellipse(id,self.h-1-y,x,ry,rx,self.drawColor)\n self.elements[id]=ellipseEle\n self.sync=False", "def fill_ellipse(self, x0, y0, a, b, color):\n a2 = a * a\n b2 = b * b\n twoa2 = a2 + a2\n twob2 = b2 + b2\n x = 0\n y = b\n px = 0\n py = twoa2 * y\n # Plot initial points\n self.line(x0, y0 - y, x0, y0 + y, color)\n # Region 1\n p = round(b2 - (a2 * b) + (0.25 * a2))\n while px < py:\n x += 1\n px += twob2\n if p < 0:\n p += b2 + px\n else:\n y -= 1\n py -= twoa2\n p += b2 + px - py\n self.line(x0 + x, y0 - y, x0 + x, y0 + y, color)\n self.line(x0 - x, y0 - y, x0 - x, y0 + y, color)\n # Region 2\n p = round(b2 * (x + 0.5) * (x + 0.5) +\n a2 * (y - 1) * (y - 1) - a2 * b2)\n while y > 0:\n y -= 1\n py -= twoa2\n if p > 0:\n p += a2 - py\n else:\n x += 1\n px += twob2\n p += a2 - py + px\n self.line(x0 + x, y0 - y, x0 + x, y0 + y, color)\n self.line(x0 - x, y0 - y, x0 - x, y0 + y, color)", "def in_ellipse(x,y,a,b):\n return ellipse(x,y,a,b) <= 1", "def scatter_ellipse(axis_ob, x, y, w, h, c='b', a=0.0, alpha=0.5):\r\n if not axis_ob._hold:\r\n axis_ob.cla()\r\n\r\n if not iterable(a):\r\n a = [a] * len(x)\r\n\r\n if not iterable(alpha):\r\n alpha = [alpha] * len(x)\r\n if len(c) != len(x):\r\n raise ValueError('c and x are not equal lengths')\r\n if len(w) != len(x):\r\n raise ValueError('w and x are not equal lengths')\r\n\r\n if len(h) != len(x):\r\n raise ValueError('h and x are not equal lengths')\r\n if len(a) != len(x):\r\n raise ValueError('a and x are not equal lengths')\r\n # if len(alpha)!=len(x):\r\n # raise ValueError, 'alpha and x are not equal lengths'\r\n patches = []\r\n for thisX, thisY, thisW, thisH, thisC, thisA, thisAl in \\\r\n zip(x, y, w, h, c, a, alpha):\r\n ellip = Ellipse((thisX, thisY), width=thisW, height=thisH,\r\n angle=thisA)\r\n\r\n ellip.set_facecolor(thisC)\r\n ellip.set_alpha(thisAl)\r\n axis_ob.add_patch(ellip)\r\n patches.append(ellip)\r\n axis_ob.autoscale_view()\r\n return axis_ob", "def ellipseToPath(self,node):\n cx = float(node['cx'])\n cy = float(node['cy'])\n rx = 0\n ry = 0\n if 'rx' in node:\n rx = float(node['rx'])\n if 'ry' in node:\n ry = float(node['ry'])\n\n d ='M %f,%f '%(cx-rx,cy)\n d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx,cy-ry)\n d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx+rx,cy)\n d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx,cy+ry)\n d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx-rx,cy)\n\n return d", "def smallest_ellipse(points, tol = 0.001):\n points = np.asmatrix(points)\n N, d = points.shape\n Q = np.column_stack((points, np.ones(N))).T\n err = tol+1.0\n u = np.ones(N)/N\n \n while err > tol:\n # assert u.sum() == 1 # invariant\n X = Q * np.diag(u) * Q.T\n M = np.diag(Q.T * la.inv(X) * Q)\n jdx = np.argmax(M)\n step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))\n new_u = (1-step_size)*u\n new_u[jdx] += step_size\n err = la.norm(new_u-u)\n u = new_u\n c = u*points\n A = la.inv(points.T*np.diag(u)*points - c.T*c)/d\n return np.asarray(A), np.squeeze(np.asarray(c))", "def create_ellipse(home_location, work_location, spread):\n\tif work_location is None:\n\t\treturn None\n\telse:\n\t\ta = home_location.distance(work_location)\n\t\tb = a * 
float(spread)\n\t\tpoint_list = []\n\t\tazimuth = math.atan2(work_location.y - home_location.y, work_location.x - home_location.x)\n\t\tro = (math.pi / 200)\n\n\t\tfor t in range(0, 401):\n\t\t\tx = home_location.x + (a * math.cos(t * ro) * math.cos(azimuth) - b * math.sin(t * ro) * math.sin(azimuth))\n\t\t\ty = home_location.y + (b * math.sin(t * ro) * math.cos(azimuth) + a * math.cos(t * ro) * math.sin(azimuth))\n\t\t\tpoint_list.append([Point(x, y).x, Point(x, y).y])\n\t\treturn Polygon(point_list)", "def create_ellipse(width, height, color):\n num_segments = 64\n\n data = []\n\n for segment in range(num_segments + 1):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n data.extend([x, y])\n\n vbo_id = GL.GLuint()\n\n GL.glGenBuffers(1, ctypes.pointer(vbo_id))\n\n v2f = data\n data2 = (GL.GLfloat * len(v2f))(*v2f)\n\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, vbo_id)\n GL.glBufferData(GL.GL_ARRAY_BUFFER, ctypes.sizeof(data2), data2,\n GL.GL_STATIC_DRAW)\n\n shape = VertexBuffer(vbo_id, len(v2f) // 2, width, height, color)\n return shape", "def getEllipticalKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_ELLIPSE, size)", "def draw_probe_ellipse(xy, covar, alpha, color=None, **kwargs):\n \n b24ac = scipy.sqrt(pow(covar[0,0] - covar[1,1],2) + 4*pow(covar[0,1],2))\n c2inv = chi2.ppf(alpha, 2.)\n \n a = scipy.real(scipy.sqrt(c2inv*.5*(covar[0,0] + covar[1,1] + b24ac)))\n b = scipy.real(scipy.sqrt(c2inv*.5*(covar[0,0] + covar[1,1] - b24ac)))\n\n if covar[0,0] != covar[1,1]:\n theta = .5*scipy.arctan(2*covar[0,1]/(covar[0,0] - covar[1,1]))\n print(theta)\n else:\n theta = scipy.sign(covar[0,1])*scipy.pi/4\n \n if covar[1,1] > covar[0,0]:\n swap = a\n a = b\n b = swap\n\n ellipse = Ellipse(xy, 2*a, 2*b, angle=theta*180./scipy.pi, edgecolor=color, fill=False, **kwargs)\n plt.gca().add_patch(ellipse)\n return ellipse", "def add_ellipse(self, centroid, length, width, angle, asymmetry=0.0, **kwargs):\n ellipse = Ellipse(\n x=centroid[0],\n y=centroid[1],\n width=length,\n height=width,\n angle=angle,\n fill_color=None,\n **kwargs,\n )\n glyph = self.figure.add_glyph(ellipse)\n self._annotations.append(glyph)\n self.update()\n return ellipse", "def construct_by_ellipse(a_xx, h_xy, b_yy, g_x, f_y, d, focal_length):\n gamma = - focal_length\n a = gamma**2 * a_xx\n b = gamma**2 * b_yy\n c = d\n d = gamma**2 * d\n f = -gamma*(f_y)\n g = -gamma*(g_x)\n h = gamma**2 * h_xy\n #Not needed\n u = gamma**2 * g_x\n v = gamma**2 * f_y\n w = -gamma*(d)\n return ConeCamera(a, b, c, f, g, h)", "def proc_filled_ellipse(self, tokens):\n\n return self._proc_ellipse(tokens, filled=True)", "def plot_cov_ellipse(ellipses, cov, pos=[0.0, 0.0], nstds=[0.0,1.0,2.0], **kwargs):\n def eigsorted(cov):\n vals, vecs = _np.linalg.eigh(cov)\n order = vals.argsort()[::-1]\n return vals[order], vecs[:,order]\n\n\n vals, vecs = eigsorted(cov)\n theta = _np.degrees(_np.arctan2(*vecs[:,0][::-1]))\n\n # Width and height are \"full\" widths, not radius\n sigma_max = 0.5\n alpha = min(0.8, _np.prod(sigma_max /_np.sqrt(vals)))\n for i,e in enumerate(ellipses):\n sigma = nstds[i]\n width, height = 2 * sigma * _np.sqrt(vals)\n #ellipses[i].center = pos\n e.set_alpha(alpha)\n if sigma > 0.1: #if this is below, then treat ellipse as a center circle and do not modify size at all\n e.width = width\n e.height= height\n e.angle = theta\n e.center = pos\n e.set(**kwargs)\n\n# e.fill=True\n# e.set_linewidth(0.0)\n\n\n return ellipses", "def draw_ellipse(self, color, 
position, size, border_width = 0, anchor= 'topleft'):\n # We'll try to make sure that everything is okay later\n \n color = spyral.color._determine(color)\n offset = self._calculate_offset(anchor, size)\n pygame.draw.ellipse(self._surf, color, (position + offset, size), border_width)", "def geom_ellipse(mean: np.array, cov: np.array, data: np.array=None, q: float=0.95, **kwargs):\n\n # Radius that covers q-fraction of white gaussian noise \n r = np.sqrt(stats.chi2.ppf(q=q, df=2))\n \n # Eigen-directions of a covariance matrix\n try:\n L, W = np.linalg.eigh(cov)\n except:\n return geom_path(aes(x = 'x', y = 'y'), data = DataFrame(columns=['x', 'y']))\n \n # Properly scaled eigen-directions\n W[0, :] = W[0, :] * r * np.sqrt(L[0]) \n W[1, :] = W[1, :] * r * np.sqrt(L[1]) \n \n theta = np.linspace(0, 2 * np.pi, 100)\n \n return geom_path(aes(x = 'x', y = 'y'), data = DataFrame()\n .assign(x = mean[0] + np.sin(theta) * W[0, 0] + np.cos(theta) * W[1, 0])\n .assign(y = mean[1] + np.sin(theta) * W[0, 1] + np.cos(theta) * W[1, 1]), **kwargs)", "def extractOblateEllipse(kperp,kpar,aniso):\n\n if aniso > 1.:\n #print(\"Swapping axis for oblate ellipse\")\n aniso = 1. / aniso\n\n # Define the eccentricity of the ellipse\n e = np.sqrt( 1. - aniso**2. )\n\n\n # the oblate surface area\n surface = 2. * np.pi * kperp**2. * ( 1. + ( (1. - e**2.) / e ) * np.arctanh(e) )\n\n return surface", "def getEllipse(self, xc, Sigma, nSigma=2):\n\n if nla.det(Sigma) == 0:\n return None\n\n w, v = nla.eig(Sigma)\n D = np.diag(w, 0)\n\n theta = np.linspace(0, 2*np.pi, 100, endpoint=True)\n circle = nSigma*np.vstack((np.cos(theta), np.sin(theta)))\n\n el = sla.sqrtm(D)\n el = el.dot(circle)\n el = v.dot(el)\n\n XY = xc + el\n\n return XY", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 
26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def ellipse_bounds(P, level, n=100):\n # Round up to multiple of 2\n n += n % 2\n\n # Principal axes of ellipsoid\n eigval, eigvec = np.linalg.eig(P)\n eigvec *= np.sqrt(level / eigval)\n\n # set zero angle at maximum x\n angle = np.linspace(0, 2 * np.pi, n)[:, None]\n angle += np.arctan(eigvec[0, 1] / eigvec[0, 0])\n\n # Compute positions\n pos = np.cos(angle) * eigvec[:, 0] + np.sin(angle) * eigvec[:, 1]\n n /= 2\n\n # Return x-position (symmetric) and upper/lower bounds\n return pos[:n, 0], pos[:n, 1], pos[:n - 1:-1, 1]", "def test_ellipse_draw():\n with TestingCanvas():\n ellipse = visuals.Ellipse(pos=(0., 0.), radius=(0.4, 0.3),\n color=(0, 0, 1, 1))\n ellipse.transform = transforms.STTransform(scale=(2.0, 3.0))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/ellipse1.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0., 0.), radius=(0.4, 0.3),\n color=(0, 0, 1, 1),\n border_color=(1, 0, 0, 1))\n ellipse.transform = transforms.STTransform(scale=(2.0, 3.0))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/ellipse2.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0., 0.), radius=(0.4, 0.3),\n border_color=(1, 0, 0, 1))\n ellipse.transform = transforms.STTransform(scale=(2.0, 3.0))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/ellipse3.png')", "def draw_oval(display, coord, box_size, color, bg_color):\n left, top = coord\n half = int(box_size * 0.5)\n quarter = int(box_size * 0.25)\n pygame.draw.ellipse(display, color, (left, top + quarter, box_size, half))", "def fitEllipseDirect(points):\r\n x = points[:, 0]; y = points[:, 1];\r\n # Build design matrix\r\n D = np.vstack((x*x, x*y, y*y, x, y, np.ones(x.shape)))\r\n # Build scatter matrix\r\n S = D.dot(D.T)\r\n # Build constraint matrix\r\n C = np.zeros((6, 6))\r\n C[0, 2]= +2; C[1, 1]= -1; C[2, 0]= +2;\r\n # Solve generalised eigenvalue system C*a == l*S*a\r\n geval, gevec = linalg.eig(S, C)\r\n # Find the eigenvector with the only pozitive eigenvalue\r\n geval = np.real(geval)\r\n i = np.argmax((geval>0) * np.isfinite(geval))\r\n if not np.isfinite(geval[i]):\r\n raise linalg.LinAlgError(\r\n \"Eigenvalue calculation failed to return a valid answer.\" +\r\n \"\\nEigenvalues:\\n\" + str(geval) + '\\n')\r\n theVec = np.real(gevec[:, i])\r\n # That vector has the parameters of the ellipse\r\n return tuple(theVec.flatten())", "def draw_ellipse(position, covariance, ax=None, **kwargs):\r\n # Convert covariance to principal axes\r\n if covariance.shape == (2, 2):\r\n U, s, Vt = np.linalg.svd(covariance)\r\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\r\n width, height = 2 * np.sqrt(s)\r\n else:\r\n angle = 0\r\n width, height = 2 * np.sqrt(covariance)\r\n \r\n # Draw the Ellipse\r\n for nsig in range(1, 4):\r\n ax.add_patch(Ellipse(position, nsig * width, nsig * height, \r\n angle, **kwargs))", "def run_ellipse(img, redo=False):\n # Reading data and mask\n outfile = \"ellipse.txt\"\n if os.path.exists(outfile) and not redo:\n return\n data = make_masked_img(img)\n # Preparing ellipse fitting\n\n geometry = EllipseGeometry(x0=213, y0=235, sma=25, eps=0.3,\n pa=np.deg2rad(-50))\n ellipse = Ellipse(data, geometry)\n isolist = ellipse.fit_image(fflag=0.01, maxsma=200, maxrit=104)\n # isolist = ellipse.fit_image(fflag=0.01, maxsma=20)\n table = isolist.to_table()[1:]\n table.write(outfile, format=\"ascii\", overwrite=True)\n # Producing 
image\n model_image = build_ellipse_model(data.shape, isolist)\n residual = data - model_image\n fig, (ax1, ax2, ax3) = plt.subplots(figsize=(14, 5), nrows=1, ncols=3)\n fig.subplots_adjust(left=0.04, right=0.98, bottom=0.02, top=0.98)\n ax1.imshow(data, origin='lower')\n ax1.set_title('Data')\n\n smas = np.linspace(5, 200, 10)\n for sma in smas:\n iso = isolist.get_closest(sma)\n x, y, = iso.sampled_coordinates()\n ax1.plot(x, y, color='C1')\n ax2.imshow(model_image, origin='lower')\n ax2.set_title('Ellipse Model')\n ax3.imshow(residual, origin='lower')\n ax3.set_title('Residual')\n plt.savefig(\"ellipse.png\", dpi=250)\n plt.show()", "def draw_ellipse(position, covariance, ax=None, **kwargs):\n ax = ax or plt.gca()\n # Convert covariance to principal axes\n if covariance.shape == (2, 2):\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n else:\n angle = 0\n width, height = 2 * np.sqrt(covariance)\n \n # Draw the Ellipse\n for nsig in range(1, 4):\n ax.add_patch(Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def draw_ellipse(position, covariance, ax=None, **kwargs):\n ax = ax or plt.gca()\n \n # Convert covariance to principal axes\n if covariance.shape == (2, 2):\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n else:\n angle = 0\n width, height = 2 * np.sqrt(covariance)\n \n # Draw the Ellipse\n for nsig in range(1, 4):\n ax.add_patch(Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))", "def proc_unfilled_ellipse(self, tokens):\n\n return self._proc_ellipse(tokens, filled=False)", "def transform(self, heraldry: Image.Image) -> Image.Image:\n heraldry = heraldry.convert('RGBA')\n orig_width, orig_height = heraldry.size\n\n new_height = int(self.height_change * orig_height)\n ell_img = Image.new('RGBA', (orig_width, new_height))\n draw = ImageDraw.Draw(ell_img)\n\n top_left = (0, int(self.ell_start * orig_height))\n bot_right = (orig_width - 1, new_height - 1)\n draw.ellipse((*top_left, *bot_right), fill = self.fill_col)\n ell_img.paste(heraldry, (0, 0), heraldry)\n\n return ell_img", "def __DrawEllipse(self, image, rectangule, color):\n cv2.ellipse(image, rectangule, color, 2)\n points = cv2.boxPoints(rectangule)\n for i in range(4):\n cv2.line(image, tuple(np.array(points[i], np.int32)),\n tuple(np.array(points[(i + 1) % 4], np.int32)), color, 2)", "def area_ellipse(radius_x: float, radius_y: float) -> float:\r\n if radius_x < 0 or radius_y < 0:\r\n raise ValueError(\"area_ellipse() only accepts non-negative values\")\r\n return pi * radius_x * radius_y", "def getMinVolEllipse(P, tolerance=0.01):\n (N, d) = np.shape(P)\n d = float(d)\n\n # Q will be our working array\n Q = np.vstack([np.copy(P.T), np.ones(N)]) \n QT = Q.T\n \n # initializations\n err = 1.0 + tolerance\n u = (1.0 / N) * np.ones(N)\n\n # Khachiyan Algorithm\n while err > tolerance:\n V = np.dot(Q, np.dot(np.diag(u), QT))\n M = np.diag(np.dot(QT , np.dot(linalg.inv(V), Q))) # M the diagonal vector of an NxN matrix\n j = np.argmax(M)\n maximum = M[j]\n step_size = (maximum - d - 1.0) / ((d + 1.0) * (maximum - 1.0))\n new_u = (1.0 - step_size) * u\n new_u[j] += step_size\n err = np.linalg.norm(new_u - u)\n u = new_u\n\n # center of the ellipse \n center = np.dot(P.T, u)\n\n # the A matrix for the ellipse\n A = linalg.inv(\n np.dot(P.T, np.dot(np.diag(u), P)) - \n 
np.array([[a * b for b in center] for a in center])\n ) / d\n \n # Get the values we'd like to return\n U, s, rotation = linalg.svd(A)\n radii = 1.0/np.sqrt(s)\n\n rot_err = linalg.norm(np.identity(3)-abs(rotation))\n if(rot_err > 0.05):\n \tradii = np.array([radii[1],radii[0],radii[2]])\n return radii", "def draw_ellipse(position, covariance, ax=None, **kwargs):\n ax = ax or plt.gca()\n\n # Convert covariance to principal axes\n if covariance.shape == (2, 2):\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n else:\n angle = 0\n width, height = 2 * np.sqrt(covariance)\n\n # Draw the Ellipse\n for nsig in range(1, 4):\n ax.add_patch(Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))", "def draw_ellipse(frame, coordinate, line_color=(124, 0, 0), radius=1, normalized=True):\n\n x1 = coordinate[0]\n y1 = coordinate[1]\n\n if normalized:\n h = frame.shape[0]\n w = frame.shape[1]\n\n x1 = int(x1 * w)\n y1 = int(y1 * h)\n\n cv.circle(frame, (x1, y1), radius=radius, color=line_color, thickness=1)", "def draw_ellipse(position, covariance, ax=None, num_contours=5, **kwargs):\n ax = ax or plt.gca()\n \n # Convert covariance to principal axes\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n \n # Draw the Ellipse\n for nsig in range(1, num_contours):\n ax.add_patch(Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))", "def get_ellipse_coords(a=0.0, b=0.0, x=0.0, y=0.0, angle=0.0, k=2):\n pts = np.zeros((360*k+1, 2))\n\n beta = -angle * np.pi/180.0\n sin_beta = np.sin(beta)\n cos_beta = np.cos(beta)\n alpha = np.radians(np.r_[0.:360.:1j*(360*k+1)])\n \n sin_alpha = np.sin(alpha)\n cos_alpha = np.cos(alpha)\n \n pts[:, 0] = x + (a * cos_alpha * cos_beta - b * sin_alpha * sin_beta)\n pts[:, 1] = y + (a * cos_alpha * sin_beta + b * sin_alpha * cos_beta)\n\n return pts", "def ellipse_to_rectangle(n, locations):\n major_axis_radius, minor_axis_radius, angle, center_x, center_y = locations\n pt = []\n for i in range(n):\n pt1 = (int(center_x[i]) - int(minor_axis_radius[i]), int(center_y[i]) - int(major_axis_radius[i]))\n pt2 = (int(center_x[i]) + int(minor_axis_radius[i]), int(center_y[i]) + int(major_axis_radius[i]))\n pt.append([pt1, pt2])\n return pt", "def get_ellipse_coords(a=0.0, b=0.0, x=0.0, y=0.0, angle=0.0, k=2):\n pts = np.zeros((360*k+1, 2))\n\n beta = -angle * np.pi/180.0\n sin_beta = np.sin(beta)\n cos_beta = np.cos(beta)\n alpha = np.radians(np.r_[0.:360.:1j*(360*k+1)])\n\n sin_alpha = np.sin(alpha)\n cos_alpha = np.cos(alpha)\n\n pts[:, 0] = x + (a * cos_alpha * cos_beta - b * sin_alpha * sin_beta)\n pts[:, 1] = y + (a * cos_alpha * sin_beta + b * sin_alpha * cos_beta)\n\n return pts", "def calcualte_ellipse_radii(guess, eccentricity = 0, perimeter = 2 * np.pi*1):\n return fsolve(ellipse_radii_test, guess, args = (eccentricity, perimeter))", "def DrawEllipse(*args, **kwargs):\n return _gdi_.DC_DrawEllipse(*args, **kwargs)", "def get_cov_ellipse(cov, centre, nstd, **kwargs):\n #WZN\n\n # Find and sort eigenvalues and eigenvectors into descending order\n eigvals, eigvecs = np.linalg.eigh(cov)\n order = eigvals.argsort()[::-1]\n eigvals, eigvecs = eigvals[order], eigvecs[:, order]\n\n # The anti-clockwise angle to rotate our ellipse by \n vx, vy = eigvecs[:,0][0], eigvecs[:,0][1]\n theta = np.arctan2(vy, vx)\n\n # Width and height of ellipse to draw\n width, height = 2 * nstd * np.sqrt(eigvals)\n return 
Ellipse(xy=centre, width=width, height=height,\n angle=np.degrees(theta), **kwargs)", "def DrawEllipsePointSize(*args, **kwargs):\n return _gdi_.DC_DrawEllipsePointSize(*args, **kwargs)", "def visualize_result(img, ellipse, radii=None, angles=None, reference_ellipse=None, strain=None, marker_size=8,\n title=\"\"):\n if img is None or ellipse is None:\n return img\n\n img = np.copy(img) # copy so we don't alter the original\n\n blue = (255, 0, 0)\n green = (0, 200, 0)\n red = (0, 0, 255)\n black = (0, 0, 0)\n strain_colors = ((0, 127, 255), (127, 0, 255))\n\n # draw the reference ellipse\n if reference_ellipse is not None:\n draw_ellipse(img, reference_ellipse, green, 1, True, 1)\n\n # draw the fitted ellipse\n draw_ellipse(img, ellipse, blue, 2, True, 1)\n\n # draw radial strain\n if radii is not None and angles is not None:\n assert len(radii) == len(angles)\n # duplicate colors to make sure we have enough for each radius\n strain_colors = strain_colors * int(np.ceil(len(radii) / len(strain_colors)))\n center, diam, angle = ellipse\n for r, a, c in zip(radii, angles, strain_colors):\n p1, p2 = draw_diameter(img, center, r, a, c, 1)\n draw_cross(img, p1, marker_size, a, c, 2)\n draw_cross(img, p2, marker_size, a, c, 2)\n\n # draw text\n font = cv.FONT_HERSHEY_SIMPLEX\n scale = 1.5\n margin = 20\n cv.putText(img, title, (margin, 60), font, scale, black, 3)\n if reference_ellipse is not None:\n cv.putText(img, \"Detected ellipse\", (margin, 120), font, scale, blue, 2)\n cv.putText(img, \"Reference ellipse\", (margin, 180), font, scale, green, 2)\n\n if strain is not None:\n cv.putText(img, \"Strain: {:.1f} %\".format(strain), (margin, 240), font, scale, black, 2)\n\n return img", "def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):\n if x.size != y.size:\n raise ValueError(\"x and y must be the same size\")\n\n cov = np.cov(x, y)\n pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])\n # Using a special case to obtain the eigenvalues of this\n # two-dimensionl dataset.\n ell_radius_x = np.sqrt(1 + pearson)\n ell_radius_y = np.sqrt(1 - pearson)\n ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,\n facecolor=facecolor, **kwargs)\n\n # Calculating the stdandard deviation of x from\n # the squareroot of the variance and multiplying\n # with the given number of standard deviations.\n scale_x = np.sqrt(cov[0, 0]) * n_std\n mean_x = np.mean(x)\n\n # calculating the stdandard deviation of y ...\n scale_y = np.sqrt(cov[1, 1]) * n_std\n mean_y = np.mean(y)\n\n transf = transforms.Affine2D() \\\n .rotate_deg(45) \\\n .scale(scale_x, scale_y) \\\n .translate(mean_x, mean_y)\n\n ellipse.set_transform(transf + ax.transData)\n return ax.add_patch(ellipse)", "def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):\n if x.size != y.size:\n raise ValueError(\"x and y must be the same size\")\n\n cov = np.cov(x, y)\n pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])\n # Using a special case to obtain the eigenvalues of this\n # two-dimensionl dataset.\n ell_radius_x = np.sqrt(1 + pearson)\n ell_radius_y = np.sqrt(1 - pearson)\n ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,\n facecolor=facecolor, **kwargs)\n\n # Calculating the stdandard deviation of x from\n # the squareroot of the variance and multiplying\n # with the given number of standard deviations.\n scale_x = np.sqrt(cov[0, 0]) * n_std\n mean_x = np.mean(x)\n\n # calculating the stdandard deviation of y ...\n scale_y = np.sqrt(cov[1, 1]) * n_std\n mean_y = 
np.mean(y)\n\n transf = transforms.Affine2D() \\\n .rotate_deg(45) \\\n .scale(scale_x, scale_y) \\\n .translate(mean_x, mean_y)\n\n ellipse.set_transform(transf + ax.transData)\n return ax.add_patch(ellipse)", "def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):\n if x.size != y.size:\n raise ValueError(\"x and y must be the same size\")\n\n cov = np.cov(x, y)\n pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])\n # Using a special case to obtain the eigenvalues of this\n # two-dimensionl dataset.\n ell_radius_x = np.sqrt(1 + pearson)\n ell_radius_y = np.sqrt(1 - pearson)\n ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,\n facecolor=facecolor, **kwargs)\n\n # Calculating the stdandard deviation of x from\n # the squareroot of the variance and multiplying\n # with the given number of standard deviations.\n scale_x = np.sqrt(cov[0, 0]) * n_std\n mean_x = np.mean(x)\n\n # calculating the stdandard deviation of y ...\n scale_y = np.sqrt(cov[1, 1]) * n_std\n mean_y = np.mean(y)\n\n transf = transforms.Affine2D() \\\n .rotate_deg(45) \\\n .scale(scale_x, scale_y) \\\n .translate(mean_x, mean_y)\n\n ellipse.set_transform(transf + ax.transData)\n return ax.add_patch(ellipse)", "def minimum_rotated_rectangle(self): # -> BaseGeometry:\n ...", "def DrawEllipse(*args, **kwargs):\n return _gdi_.GraphicsContext_DrawEllipse(*args, **kwargs)", "def generate_ellipse(R1,R2,center,theta,N=100):\r\n t = np.linspace(0.0,2.0*np.pi,N)\r\n x = R1*np.cos(t)*np.cos(theta) - R2*np.sin(t)*np.sin(theta) + center[0]\r\n y = R1*np.cos(t)*np.sin(theta) + R2*np.sin(t)*np.cos(theta) + center[1]\r\n return x,y", "def constructByParamEllipse(x_center, y_center, maj, min, rot, radius_3d_circle, focal_length=1):\n e = ImpEllipse.construct_by_param(x_center, y_center, maj, min, rot)\n\n return Double3DCircle.construct_by_ImpEllipse(e, radius_3d_circle, focal_length)", "def determine_bounding_box_of_rotated_box(self, box, rotation_matrix):\n\n # top left, top right, bottom left, bottom right\n p1, p2, p3, p4 = box_points(box)\n\n # rotate all the points of the box\n tp1 = calc_rotate_point_with_rotation_matrix(p1, rotation_matrix)\n tp2 = calc_rotate_point_with_rotation_matrix(p2, rotation_matrix)\n tp3 = calc_rotate_point_with_rotation_matrix(p3, rotation_matrix)\n tp4 = calc_rotate_point_with_rotation_matrix(p4, rotation_matrix)\n\n # figure out which point has the furthest x distance, and the furthest y distance\n dx1 = abs(tp1[0] - tp4[0])\n dx2 = abs(tp2[0] - tp3[0])\n dy1 = abs(tp1[1] - tp4[1])\n dy2 = abs(tp2[1] - tp3[1])\n # the width and the height is the max distance between x and y\n w, h = max(dx1, dx2), max(dy1, dy2)\n\n # x and y is the min x, and min y among all points\n x = min(tp1[0], tp2[0], tp3[0], tp4[0])\n y = min(tp1[1], tp2[1], tp3[1], tp4[1])\n\n return (x, y, w, h)", "def get_exterior(self, x, y, x1, x2, bottom, head_y):\n fx1 = x+(x-x1)*8\n fx2 = x+(x-x2)*8\n # compute bounding ellipse; and intersection with body outline\n cv2.ellipse(self.ellipse_finder, ((x/mscale,y/mscale), ((fx1-fx2)/mscale, (2*(bottom-head_y))/mscale), 0), 255,-1 )\n intersection = np.bitwise_and(255-self.ellipse_finder, self.median_finder)\n # find external blobs\n im2, out_contours, out_hierarchy = cv2.findContours(intersection,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n return out_contours, out_hierarchy, fx1-fx2", "def _gen_ellipse(twiss, ep=1, num=100):\n a, b, c = twiss\n\n t = np.linspace(0, 2 * np.pi, num)\n t0 = np.arctan(a)\n x = np.sqrt(b * ep) * np.cos(t)\n y = np.sqrt(c * 
ep) * np.sin(t - t0)\n\n return np.vstack([x, y])", "def eclipse_compensate(aLocation):\n EQUATOR_RADIUS = 6378137.0 # equator radius, or \"spherical\" earth\n POLAR_RADIUS = 6356725.0 # ploar radius\n EP_DIFF = EQUATOR_RADIUS - POLAR_RADIUS # rad-diff between equator and pole\n \n # assuming linear slope from equator to pole\n r_center = POLAR_RADIUS + EP_DIFF * (1.0 - abs(aLocation.lat)/90) # the ring thru earth center\n r_level = r_center * math.cos(math.radians(aLocation.lat)) # the ring thru latitude level\n \n return [r_center, r_level]" ]
[ "0.72764295", "0.65389484", "0.6258115", "0.6085874", "0.6078846", "0.59491014", "0.5820374", "0.5784566", "0.561725", "0.55927783", "0.5559429", "0.5536257", "0.5470644", "0.5456046", "0.54508764", "0.54230434", "0.5402299", "0.53830856", "0.5359062", "0.53571004", "0.53560513", "0.53382266", "0.5328916", "0.5314603", "0.5278285", "0.526421", "0.5252613", "0.5245298", "0.5244697", "0.5243301", "0.5242532", "0.5242532", "0.5236282", "0.5232601", "0.52291036", "0.51700604", "0.51643294", "0.5152434", "0.5149357", "0.5147671", "0.5136441", "0.5118137", "0.5099325", "0.5098919", "0.50835234", "0.50798863", "0.5063783", "0.50600314", "0.50450844", "0.5044488", "0.502462", "0.50230616", "0.5020567", "0.50108546", "0.5007209", "0.5006757", "0.4990369", "0.4988743", "0.49806115", "0.49620646", "0.4955084", "0.49350673", "0.49310857", "0.49072263", "0.49036583", "0.49018162", "0.48816252", "0.48798743", "0.4878113", "0.486852", "0.48643646", "0.4854467", "0.48396644", "0.48237053", "0.48216644", "0.48196954", "0.48155782", "0.47958115", "0.478994", "0.477515", "0.4756133", "0.4749394", "0.47141978", "0.47078654", "0.47029993", "0.47008288", "0.47006717", "0.4695882", "0.46900976", "0.4689779", "0.4689779", "0.4689779", "0.46845865", "0.46709248", "0.4670893", "0.46501812", "0.4625324", "0.46126327", "0.46106678", "0.46102318" ]
0.5605659
9