| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
polynom.py |
else:
return NotImplemented
def __hash__(self):
return hash((self.v, self.n))
def __bool__(self):
return bool(self.v)
def __add__(a, b):
assert isinstance(b, ModInt)
assert a.n == b.n
return ModInt(a.v + b.v, a.n)
def __radd__(a, b):
assert isinstance(b, int)
return ModInt(a.v + b, a.n)
def __neg__(a): return ModInt(-a.v, a.n)
def __sub__(a, b): return ModInt(a.v - b.v, a.n)
def __mul__(a, b):
if isinstance(b, int):
return ModInt(b * a.v, a.n)
elif isinstance(b, ModInt):
assert a.n == b.n
return ModInt(a.v * b.v, a.n)
return NotImplemented
def __rmul__(a, b):
return a * b
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def inv(self):
if self.v == 0:
raise ZeroDivisionError
return ModInt(ModInt._inv(self.v, self.n), self.n)
@staticmethod
def _inv(k, n):
k %= n
if k == 1:
return k
return (n - n // k) * ModInt._inv(n % k, n) % n
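# Illustrative check (added, not in the original file): ModInt._inv(3, 7) == 5,
# since 3 * 5 == 15 and 15 % 7 == 1.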
def __truediv__(a, b):
assert isinstance(b, ModInt)
assert a.n == b.n
return a * b.inv()
def __rtruediv__(a, k):
assert isinstance(k, int)
return ModInt(k, a.n) / a
@staticmethod
def extended_euclid(a, b):
"""Extended Euclid algorithm
Return
------
x : int
y : int
a * x + b * y = gcd(a, b)
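Example (illustrative, added): extended_euclid(240, 46) returns (-9, 47),
and 240 * -9 + 46 * 47 == 2 == gcd(240, 46).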
"""
A, B = a, b
sa, sb = (1 if a >= 0 else -1), (1 if b >= 0 else -1)
xp, yp = 1, 0
x, y = 0, 1
while b:
assert A * xp + B * yp == a
assert A * x + B * y == b
r = a // b
a, b = b, a % b
x, xp = xp - r * x, x
y, yp = yp - r * y, y
return sa * xp, sb * yp
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.v, self.n)
def __str__(self):
return '%s' % self.v
class Polynomial:
"""
Generic class for polynomials
Works with int, float and ModInt
"""
def __len__(self):
return len(self.C)
def trim(C):
i = len(C) - 1
while i >= 0 and not C[i]:
i -= 1
return C[:i + 1]
def __init__(self, C=None):
if C is None:
C = []
self.C = Polynomial.trim(C)
@property
def deg(self):
return len(self.C) - 1
def prime(self): return Polynomial([i * self[i]
for i in range(1, len(self))])
def eval(self, x):
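# Horner's rule (added note): compute (...(c_n * x + c_{n-1}) * x + ...) * x + c_0
# so that no explicit powers of x are needed.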
if not self:
return 0
v = self[-1]
for c in self[-2::-1]:
v = v * x + c
return v
def shift(self, d): return Polynomial(
[0 * self[0]] * d + self.C if self else [])
def __eq__(P, Q):
return P.deg == Q.deg and all(cP == cQ for cP, cQ in zip(P, Q))
def __hash__(self):
return hash(tuple(self.C))
def __call__(self, x): return Polynomial.eval(self, x)
def __getitem__(self, x): return self.C[x]
def __neg__(P): return Polynomial([-c for c in P.C])
def __add__(P, Q):
if len(P.C) < len(Q.C):
P, Q = Q, P
return Polynomial([P[d] + Q[d] for d in range(len(Q))] + P[len(Q):])
def __sub__(P, Q): return P + (-Q)
def _mulpoly(P, Q):
assert isinstance(Q, Polynomial)
return Polynomial([sum(P[k] * Q[d - k]
for k in range(max(0, d + 1 - len(Q)),
min(d + 1, len(P)))
) for d in range(len(P) + len(Q) - 1)])
def _mulscal(P, k):
return Polynomial([k * c for c in P])
def __mul__(P, Q):
if isinstance(Q, Polynomial):
return P._mulpoly(Q)
return P._mulscal(Q)
def __rmul__(P, Q):
return P * Q
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def __iter__(self):
yield from self.C
def euclidean_division(A, B):
Q = [0 * B[0]] * max(0, len(A) - len(B) + 1)
while len(A.C) >= len(B.C):
Q[len(A.C) - len(B.C)] = A[-1] / B[-1]
A -= B.shift(len(A) - len(B)) * (A[-1] / B[-1])
return Polynomial(Q), A
def __floordiv__(A, B):
assert isinstance(B, Polynomial)
return A.euclidean_division(B)[0]
def __mod__(A, B):
"""
Polynomial euclidean division
or modular reduction
"""
if isinstance(B, Polynomial):
return A.euclidean_division(B)[1]
else:
assert isinstance(B, int)
assert all(isinstance(c, int) for c in A)
return A.reduceP(B)
def __lt__(A, B): return A.deg < B.deg
def __bool__(self): return bool(self.C)
def gcd(A, B):
while B:
A, B = B, A % B
return A * (1 / A[-1])
@staticmethod
def gaussianElimKer(M, zero, one):
"""
Outputs an element of the kernel of M, or None if there is none.
zero and one are elements of the same field.
"""
# V satisfies the invariant
# M = V M_0
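# (added note) V[l] records which combination of the original rows produced the
# current M[l]; when M[l] is reduced to zero, that same combination maps to zero,
# so V[l], read as a coefficient vector, is a kernel element.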
V = [Polynomial([zero] * i + [one]) for i in range(len(M))]
pivots = [None] * (len(M) + 1)
for l in range(len(M)):
while M[l].deg >= 0:
idp = M[l].deg
if pivots[idp] is None:
pivots[idp] = l
break
else:
c = M[l][idp] / M[pivots[idp]][idp]
M[l] -= c * M[pivots[idp]]
V[l] -= c * V[pivots[idp]]
else:
# If a row has been reduced to zero, we have found an element of the kernel
return V[l]
return None
def computeQ(P):
# only for square-free polynomials in Z/pZ[X], with p prime
p = P[0].n
# We ignore the image of 1 because (F-Id)(1) = 0
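# (added note) Row i-1 starts as X**(i*p) mod P, i.e. the image of X**i under the
# Frobenius map F: A -> A**p; the loop below subtracts X**i to form F - Id.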
M = [Polynomial(([ModInt(0, p)] * (i * p)) + [ModInt(1, p)]) % P
for i in range(1, P.deg)]
# M -= Id
for i in range(1, P.deg):
M[i - 1] -= Polynomial([ModInt(0, p)] * i + [ModInt(1, p)])
# We find an element of the kernel by Gaussian elimination
pQ = Polynomial.gaussianElimKer(M, ModInt(0, p), ModInt(1, p))
# We put back the 1 that was removed
return pQ.shift(1) if pQ is not None else None
def factor_unit(P):
"""
Berlekamp's algorithm
only in Z/pZ
"""
assert all(isinstance(c, ModInt) for c in P)
assert len(set(c.n for c in P)) == 1
if P | return not bool(a - b) | conditional_block |
|
polynom.py | :
raise ZeroDivisionError
return ModInt(ModInt._inv(self.v, self.n), self.n)
@staticmethod
def _inv(k, n):
k %= n
if k == 1:
return k
return (n - n // k) * ModInt._inv(n % k, n) % n
def __truediv__(a, b):
assert isinstance(b, ModInt)
assert a.n == b.n
return a * b.inv()
def __rtruediv__(a, k):
assert isinstance(k, int)
return ModInt(k, a.n) / a
@staticmethod
def extended_euclid(a, b):
"""Extended Euclid algorithm
Return
------
x : int
y : int
a * x + b * y = gcd(a, b)
"""
A, B = a, b
sa, sb = (1 if a >= 0 else -1), (1 if b >= 0 else -1)
xp, yp = 1, 0
x, y = 0, 1
while b:
assert A * xp + B * yp == a
assert A * x + B * y == b
r = a // b
a, b = b, a % b
x, xp = xp - r * x, x
y, yp = yp - r * y, y
return sa * xp, sb * yp
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.v, self.n)
def __str__(self):
return '%s' % self.v
class Polynomial:
"""
Generic class for polynomials
Works with int, float and ModInt
"""
def __len__(self):
return len(self.C)
def trim(C):
i = len(C) - 1
while i >= 0 and not C[i]:
i -= 1
return C[:i + 1]
def __init__(self, C=None):
if C is None:
C = []
self.C = Polynomial.trim(C)
@property
def deg(self):
return len(self.C) - 1
def prime(self): return Polynomial([i * self[i]
for i in range(1, len(self))])
def eval(self, x):
if not self:
return 0
v = self[-1]
for c in self[-2::-1]:
v = v * x + c
return v
def shift(self, d): return Polynomial(
[0 * self[0]] * d + self.C if self else [])
def __eq__(P, Q):
return P.deg == Q.deg and all(cP == cQ for cP, cQ in zip(P, Q))
def __hash__(self):
return hash(tuple(self.C))
def __call__(self, x): return Polynomial.eval(self, x)
def __getitem__(self, x): return self.C[x]
def __neg__(P): return Polynomial([-c for c in P.C])
def __add__(P, Q):
if len(P.C) < len(Q.C):
P, Q = Q, P
return Polynomial([P[d] + Q[d] for d in range(len(Q))] + P[len(Q):])
def __sub__(P, Q): return P + (-Q)
def _mulpoly(P, Q):
assert isinstance(Q, Polynomial)
return Polynomial([sum(P[k] * Q[d - k]
for k in range(max(0, d + 1 - len(Q)),
min(d + 1, len(P)))
) for d in range(len(P) + len(Q) - 1)])
def _mulscal(P, k):
return Polynomial([k * c for c in P])
def __mul__(P, Q):
if isinstance(Q, Polynomial):
return P._mulpoly(Q)
return P._mulscal(Q)
def __rmul__(P, Q):
return P * Q
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def __iter__(self):
yield from self.C
def | (A, B):
Q = [0 * B[0]] * max(0, len(A) - len(B) + 1)
while len(A.C) >= len(B.C):
Q[len(A.C) - len(B.C)] = A[-1] / B[-1]
A -= B.shift(len(A) - len(B)) * (A[-1] / B[-1])
return Polynomial(Q), A
def __floordiv__(A, B):
assert isinstance(B, Polynomial)
return A.euclidean_division(B)[0]
def __mod__(A, B):
"""
Polynomial euclidean division
or modular reduction
"""
if isinstance(B, Polynomial):
return A.euclidean_division(B)[1]
else:
assert isinstance(B, int)
assert all(isinstance(c, int) for c in A)
return A.reduceP(B)
def __lt__(A, B): return A.deg < B.deg
def __bool__(self): return bool(self.C)
def gcd(A, B):
while B:
A, B = B, A % B
return A * (1 / A[-1])
@staticmethod
def gaussianElimKer(M, zero, one):
"""
Outputs an element of the kernel of M, or None if there is none.
zero and one are elements of the same field.
"""
# V satisfies the invariant
# M = V M_0
V = [Polynomial([zero] * i + [one]) for i in range(len(M))]
pivots = [None] * (len(M) + 1)
for l in range(len(M)):
while M[l].deg >= 0:
idp = M[l].deg
if pivots[idp] is None:
pivots[idp] = l
break
else:
c = M[l][idp] / M[pivots[idp]][idp]
M[l] -= c * M[pivots[idp]]
V[l] -= c * V[pivots[idp]]
else:
# If a row has been reduced to zero, we have found an element of the kernel
return V[l]
return None
def computeQ(P):
# only for square-free polynomials in Z/pZ[X], with p prime
p = P[0].n
# We ignore the image of 1 because (F-Id)(1) = 0
M = [Polynomial(([ModInt(0, p)] * (i * p)) + [ModInt(1, p)]) % P
for i in range(1, P.deg)]
# M -= Id
for i in range(1, P.deg):
M[i - 1] -= Polynomial([ModInt(0, p)] * i + [ModInt(1, p)])
# We find an element of the kernel by Gaussian elimination
pQ = Polynomial.gaussianElimKer(M, ModInt(0, p), ModInt(1, p))
# We put back the 1 that was removed
return pQ.shift(1) if pQ is not None else None
def factor_unit(P):
"""
Berlekamp's algorithm
only in Z/pZ
"""
assert all(isinstance(c, ModInt) for c in P)
assert len(set(c.n for c in P)) == 1
if P.deg == 1:
return defaultdict(int, {P: 1})
p = P[0].n
S = Polynomial.gcd(P, P.prime())
if S.deg == P.deg:
# P' = 0 so P = R^p
R = Polynomial(P.C[::p])
return defaultdict(int,
{D: p * v
for D, v in Polynomial.factor_unit(R).items()})
else:
factors = defaultdict(int)
if S.deg:
for D, v in S.factor_unit().items():
factors[D] += v
P //= S
# P is now square-free
# We look for Q in Ker(F-Id) \ {1}
Q = Polynomial.computeQ(P)
if Q is None:
# P is irreducible
factors[P] += 1
else:
# P is the product of the gcd(P, Q-i)
# that are factored recursively
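# (added note) This is the standard Berlekamp splitting step: Q**p == Q (mod P),
# so P divides the product of (Q - i) for i = 0..p-1 and the gcds below split P.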
for i in range(p):
D = Polynomial.gcd(P, Q - Polynomial([ModInt(i, p)]))
if D.deg:
for DD, v in D.factor_unit().items():
factors[DD] += v
return factors
def factor(P):
"""
Factorization of P
only in Z/pZ
"""
cd = P[-1]
if P.deg == 0:
| euclidean_division | identifier_name |
models.go | json:"updated"`
Created time.Time `json:"created"`
}
func (r *RoleDTO) LogID() string {
var org string
if r.Global() {
org = "Global"
} else {
org = fmt.Sprintf("OrgId:%v", r.OrgID)
}
if r.UID != "" {
return fmt.Sprintf("[%s RoleUID:%v]", org, r.UID)
}
return fmt.Sprintf("[%s Role:%v]", org, r.Name)
}
func (r *RoleDTO) Role() Role {
return Role{
ID: r.ID,
OrgID: r.OrgID,
UID: r.UID,
Version: r.Version,
Name: r.Name,
DisplayName: r.DisplayName,
Group: r.Group,
Description: r.Description,
Hidden: r.Hidden,
Updated: r.Updated,
Created: r.Created,
}
}
func (r *RoleDTO) Global() bool {
return r.OrgID == GlobalOrgID
}
func (r *RoleDTO) IsManaged() bool {
return strings.HasPrefix(r.Name, ManagedRolePrefix)
}
func (r *RoleDTO) | () bool {
return strings.HasPrefix(r.Name, FixedRolePrefix)
}
func (r *RoleDTO) IsPlugin() bool {
return strings.HasPrefix(r.Name, PluginRolePrefix)
}
func (r *RoleDTO) IsBasic() bool {
return strings.HasPrefix(r.Name, BasicRolePrefix) || strings.HasPrefix(r.UID, BasicRoleUIDPrefix)
}
func (r *RoleDTO) IsExternalService() bool {
return strings.HasPrefix(r.Name, ExternalServiceRolePrefix) || strings.HasPrefix(r.UID, ExternalServiceRoleUIDPrefix)
}
func (r RoleDTO) MarshalJSON() ([]byte, error) {
type Alias RoleDTO
return json.Marshal(&struct {
Alias
Global bool `json:"global" xorm:"-"`
}{
Alias: (Alias)(r),
Global: r.Global(),
})
}
type TeamRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
OrgID int64 `json:"orgId" xorm:"org_id"`
RoleID int64 `json:"roleId" xorm:"role_id"`
TeamID int64 `json:"teamId" xorm:"team_id"`
Created time.Time
}
type UserRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
OrgID int64 `json:"orgId" xorm:"org_id"`
RoleID int64 `json:"roleId" xorm:"role_id"`
UserID int64 `json:"userId" xorm:"user_id"`
Created time.Time
}
type BuiltinRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
RoleID int64 `json:"roleId" xorm:"role_id"`
OrgID int64 `json:"orgId" xorm:"org_id"`
Role string
Updated time.Time
Created time.Time
}
// Permission is the model for access control permissions.
type Permission struct {
ID int64 `json:"-" xorm:"pk autoincr 'id'"`
RoleID int64 `json:"-" xorm:"role_id"`
Action string `json:"action"`
Scope string `json:"scope"`
Updated time.Time `json:"updated"`
Created time.Time `json:"created"`
}
func (p Permission) OSSPermission() Permission {
return Permission{
Action: p.Action,
Scope: p.Scope,
}
}
type GetUserPermissionsQuery struct {
OrgID int64
UserID int64
Roles []string
TeamIDs []int64
RolePrefixes []string
}
// ResourcePermission is a structure that holds all actions that either a team, user, or builtin role
// can perform against a specific resource.
type ResourcePermission struct {
ID int64
RoleName string
Actions []string
Scope string
UserId int64
UserLogin string
UserEmail string
TeamId int64
TeamEmail string
Team string
BuiltInRole string
IsManaged bool
IsInherited bool
Created time.Time
Updated time.Time
}
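// Contains reports whether this permission grants every action in targetActions.
// Illustrative example (added, not in the original file): a ResourcePermission with
// Actions = []string{"users:read", "users:write"} satisfies Contains([]string{"users:read"}).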
func (p *ResourcePermission) Contains(targetActions []string) bool {
if len(p.Actions) < len(targetActions) {
return false
}
var contain = func(arr []string, s string) bool {
for _, item := range arr {
if item == s {
return true
}
}
return false
}
for _, a := range targetActions {
if !contain(p.Actions, a) {
return false
}
}
return true
}
type SetResourcePermissionCommand struct {
UserID int64 `json:"userId,omitempty"`
TeamID int64 `json:"teamId,omitempty"`
BuiltinRole string `json:"builtInRole,omitempty"`
Permission string `json:"permission"`
}
type SaveExternalServiceRoleCommand struct {
OrgID int64
Global bool
ExternalServiceID string
ServiceAccountID int64
Permissions []Permission
}
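// Validate checks the command and normalizes it in place: the external service ID is
// slugified and duplicate permissions are dropped. Minimal usage sketch (added for
// illustration; not part of the original file):
//
//	cmd := SaveExternalServiceRoleCommand{
//		Global:            true,
//		ExternalServiceID: "my-app",
//		ServiceAccountID:  1,
//		Permissions:       []Permission{{Action: "users:read"}, {Action: "users:read"}},
//	}
//	err := cmd.Validate() // err == nil; len(cmd.Permissions) == 1 afterwards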
func (cmd *SaveExternalServiceRoleCommand) Validate() error {
if cmd.ExternalServiceID == "" {
return errors.New("external service id not specified")
}
// slugify the external service ID so that the role gets a correct name and UID
cmd.ExternalServiceID = slugify.Slugify(cmd.ExternalServiceID)
if (cmd.OrgID == GlobalOrgID) != cmd.Global {
return fmt.Errorf("invalid org id %d for global role %t", cmd.OrgID, cmd.Global)
}
// Check and deduplicate permissions
if cmd.Permissions == nil || len(cmd.Permissions) == 0 {
return errors.New("no permissions provided")
}
dedupMap := map[Permission]bool{}
dedup := make([]Permission, 0, len(cmd.Permissions))
for i := range cmd.Permissions {
if len(cmd.Permissions[i].Action) == 0 {
return fmt.Errorf("external service %v requests a permission with no Action", cmd.ExternalServiceID)
}
if dedupMap[cmd.Permissions[i]] {
continue
}
dedupMap[cmd.Permissions[i]] = true
dedup = append(dedup, cmd.Permissions[i])
}
cmd.Permissions = dedup
if cmd.ServiceAccountID <= 0 {
return fmt.Errorf("invalid service account id %d", cmd.ServiceAccountID)
}
return nil
}
const (
GlobalOrgID = 0
FixedRolePrefix = "fixed:"
ManagedRolePrefix = "managed:"
BasicRolePrefix = "basic:"
PluginRolePrefix = "plugins:"
ExternalServiceRolePrefix = "externalservice:"
BasicRoleUIDPrefix = "basic_"
ExternalServiceRoleUIDPrefix = "externalservice_"
RoleGrafanaAdmin = "Grafana Admin"
GeneralFolderUID = "general"
// Permission actions
ActionAPIKeyRead = "apikeys:read"
ActionAPIKeyCreate = "apikeys:create"
ActionAPIKeyDelete = "apikeys:delete"
// Users actions
ActionUsersRead = "users:read"
ActionUsersWrite = "users:write"
ActionUsersImpersonate = "users:impersonate"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersAuthTokenList = "users.authtoken:read"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersAuthTokenUpdate = "users.authtoken:write"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersPasswordUpdate = "users.password:write"
ActionUsersDelete = "users:delete"
ActionUsersCreate = "users:create"
ActionUsersEnable = "users:enable"
ActionUsersDisable = "users:disable"
ActionUsersPermissionsUpdate = "users.permissions:write"
ActionUsersLogout = "users:logout"
ActionUsersQuotasList = "users.quotas:read"
ActionUsersQuotasUpdate = "users.quotas:write"
ActionUsersPermissionsRead = "users.permissions:read"
// Org actions
ActionOrgsRead = "orgs:read"
ActionOrgsPreferencesRead = "orgs.preferences:read"
ActionOrgsQuotasRead = "orgs.quotas:read"
ActionOrgsWrite = "orgs:write"
ActionOrgsPreferencesWrite = "orgs.preferences:write"
ActionOrgsQuotasWrite = "orgs.quotas:write"
ActionOrgsDelete = "orgs:delete"
ActionOrgsCreate = "orgs:create"
ActionOrgUsersRead = "org.users:read"
ActionOrgUsersAdd = "org.users:add"
ActionOrgUsersRemove = "org.users:remove"
ActionOrgUsersWrite = "org.users:write"
// LDAP actions
ActionLDAPUsersRead = "ldap.user:read"
ActionLDAP | IsFixed | identifier_name |
models.go | :"updated"`
Created time.Time `json:"created"`
}
func (r *RoleDTO) LogID() string {
var org string
if r.Global() {
org = "Global"
} else {
org = fmt.Sprintf("OrgId:%v", r.OrgID)
}
if r.UID != "" {
return fmt.Sprintf("[%s RoleUID:%v]", org, r.UID)
}
return fmt.Sprintf("[%s Role:%v]", org, r.Name)
}
func (r *RoleDTO) Role() Role {
return Role{
ID: r.ID,
OrgID: r.OrgID,
UID: r.UID,
Version: r.Version,
Name: r.Name,
DisplayName: r.DisplayName,
Group: r.Group,
Description: r.Description,
Hidden: r.Hidden,
Updated: r.Updated,
Created: r.Created,
}
}
func (r *RoleDTO) Global() bool {
return r.OrgID == GlobalOrgID
}
func (r *RoleDTO) IsManaged() bool {
return strings.HasPrefix(r.Name, ManagedRolePrefix)
}
func (r *RoleDTO) IsFixed() bool {
return strings.HasPrefix(r.Name, FixedRolePrefix)
}
func (r *RoleDTO) IsPlugin() bool {
return strings.HasPrefix(r.Name, PluginRolePrefix)
}
func (r *RoleDTO) IsBasic() bool {
return strings.HasPrefix(r.Name, BasicRolePrefix) || strings.HasPrefix(r.UID, BasicRoleUIDPrefix)
}
func (r *RoleDTO) IsExternalService() bool {
return strings.HasPrefix(r.Name, ExternalServiceRolePrefix) || strings.HasPrefix(r.UID, ExternalServiceRoleUIDPrefix)
}
func (r RoleDTO) MarshalJSON() ([]byte, error) {
type Alias RoleDTO
return json.Marshal(&struct {
Alias
Global bool `json:"global" xorm:"-"`
}{
Alias: (Alias)(r),
Global: r.Global(),
})
}
type TeamRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
OrgID int64 `json:"orgId" xorm:"org_id"`
RoleID int64 `json:"roleId" xorm:"role_id"`
TeamID int64 `json:"teamId" xorm:"team_id"`
Created time.Time
}
type UserRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
OrgID int64 `json:"orgId" xorm:"org_id"`
RoleID int64 `json:"roleId" xorm:"role_id"`
UserID int64 `json:"userId" xorm:"user_id"`
Created time.Time
}
type BuiltinRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
RoleID int64 `json:"roleId" xorm:"role_id"`
OrgID int64 `json:"orgId" xorm:"org_id"`
Role string
Updated time.Time
Created time.Time
}
// Permission is the model for access control permissions.
type Permission struct {
ID int64 `json:"-" xorm:"pk autoincr 'id'"`
RoleID int64 `json:"-" xorm:"role_id"`
Action string `json:"action"`
Scope string `json:"scope"`
Updated time.Time `json:"updated"`
Created time.Time `json:"created"`
}
func (p Permission) OSSPermission() Permission {
return Permission{
Action: p.Action,
Scope: p.Scope,
}
}
type GetUserPermissionsQuery struct {
OrgID int64
UserID int64
Roles []string
TeamIDs []int64
RolePrefixes []string
}
// ResourcePermission is a structure that holds all actions that either a team, user, or builtin role
// can perform against a specific resource.
type ResourcePermission struct {
ID int64
RoleName string
Actions []string
Scope string
UserId int64
UserLogin string
UserEmail string
TeamId int64
TeamEmail string
Team string
BuiltInRole string
IsManaged bool
IsInherited bool
Created time.Time
Updated time.Time
}
func (p *ResourcePermission) Contains(targetActions []string) bool {
if len(p.Actions) < len(targetActions) {
return false
}
var contain = func(arr []string, s string) bool {
for _, item := range arr {
if item == s {
return true
}
}
return false
}
for _, a := range targetActions {
if !contain(p.Actions, a) {
return false
}
}
return true
}
type SetResourcePermissionCommand struct {
UserID int64 `json:"userId,omitempty"`
TeamID int64 `json:"teamId,omitempty"`
BuiltinRole string `json:"builtInRole,omitempty"`
Permission string `json:"permission"`
}
type SaveExternalServiceRoleCommand struct {
OrgID int64
Global bool
ExternalServiceID string
ServiceAccountID int64
Permissions []Permission
}
func (cmd *SaveExternalServiceRoleCommand) Validate() error {
if cmd.ExternalServiceID == "" {
return errors.New("external service id not specified")
}
// slugify the external service ID so that the role gets a correct name and UID
cmd.ExternalServiceID = slugify.Slugify(cmd.ExternalServiceID)
if (cmd.OrgID == GlobalOrgID) != cmd.Global {
return fmt.Errorf("invalid org id %d for global role %t", cmd.OrgID, cmd.Global)
}
// Check and deduplicate permissions
if cmd.Permissions == nil || len(cmd.Permissions) == 0 {
return errors.New("no permissions provided")
}
dedupMap := map[Permission]bool{}
dedup := make([]Permission, 0, len(cmd.Permissions))
for i := range cmd.Permissions {
if len(cmd.Permissions[i].Action) == 0 {
return fmt.Errorf("external service %v requests a permission with no Action", cmd.ExternalServiceID)
}
if dedupMap[cmd.Permissions[i]] |
dedupMap[cmd.Permissions[i]] = true
dedup = append(dedup, cmd.Permissions[i])
}
cmd.Permissions = dedup
if cmd.ServiceAccountID <= 0 {
return fmt.Errorf("invalid service account id %d", cmd.ServiceAccountID)
}
return nil
}
const (
GlobalOrgID = 0
FixedRolePrefix = "fixed:"
ManagedRolePrefix = "managed:"
BasicRolePrefix = "basic:"
PluginRolePrefix = "plugins:"
ExternalServiceRolePrefix = "externalservice:"
BasicRoleUIDPrefix = "basic_"
ExternalServiceRoleUIDPrefix = "externalservice_"
RoleGrafanaAdmin = "Grafana Admin"
GeneralFolderUID = "general"
// Permission actions
ActionAPIKeyRead = "apikeys:read"
ActionAPIKeyCreate = "apikeys:create"
ActionAPIKeyDelete = "apikeys:delete"
// Users actions
ActionUsersRead = "users:read"
ActionUsersWrite = "users:write"
ActionUsersImpersonate = "users:impersonate"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersAuthTokenList = "users.authtoken:read"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersAuthTokenUpdate = "users.authtoken:write"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersPasswordUpdate = "users.password:write"
ActionUsersDelete = "users:delete"
ActionUsersCreate = "users:create"
ActionUsersEnable = "users:enable"
ActionUsersDisable = "users:disable"
ActionUsersPermissionsUpdate = "users.permissions:write"
ActionUsersLogout = "users:logout"
ActionUsersQuotasList = "users.quotas:read"
ActionUsersQuotasUpdate = "users.quotas:write"
ActionUsersPermissionsRead = "users.permissions:read"
// Org actions
ActionOrgsRead = "orgs:read"
ActionOrgsPreferencesRead = "orgs.preferences:read"
ActionOrgsQuotasRead = "orgs.quotas:read"
ActionOrgsWrite = "orgs:write"
ActionOrgsPreferencesWrite = "orgs.preferences:write"
ActionOrgsQuotasWrite = "orgs.quotas:write"
ActionOrgsDelete = "orgs:delete"
ActionOrgsCreate = "orgs:create"
ActionOrgUsersRead = "org.users:read"
ActionOrgUsersAdd = "org.users:add"
ActionOrgUsersRemove = "org.users:remove"
ActionOrgUsersWrite = "org.users:write"
// LDAP actions
ActionLDAPUsersRead = "ldap.user:read"
ActionLDAP | {
continue
} | conditional_block |
models.go | json:"updated"`
Created time.Time `json:"created"`
}
func (r *RoleDTO) LogID() string {
var org string
if r.Global() {
org = "Global"
} else {
org = fmt.Sprintf("OrgId:%v", r.OrgID)
}
if r.UID != "" {
return fmt.Sprintf("[%s RoleUID:%v]", org, r.UID)
}
return fmt.Sprintf("[%s Role:%v]", org, r.Name)
}
func (r *RoleDTO) Role() Role {
return Role{
ID: r.ID,
OrgID: r.OrgID,
UID: r.UID,
Version: r.Version,
Name: r.Name,
DisplayName: r.DisplayName,
Group: r.Group,
Description: r.Description,
Hidden: r.Hidden,
Updated: r.Updated,
Created: r.Created,
}
}
func (r *RoleDTO) Global() bool {
return r.OrgID == GlobalOrgID
}
func (r *RoleDTO) IsManaged() bool {
return strings.HasPrefix(r.Name, ManagedRolePrefix)
}
func (r *RoleDTO) IsFixed() bool {
return strings.HasPrefix(r.Name, FixedRolePrefix)
}
func (r *RoleDTO) IsPlugin() bool {
return strings.HasPrefix(r.Name, PluginRolePrefix)
}
func (r *RoleDTO) IsBasic() bool {
return strings.HasPrefix(r.Name, BasicRolePrefix) || strings.HasPrefix(r.UID, BasicRoleUIDPrefix)
}
func (r *RoleDTO) IsExternalService() bool {
return strings.HasPrefix(r.Name, ExternalServiceRolePrefix) || strings.HasPrefix(r.UID, ExternalServiceRoleUIDPrefix)
}
func (r RoleDTO) MarshalJSON() ([]byte, error) {
type Alias RoleDTO
return json.Marshal(&struct {
Alias
Global bool `json:"global" xorm:"-"`
}{
Alias: (Alias)(r),
Global: r.Global(),
})
}
type TeamRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
OrgID int64 `json:"orgId" xorm:"org_id"`
RoleID int64 `json:"roleId" xorm:"role_id"`
TeamID int64 `json:"teamId" xorm:"team_id"`
Created time.Time
}
type UserRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
OrgID int64 `json:"orgId" xorm:"org_id"`
RoleID int64 `json:"roleId" xorm:"role_id"`
UserID int64 `json:"userId" xorm:"user_id"`
Created time.Time
}
type BuiltinRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
RoleID int64 `json:"roleId" xorm:"role_id"`
OrgID int64 `json:"orgId" xorm:"org_id"`
Role string
Updated time.Time
Created time.Time
}
// Permission is the model for access control permissions.
type Permission struct {
ID int64 `json:"-" xorm:"pk autoincr 'id'"`
RoleID int64 `json:"-" xorm:"role_id"`
Action string `json:"action"`
Scope string `json:"scope"`
Updated time.Time `json:"updated"`
Created time.Time `json:"created"`
}
func (p Permission) OSSPermission() Permission {
return Permission{
Action: p.Action,
Scope: p.Scope,
}
}
type GetUserPermissionsQuery struct {
OrgID int64
UserID int64
Roles []string
TeamIDs []int64
RolePrefixes []string
}
// ResourcePermission is a structure that holds all actions that either a team, user, or builtin role
// can perform against a specific resource.
type ResourcePermission struct {
ID int64
RoleName string
Actions []string
Scope string
UserId int64
UserLogin string
UserEmail string
TeamId int64
TeamEmail string
Team string
BuiltInRole string
IsManaged bool
IsInherited bool
Created time.Time
Updated time.Time
}
func (p *ResourcePermission) Contains(targetActions []string) bool {
if len(p.Actions) < len(targetActions) {
return false
}
var contain = func(arr []string, s string) bool {
for _, item := range arr {
if item == s {
return true
}
}
return false
}
for _, a := range targetActions {
if !contain(p.Actions, a) {
return false
}
}
return true
}
type SetResourcePermissionCommand struct {
UserID int64 `json:"userId,omitempty"`
TeamID int64 `json:"teamId,omitempty"` | }
type SaveExternalServiceRoleCommand struct {
OrgID int64
Global bool
ExternalServiceID string
ServiceAccountID int64
Permissions []Permission
}
func (cmd *SaveExternalServiceRoleCommand) Validate() error {
if cmd.ExternalServiceID == "" {
return errors.New("external service id not specified")
}
// slugify the external service id ID for the role to have correct name and uid
cmd.ExternalServiceID = slugify.Slugify(cmd.ExternalServiceID)
if (cmd.OrgID == GlobalOrgID) != cmd.Global {
return fmt.Errorf("invalid org id %d for global role %t", cmd.OrgID, cmd.Global)
}
// Check and deduplicate permissions
if cmd.Permissions == nil || len(cmd.Permissions) == 0 {
return errors.New("no permissions provided")
}
dedupMap := map[Permission]bool{}
dedup := make([]Permission, 0, len(cmd.Permissions))
for i := range cmd.Permissions {
if len(cmd.Permissions[i].Action) == 0 {
return fmt.Errorf("external service %v requests a permission with no Action", cmd.ExternalServiceID)
}
if dedupMap[cmd.Permissions[i]] {
continue
}
dedupMap[cmd.Permissions[i]] = true
dedup = append(dedup, cmd.Permissions[i])
}
cmd.Permissions = dedup
if cmd.ServiceAccountID <= 0 {
return fmt.Errorf("invalid service account id %d", cmd.ServiceAccountID)
}
return nil
}
const (
GlobalOrgID = 0
FixedRolePrefix = "fixed:"
ManagedRolePrefix = "managed:"
BasicRolePrefix = "basic:"
PluginRolePrefix = "plugins:"
ExternalServiceRolePrefix = "externalservice:"
BasicRoleUIDPrefix = "basic_"
ExternalServiceRoleUIDPrefix = "externalservice_"
RoleGrafanaAdmin = "Grafana Admin"
GeneralFolderUID = "general"
// Permission actions
ActionAPIKeyRead = "apikeys:read"
ActionAPIKeyCreate = "apikeys:create"
ActionAPIKeyDelete = "apikeys:delete"
// Users actions
ActionUsersRead = "users:read"
ActionUsersWrite = "users:write"
ActionUsersImpersonate = "users:impersonate"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersAuthTokenList = "users.authtoken:read"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersAuthTokenUpdate = "users.authtoken:write"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersPasswordUpdate = "users.password:write"
ActionUsersDelete = "users:delete"
ActionUsersCreate = "users:create"
ActionUsersEnable = "users:enable"
ActionUsersDisable = "users:disable"
ActionUsersPermissionsUpdate = "users.permissions:write"
ActionUsersLogout = "users:logout"
ActionUsersQuotasList = "users.quotas:read"
ActionUsersQuotasUpdate = "users.quotas:write"
ActionUsersPermissionsRead = "users.permissions:read"
// Org actions
ActionOrgsRead = "orgs:read"
ActionOrgsPreferencesRead = "orgs.preferences:read"
ActionOrgsQuotasRead = "orgs.quotas:read"
ActionOrgsWrite = "orgs:write"
ActionOrgsPreferencesWrite = "orgs.preferences:write"
ActionOrgsQuotasWrite = "orgs.quotas:write"
ActionOrgsDelete = "orgs:delete"
ActionOrgsCreate = "orgs:create"
ActionOrgUsersRead = "org.users:read"
ActionOrgUsersAdd = "org.users:add"
ActionOrgUsersRemove = "org.users:remove"
ActionOrgUsersWrite = "org.users:write"
// LDAP actions
ActionLDAPUsersRead = "ldap.user:read"
ActionLDAPUsers | BuiltinRole string `json:"builtInRole,omitempty"`
Permission string `json:"permission"` | random_line_split |
models.go | :"updated"`
Created time.Time `json:"created"`
}
func (r *RoleDTO) LogID() string {
var org string
if r.Global() {
org = "Global"
} else {
org = fmt.Sprintf("OrgId:%v", r.OrgID)
}
if r.UID != "" {
return fmt.Sprintf("[%s RoleUID:%v]", org, r.UID)
}
return fmt.Sprintf("[%s Role:%v]", org, r.Name)
}
func (r *RoleDTO) Role() Role {
return Role{
ID: r.ID,
OrgID: r.OrgID,
UID: r.UID,
Version: r.Version,
Name: r.Name,
DisplayName: r.DisplayName,
Group: r.Group,
Description: r.Description,
Hidden: r.Hidden,
Updated: r.Updated,
Created: r.Created,
}
}
func (r *RoleDTO) Global() bool {
return r.OrgID == GlobalOrgID
}
func (r *RoleDTO) IsManaged() bool |
func (r *RoleDTO) IsFixed() bool {
return strings.HasPrefix(r.Name, FixedRolePrefix)
}
func (r *RoleDTO) IsPlugin() bool {
return strings.HasPrefix(r.Name, PluginRolePrefix)
}
func (r *RoleDTO) IsBasic() bool {
return strings.HasPrefix(r.Name, BasicRolePrefix) || strings.HasPrefix(r.UID, BasicRoleUIDPrefix)
}
func (r *RoleDTO) IsExternalService() bool {
return strings.HasPrefix(r.Name, ExternalServiceRolePrefix) || strings.HasPrefix(r.UID, ExternalServiceRoleUIDPrefix)
}
func (r RoleDTO) MarshalJSON() ([]byte, error) {
type Alias RoleDTO
return json.Marshal(&struct {
Alias
Global bool `json:"global" xorm:"-"`
}{
Alias: (Alias)(r),
Global: r.Global(),
})
}
type TeamRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
OrgID int64 `json:"orgId" xorm:"org_id"`
RoleID int64 `json:"roleId" xorm:"role_id"`
TeamID int64 `json:"teamId" xorm:"team_id"`
Created time.Time
}
type UserRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
OrgID int64 `json:"orgId" xorm:"org_id"`
RoleID int64 `json:"roleId" xorm:"role_id"`
UserID int64 `json:"userId" xorm:"user_id"`
Created time.Time
}
type BuiltinRole struct {
ID int64 `json:"id" xorm:"pk autoincr 'id'"`
RoleID int64 `json:"roleId" xorm:"role_id"`
OrgID int64 `json:"orgId" xorm:"org_id"`
Role string
Updated time.Time
Created time.Time
}
// Permission is the model for access control permissions.
type Permission struct {
ID int64 `json:"-" xorm:"pk autoincr 'id'"`
RoleID int64 `json:"-" xorm:"role_id"`
Action string `json:"action"`
Scope string `json:"scope"`
Updated time.Time `json:"updated"`
Created time.Time `json:"created"`
}
func (p Permission) OSSPermission() Permission {
return Permission{
Action: p.Action,
Scope: p.Scope,
}
}
type GetUserPermissionsQuery struct {
OrgID int64
UserID int64
Roles []string
TeamIDs []int64
RolePrefixes []string
}
// ResourcePermission is a structure that holds all actions that either a team, user, or builtin role
// can perform against a specific resource.
type ResourcePermission struct {
ID int64
RoleName string
Actions []string
Scope string
UserId int64
UserLogin string
UserEmail string
TeamId int64
TeamEmail string
Team string
BuiltInRole string
IsManaged bool
IsInherited bool
Created time.Time
Updated time.Time
}
func (p *ResourcePermission) Contains(targetActions []string) bool {
if len(p.Actions) < len(targetActions) {
return false
}
var contain = func(arr []string, s string) bool {
for _, item := range arr {
if item == s {
return true
}
}
return false
}
for _, a := range targetActions {
if !contain(p.Actions, a) {
return false
}
}
return true
}
type SetResourcePermissionCommand struct {
UserID int64 `json:"userId,omitempty"`
TeamID int64 `json:"teamId,omitempty"`
BuiltinRole string `json:"builtInRole,omitempty"`
Permission string `json:"permission"`
}
type SaveExternalServiceRoleCommand struct {
OrgID int64
Global bool
ExternalServiceID string
ServiceAccountID int64
Permissions []Permission
}
func (cmd *SaveExternalServiceRoleCommand) Validate() error {
if cmd.ExternalServiceID == "" {
return errors.New("external service id not specified")
}
// slugify the external service id ID for the role to have correct name and uid
cmd.ExternalServiceID = slugify.Slugify(cmd.ExternalServiceID)
if (cmd.OrgID == GlobalOrgID) != cmd.Global {
return fmt.Errorf("invalid org id %d for global role %t", cmd.OrgID, cmd.Global)
}
// Check and deduplicate permissions
if cmd.Permissions == nil || len(cmd.Permissions) == 0 {
return errors.New("no permissions provided")
}
dedupMap := map[Permission]bool{}
dedup := make([]Permission, 0, len(cmd.Permissions))
for i := range cmd.Permissions {
if len(cmd.Permissions[i].Action) == 0 {
return fmt.Errorf("external service %v requests a permission with no Action", cmd.ExternalServiceID)
}
if dedupMap[cmd.Permissions[i]] {
continue
}
dedupMap[cmd.Permissions[i]] = true
dedup = append(dedup, cmd.Permissions[i])
}
cmd.Permissions = dedup
if cmd.ServiceAccountID <= 0 {
return fmt.Errorf("invalid service account id %d", cmd.ServiceAccountID)
}
return nil
}
const (
GlobalOrgID = 0
FixedRolePrefix = "fixed:"
ManagedRolePrefix = "managed:"
BasicRolePrefix = "basic:"
PluginRolePrefix = "plugins:"
ExternalServiceRolePrefix = "externalservice:"
BasicRoleUIDPrefix = "basic_"
ExternalServiceRoleUIDPrefix = "externalservice_"
RoleGrafanaAdmin = "Grafana Admin"
GeneralFolderUID = "general"
// Permission actions
ActionAPIKeyRead = "apikeys:read"
ActionAPIKeyCreate = "apikeys:create"
ActionAPIKeyDelete = "apikeys:delete"
// Users actions
ActionUsersRead = "users:read"
ActionUsersWrite = "users:write"
ActionUsersImpersonate = "users:impersonate"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersAuthTokenList = "users.authtoken:read"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersAuthTokenUpdate = "users.authtoken:write"
// We can ignore gosec G101 since this does not contain any credentials.
// nolint:gosec
ActionUsersPasswordUpdate = "users.password:write"
ActionUsersDelete = "users:delete"
ActionUsersCreate = "users:create"
ActionUsersEnable = "users:enable"
ActionUsersDisable = "users:disable"
ActionUsersPermissionsUpdate = "users.permissions:write"
ActionUsersLogout = "users:logout"
ActionUsersQuotasList = "users.quotas:read"
ActionUsersQuotasUpdate = "users.quotas:write"
ActionUsersPermissionsRead = "users.permissions:read"
// Org actions
ActionOrgsRead = "orgs:read"
ActionOrgsPreferencesRead = "orgs.preferences:read"
ActionOrgsQuotasRead = "orgs.quotas:read"
ActionOrgsWrite = "orgs:write"
ActionOrgsPreferencesWrite = "orgs.preferences:write"
ActionOrgsQuotasWrite = "orgs.quotas:write"
ActionOrgsDelete = "orgs:delete"
ActionOrgsCreate = "orgs:create"
ActionOrgUsersRead = "org.users:read"
ActionOrgUsersAdd = "org.users:add"
ActionOrgUsersRemove = "org.users:remove"
ActionOrgUsersWrite = "org.users:write"
// LDAP actions
ActionLDAPUsersRead = "ldap.user:read"
ActionLDAP | {
return strings.HasPrefix(r.Name, ManagedRolePrefix)
} | identifier_body |
hslcolor.rs | bound::Bound;
use color::{Color, RGBColor, XYZColor};
use coord::Coord;
use csscolor::{parse_hsl_hsv_tuple, CSSParseError};
use illuminants::Illuminant;
/// A color in the HSL color space, a direct transformation of the sRGB space. sHSL is used to
/// distinguish this space from a similar transformation of a different RGB space, which can cause
/// some confusion as other implementations of HSL (such as on the web) omit this distinction.
/// # Example
/// Shifting from red to yellow creates two colors of clearly different brightnesses. This is because
/// HSL doesn't account for the perceptual difference in brightness of light and dark colors.
///
/// ```
/// # use scarlet::prelude::*;
/// # use scarlet::colors::HSLColor;
/// let red = HSLColor{h: 20., s: 0.5, l: 0.5};
/// let yellow = HSLColor{h: 60., s: 0.5, l: 0.5};
/// println!("{} {}", red.convert::<RGBColor>().to_string(), yellow.convert::<RGBColor>().to_string());
/// // prints #BF6A40 #BFBF40
/// // note how the second one is strictly more light
/// ```
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct HSLColor {
/// The hue component. Ranges from 0 to 360, as the angle in a cylindrical space. Exactly the same
/// as the hue component of HSV.
pub h: f64,
/// The saturation component. Ranges between 0 and 1. Note that this is much less accurate to
/// human perception than the chroma or saturation found in other, higher-fidelity color spaces.
pub s: f64,
/// The lightness component. Ranges from 0 to 1. Defined in HSL as the average of the largest and
/// smallest color components in RGB, which sacrifices accuracy for convenience.
pub l: f64,
}
impl Color for HSLColor {
/// Converts from XYZ to HSL through RGB: thus, there is a limited precision because RGB colors
/// are limited to integer values of R, G, and B.
fn from_xyz(xyz: XYZColor) -> HSLColor {
// first get RGB color
let rgb = RGBColor::from_xyz(xyz);
// this is sorta interesting: a hexagonal projection instead of the circular projection used
// in CIEHCL. It turns out that, if you tilt the RGB cube and project it into a hexagon, the
// equivalent of radius is simply the largest component minus the smallest component: adding
// a constant to every component simply travels up and down vertically and doesn't change the
// projection.
// I call this chroma, but it's a very very rough estimate of the actual color attribute.
// More info: https://en.wikipedia.org/wiki/HSL_and_HSV#Formal_derivation
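// Illustrative check (added, not in the original source): for components
// (r, g, b) = (1.0, 0.5, 0.0), max_c = 1.0 and min_c = 0.0, so chroma = 1.0,
// hue = ((0.5 - 0.0) / 1.0 % 6.0) * 60.0 = 30 degrees, lightness = 0.5, and
// saturation = 1.0 / (1.0 - (2.0 * 0.5 - 1.0).abs()) = 1.0, i.e. HSL(30, 1.0, 0.5).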
let components = [rgb.r, rgb.g, rgb.b];
let max_c = components.iter().cloned().fold(-1.0, f64::max);
let min_c = components.iter().cloned().fold(2.0, f64::min);
let chroma = max_c - min_c;
// hue is crazy in a hexagon! no more trig functions for us!
// it's technically the proportion of the length of the hexagon through the point, but it's
// treated as degrees
let mut hue = if chroma == 0.0 {
// could be anything, undefined according to Wikipedia, in Scarlet just 0 for gray
0.0
} else if (max_c - rgb.r).abs() < EPSILON {
// in red sector: find which part by comparing green and blue and scaling
// adding green moves up on the hexagon, adding blue moves down: hence, linearity
// the modulo makes sure it's in the range 0-360
(((rgb.g - rgb.b) / chroma) % 6.0) * 60.0
} else if (max_c - rgb.g).abs() < EPSILON {
// similar to above, but you add an offset
(((rgb.b - rgb.r) / chroma) % 6.0) * 60.0 + 120.0
} else {
// same as above, different offset
(((rgb.r - rgb.g) / chroma) % 6.0) * 60.0 + 240.0
};
// if the hue is still not in [0, 360), wrap it around: this can sometimes happen
while hue < 0. {
hue += 360.;
}
while hue >= 360. {
hue -= 360.;
}
// saturation, scientifically speaking, is chroma adjusted for lightness. For HSL, it's
// defined relative to the maximum chroma, which varies depending on the place on the
// cone. Thus, I'll compute lightness first.
// now we choose lightness as the average of the largest and smallest components. This
// essentially translates to a double hex cone, quite the interesting structure!
let lightness = (max_c + min_c) / 2.0;
// now back to saturation
let saturation = if (lightness - 1.0).abs() < EPSILON || lightness == 0.0 {
// this would be a divide by 0 otherwise, just set it to 0 because it doesn't matter
0.0
} else {
chroma / (1.0 - (2.0 * lightness - 1.0).abs())
};
HSLColor {
h: hue,
s: saturation,
l: lightness,
}
}
// Converts back to XYZ through RGB.
fn to_xyz(&self, illuminant: Illuminant) -> XYZColor {
// first get back chroma
let chroma = (1.0 - (2.0 * self.l - 1.0).abs()) * self.s;
// find the point with 0 lightness that matches ours in the other two components
// intermediate value is the second-largest RGB value, where C is the largest because the
// smallest is 0: call this x
let x = chroma * (1.0 - ((self.h / 60.0) % 2.0 - 1.0).abs());
// now split based on which line of the hexagon we're on, i.e., which are the two largest
// components
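// Illustrative check (added): for HSL(30, 1.0, 0.5), chroma = 1.0 and x = 0.5, so the
// first branch gives (r1, g1, b1) = (1.0, 0.5, 0.0); the offset below is 0.5 - 0.5 = 0.0,
// recovering the components (1.0, 0.5, 0.0) from the example above.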
let (r1, g1, b1) = if self.h <= 60.0 {
(chroma, x, 0.0)
} else if self.h <= 120.0 {
(x, chroma, 0.0)
} else if self.h <= 180.0 {
(0.0, chroma, x)
} else if self.h <= 240.0 {
(0.0, x, chroma)
} else if self.h <= 300.0 {
(x, 0.0, chroma)
} else {
(chroma, 0.0, x)
};
// now we add the right value to each component to get the correct lightness, then
// convert back to XYZ through RGB
let offset = self.l - chroma / 2.0;
let r = r1 + offset;
let g = g1 + offset;
let b = b1 + offset;
RGBColor { r, g, b }.to_xyz(illuminant)
}
}
impl From<Coord> for HSLColor {
fn from(c: Coord) -> HSLColor {
HSLColor {
h: c.x,
s: c.y,
l: c.z,
}
}
}
impl From<HSLColor> for Coord {
fn from(val: HSLColor) -> Self {
Coord {
x: val.h,
y: val.s,
z: val.l,
}
}
}
impl Bound for HSLColor {
fn bounds() -> [(f64, f64); 3] {
[(0., 360.), (0., 1.), (0., 1.)]
}
}
impl FromStr for HSLColor {
type Err = CSSParseError;
fn from_str(s: &str) -> Result<HSLColor, CSSParseError> {
if !s.starts_with("hsl(") {
return Err(CSSParseError::InvalidColorSyntax);
}
let tup: String = s.chars().skip(3).collect::<String>();
match parse_hsl_hsv_tuple(&tup) {
Ok(res) => Ok(HSLColor {
h: res.0,
s: res.1,
l: res.2,
}),
Err(_e) => Err(_e),
}
}
}
#[cfg(test)]
mod tests {
#[allow(unused_imports)]
use super::*;
use consts::TEST_PRECISION; | random_line_split |
||
hslcolor.rs | agon but simply stretched into a circle, and the area of a
//! horizontal cross-section varies with lightness. A special note: some implementations of HSV and
//! HSL are circular in nature, using polar coordinates explicitly. This implementation is instead
//! hexagonal: first values are put on a hexagon, and then that hexagon is "squeezed" into a
//! circle. This can cause small variations between Scarlet and other applications.
//! Another small implementation note is that converting gray into HSL or HSV will give a hue of 0
//! degrees, although any hue could be used in its place.
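//! For example (an added illustrative note), mid gray has equal R, G, and B components,
//! so its chroma is 0 and it is reported with hue 0 and saturation 0.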
use std::f64;
use std::f64::EPSILON;
use std::str::FromStr;
use bound::Bound;
use color::{Color, RGBColor, XYZColor};
use coord::Coord;
use csscolor::{parse_hsl_hsv_tuple, CSSParseError};
use illuminants::Illuminant;
/// A color in the HSL color space, a direct transformation of the sRGB space. sHSL is used to
/// distinguish this space from a similar transformation of a different RGB space, which can cause
/// some confusion as other implementations of HSL (such as on the web) omit this distinction.
/// # Example
/// Shifting from red to yellow creates two colors of clearly different brightnesses. This is because
/// HSL doesn't account for the perceptual difference in brightness of light and dark colors.
///
/// ```
/// # use scarlet::prelude::*;
/// # use scarlet::colors::HSLColor;
/// let red = HSLColor{h: 20., s: 0.5, l: 0.5};
/// let yellow = HSLColor{h: 60., s: 0.5, l: 0.5};
/// println!("{} {}", red.convert::<RGBColor>().to_string(), yellow.convert::<RGBColor>().to_string());
/// // prints #BF6A40 #BFBF40
/// // note how the second one is strictly more light
/// ```
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct HSLColor {
/// The hue component. Ranges from 0 to 360, as the angle in a cylindrical space. Exactly the same
/// as the hue component of HSV.
pub h: f64,
/// The saturation component. Ranges between 0 and 1. Note that this is much less accurate to
/// human perception than the chroma or saturation found in other, higher-fidelity color spaces.
pub s: f64,
/// The lightness component. Ranges from 0 to 1. Defined in HSL as the average of the largest and
/// smallest color components in RGB, which sacrifices accuracy for convenience.
pub l: f64,
}
impl Color for HSLColor {
/// Converts from XYZ to HSL through RGB: thus, there is a limited precision because RGB colors
/// are limited to integer values of R, G, and B.
fn from_xyz(xyz: XYZColor) -> HSLColor {
// first get RGB color
let rgb = RGBColor::from_xyz(xyz);
// this is sorta interesting: a hexagonal projection instead of the circular projection used
// in CIEHCL. It turns out that, if you tilt the RGB cube and project it into a hexagon, the
// equivalent of radius is simply the largest component minus the smallest component: adding
// a constant to every component simply travels up and down vertically and doesn't change the
// projection.
// I call this chroma, but it's a very very rough estimate of the actual color attribute.
// More info: https://en.wikipedia.org/wiki/HSL_and_HSV#Formal_derivation
let components = [rgb.r, rgb.g, rgb.b];
let max_c = components.iter().cloned().fold(-1.0, f64::max);
let min_c = components.iter().cloned().fold(2.0, f64::min);
let chroma = max_c - min_c;
// hue is crazy in a hexagon! no more trig functions for us!
// it's technically the proportion of the length of the hexagon through the point, but it's
// treated as degrees
let mut hue = if chroma == 0.0 {
// could be anything, undefined according to Wikipedia, in Scarlet just 0 for gray
0.0
} else if (max_c - rgb.r).abs() < EPSILON {
// in red sector: find which part by comparing green and blue and scaling
// adding green moves up on the hexagon, adding blue moves down: hence, linearity
// the modulo makes sure it's in the range 0-360
(((rgb.g - rgb.b) / chroma) % 6.0) * 60.0
} else if (max_c - rgb.g).abs() < EPSILON {
// similar to above, but you add an offset
(((rgb.b - rgb.r) / chroma) % 6.0) * 60.0 + 120.0
} else {
// same as above, different offset
(((rgb.r - rgb.g) / chroma) % 6.0) * 60.0 + 240.0
};
// if the hue is still not in [0, 360), wrap it around: this can sometimes happen
while hue < 0. {
hue += 360.;
}
while hue >= 360. {
hue -= 360.;
}
// saturation, scientifically speaking, is chroma adjusted for lightness. For HSL, it's
// defined relative to the maximum chroma, which varies depending on the place on the
// cone. Thus, I'll compute lightness first.
// now we choose lightness as the average of the largest and smallest components. This
// essentially translates to a double hex cone, quite the interesting structure!
let lightness = (max_c + min_c) / 2.0;
// now back to saturation
let saturation = if (lightness - 1.0).abs() < EPSILON || lightness == 0.0 {
// this would be a divide by 0 otherwise, just set it to 0 because it doesn't matter
0.0
} else {
chroma / (1.0 - (2.0 * lightness - 1.0).abs())
};
HSLColor {
h: hue,
s: saturation,
l: lightness,
}
}
// Converts back to XYZ through RGB.
fn to_xyz(&self, illuminant: Illuminant) -> XYZColor {
// first get back chroma
let chroma = (1.0 - (2.0 * self.l - 1.0).abs()) * self.s;
// find the point with 0 lightness that matches ours in the other two components
// intermediate value is the second-largest RGB value, where C is the largest because the
// smallest is 0: call this x
let x = chroma * (1.0 - ((self.h / 60.0) % 2.0 - 1.0).abs());
// now split based on which line of the hexagon we're on, i.e., which are the two largest
// components
let (r1, g1, b1) = if self.h <= 60.0 {
(chroma, x, 0.0)
} else if self.h <= 120.0 {
(x, chroma, 0.0)
} else if self.h <= 180.0 {
(0.0, chroma, x)
} else if self.h <= 240.0 {
(0.0, x, chroma)
} else if self.h <= 300.0 {
(x, 0.0, chroma)
} else {
(chroma, 0.0, x)
};
// now we add the right value to each component to get the correct lightness, then
// convert back to XYZ through RGB
let offset = self.l - chroma / 2.0;
let r = r1 + offset;
let g = g1 + offset;
let b = b1 + offset;
RGBColor { r, g, b }.to_xyz(illuminant)
}
}
impl From<Coord> for HSLColor {
fn from(c: Coord) -> HSLColor |
}
impl From<HSLColor> for Coord {
fn from(val: HSLColor) -> Self {
Coord {
x: val.h,
y: val.s,
z: val.l,
}
}
}
impl Bound for HSLColor {
fn bounds() -> [(f64, f64); 3] {
[(0., 360.), (0., 1.), (0., 1.)]
}
}
impl FromStr for HSLColor {
type Err | {
HSLColor {
h: c.x,
s: c.y,
l: c.z,
}
} | identifier_body |
hslcolor.rs | agon but simply stretched into a circle, and the area of a
//! horizontal cross-section varies with lightness. A special note: some implementations of HSV and
//! HSL are circular in nature, using polar coordinates explicitly. This implementation is instead
//! hexagonal: first values are put on a hexagon, and then that hexagon is "squeezed" into a
//! circle. This can cause small variations between Scarlet and other applications.
//! Another small implementation note is that converting gray into HSL or HSV will give a hue of 0
//! degrees, although any hue could be used in its place.
use std::f64;
use std::f64::EPSILON;
use std::str::FromStr;
use bound::Bound;
use color::{Color, RGBColor, XYZColor};
use coord::Coord;
use csscolor::{parse_hsl_hsv_tuple, CSSParseError};
use illuminants::Illuminant;
/// A color in the HSL color space, a direct transformation of the sRGB space. sHSL is used to
/// distinguish this space from a similar transformation of a different RGB space, which can cause
/// some confusion as other implementations of HSL (such as on the web) omit this distinction.
/// # Example
/// Shifting from red to yellow creates two colors of clearly different brightnesses. This is because
/// HSL doesn't account for the perceptual difference in brightness of light and dark colors.
///
/// ```
/// # use scarlet::prelude::*;
/// # use scarlet::colors::HSLColor;
/// let red = HSLColor{h: 20., s: 0.5, l: 0.5};
/// let yellow = HSLColor{h: 60., s: 0.5, l: 0.5};
/// println!("{} {}", red.convert::<RGBColor>().to_string(), yellow.convert::<RGBColor>().to_string());
/// // prints #BF6A40 #BFBF40
/// // note how the second one is strictly more light
/// ```
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct HSLColor {
/// The hue component. Ranges from 0 to 360, as the angle in a cylindrical space. Exactly the same
/// as the hue component of HSV.
pub h: f64,
/// The saturation component. Ranges between 0 and 1. Note that this is much less accurate to
/// human perception than the chroma or saturation found in other, higher-fidelity color spaces.
pub s: f64,
/// The lightness component. Ranges from 0 to 1. Defined in HSL as the average of the largest and
/// smallest color components in RGB, which sacrifices accuracy for convenience.
pub l: f64,
}
impl Color for HSLColor {
/// Converts from XYZ to HSL through RGB: thus, there is a limited precision because RGB colors
/// are limited to integer values of R, G, and B.
fn from_xyz(xyz: XYZColor) -> HSLColor {
// first get RGB color
let rgb = RGBColor::from_xyz(xyz);
// this is sorta interesting: a hexagonal projection instead of the circular projection used
// in CIEHCL. It turns out that, if you tilt the RGB cube and project it into a hexagon, the
// equivalent of radius is simply the largest component minus the smallest component: adding
// a constant to every component simply travels up and down vertically and doesn't change the
// projection.
// I call this chroma, but it's a very very rough estimate of the actual color attribute.
// More info: https://en.wikipedia.org/wiki/HSL_and_HSV#Formal_derivation
let components = [rgb.r, rgb.g, rgb.b];
let max_c = components.iter().cloned().fold(-1.0, f64::max);
let min_c = components.iter().cloned().fold(2.0, f64::min);
let chroma = max_c - min_c;
// hue is crazy in a hexagon! no more trig functions for us!
// it's technically the proportion of the length of the hexagon through the point, but it's
// treated as degrees
let mut hue = if chroma == 0.0 {
// could be anything, undefined according to Wikipedia, in Scarlet just 0 for gray
0.0
} else if (max_c - rgb.r).abs() < EPSILON {
// in red sector: find which part by comparing green and blue and scaling
// adding green moves up on the hexagon, adding blue moves down: hence, linearity
// the modulo makes sure it's in the range 0-360
(((rgb.g - rgb.b) / chroma) % 6.0) * 60.0
} else if (max_c - rgb.g).abs() < EPSILON {
// similar to above, but you add an offset
(((rgb.b - rgb.r) / chroma) % 6.0) * 60.0 + 120.0
} else {
// same as above, different offset
(((rgb.r - rgb.g) / chroma) % 6.0) * 60.0 + 240.0
};
// if the hue is still not in [0, 360), wrap it around: this can sometimes happen
while hue < 0. {
hue += 360.;
}
while hue >= 360. {
hue -= 360.;
}
// saturation, scientifically speaking, is chroma adjusted for lightness. For HSL, it's
// defined relative to the maximum chroma, which varies depending on the place on the
// cone. Thus, I'll compute lightness first.
// now we choose lightness as the average of the largest and smallest components. This
// essentially translates to a double hex cone, quite the interesting structure!
let lightness = (max_c + min_c) / 2.0;
// now back to saturation
let saturation = if (lightness - 1.0).abs() < EPSILON || lightness == 0.0 {
// this would be a divide by 0 otherwise, just set it to 0 because it doesn't matter
0.0
} else {
chroma / (1.0 - (2.0 * lightness - 1.0).abs())
};
HSLColor {
h: hue,
s: saturation,
l: lightness,
}
}
// Converts back to XYZ through RGB.
fn to_xyz(&self, illuminant: Illuminant) -> XYZColor {
// first get back chroma
let chroma = (1.0 - (2.0 * self.l - 1.0).abs()) * self.s;
// find the point with 0 lightness that matches ours in the other two components
// intermediate value is the second-largest RGB value, where C is the largest because the
// smallest is 0: call this x
let x = chroma * (1.0 - ((self.h / 60.0) % 2.0 - 1.0).abs());
// now split based on which line of the hexagon we're on, i.e., which are the two largest
// components
let (r1, g1, b1) = if self.h <= 60.0 {
(chroma, x, 0.0)
} else if self.h <= 120.0 | else if self.h <= 180.0 {
(0.0, chroma, x)
} else if self.h <= 240.0 {
(0.0, x, chroma)
} else if self.h <= 300.0 {
(x, 0.0, chroma)
} else {
(chroma, 0.0, x)
};
// now we add the right value to each component to get the correct lightness, then
// convert back to XYZ through RGB
let offset = self.l - chroma / 2.0;
let r = r1 + offset;
let g = g1 + offset;
let b = b1 + offset;
RGBColor { r, g, b }.to_xyz(illuminant)
}
}
impl From<Coord> for HSLColor {
fn from(c: Coord) -> HSLColor {
HSLColor {
h: c.x,
s: c.y,
l: c.z,
}
}
}
impl From<HSLColor> for Coord {
fn from(val: HSLColor) -> Self {
Coord {
x: val.h,
y: val.s,
z: val.l,
}
}
}
impl Bound for HSLColor {
fn bounds() -> [(f64, f64); 3] {
[(0., 360.), (0., 1.), (0., 1.)]
}
}
impl FromStr for HSLColor {
type Err = | {
(x, chroma, 0.0)
} | conditional_block |
hslcolor.rs | XYZColor};
use coord::Coord;
use csscolor::{parse_hsl_hsv_tuple, CSSParseError};
use illuminants::Illuminant;
/// A color in the HSL color space, a direct transformation of the sRGB space. sHSL is used to
/// distinguish this space from a similar transformation of a different RGB space, which can cause
/// some confusion as other implementations of HSL (such as on the web) omit this distinction.
/// # Example
/// Shifting from red to yellow creates two colors of clearly different brightnesses. This is because
/// HSL doesn't account for the perceptual difference in brightness of light and dark colors.
///
/// ```
/// # use scarlet::prelude::*;
/// # use scarlet::colors::HSLColor;
/// let red = HSLColor{h: 20., s: 0.5, l: 0.5};
/// let yellow = HSLColor{h: 60., s: 0.5, l: 0.5};
/// println!("{} {}", red.convert::<RGBColor>().to_string(), yellow.convert::<RGBColor>().to_string());
/// // prints #BF6A40 #BFBF40
/// // note how the second one is strictly lighter
/// ```
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct HSLColor {
/// The hue component. Ranges from 0 to 360, as the angle in a cylindrical space. Exactly the same
/// as the hue component of HSV.
pub h: f64,
/// The saturation component. Ranges between 0 and 1. Note that this is much less accurate to
/// human perception than the chroma or saturation found in other, higher-fidelity color spaces.
pub s: f64,
/// The lightness component. Ranges from 0 to 1. Defined in HSL as the average of the largest and
/// smallest color components in RGB, which sacrifices accuracy for convenience.
pub l: f64,
}
impl Color for HSLColor {
/// Converts from XYZ to HSL through RGB: thus, there is a limited precision because RGB colors
/// are limited to integer values of R, G, and B.
fn from_xyz(xyz: XYZColor) -> HSLColor {
// first get RGB color
let rgb = RGBColor::from_xyz(xyz);
// this is sorta interesting: a hexagonal projection instead of the circular projection used
// in CIEHCL. It turns out that, if you tilt the RGB cube and project it into a hexagon, the
// equivalent of radius is simply the largest component minus the smallest component: adding
// a constant to every component simply travels up and down vertically and doesn't change the
// projection.
// I call this chroma, but it's a very very rough estimate of the actual color attribute.
// More info: https://en.wikipedia.org/wiki/HSL_and_HSV#Formal_derivation
let components = [rgb.r, rgb.g, rgb.b];
let max_c = components.iter().cloned().fold(-1.0, f64::max);
let min_c = components.iter().cloned().fold(2.0, f64::min);
let chroma = max_c - min_c;
// hue is crazy in a hexagon! no more trig functions for us!
// it's technically the proportion of the length of the hexagon through the point, but it's
// treated as degrees
let mut hue = if chroma == 0.0 {
// could be anything, undefined according to Wikipedia, in Scarlet just 0 for gray
0.0
} else if (max_c - rgb.r).abs() < EPSILON {
// in red sector: find which part by comparing green and blue and scaling
// adding green moves up on the hexagon, adding blue moves down: hence, linearity
// the modulo makes sure it's in the range 0-360
(((rgb.g - rgb.b) / chroma) % 6.0) * 60.0
} else if (max_c - rgb.g).abs() < EPSILON {
// similar to above, but you add an offset
(((rgb.b - rgb.r) / chroma) % 6.0) * 60.0 + 120.0
} else {
// same as above, different offset
(((rgb.r - rgb.g) / chroma) % 6.0) * 60.0 + 240.0
};
        // if hue is still not in 0-360, add or subtract 360 until it is: this can happen because
        // Rust's % keeps the sign of its left operand, so the red sector can produce a negative value
while hue < 0. {
hue += 360.;
}
while hue >= 360. {
hue -= 360.;
}
// saturation, scientifically speaking, is chroma adjusted for lightness. For HSL, it's
// defined relative to the maximum chroma, which varies depending on the place on the
// cone. Thus, I'll compute lightness first.
// now we choose lightness as the average of the largest and smallest components. This
// essentially translates to a double hex cone, quite the interesting structure!
let lightness = (max_c + min_c) / 2.0;
// now back to saturation
let saturation = if (lightness - 1.0).abs() < EPSILON || lightness == 0.0 {
// this would be a divide by 0 otherwise, just set it to 0 because it doesn't matter
0.0
} else {
chroma / (1.0 - (2.0 * lightness - 1.0).abs())
};
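        // worked example (illustrative): for r = 0.75, g = 0.5, b = 0.25 we get chroma = 0.5,
        // hue = ((0.5 - 0.25) / 0.5) * 60.0 = 30 degrees, lightness = (0.75 + 0.25) / 2.0 = 0.5,
        // and saturation = 0.5 / (1.0 - |2.0 * 0.5 - 1.0|) = 0.5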
HSLColor {
h: hue,
s: saturation,
l: lightness,
}
}
// Converts back to XYZ through RGB.
fn to_xyz(&self, illuminant: Illuminant) -> XYZColor {
// first get back chroma
let chroma = (1.0 - (2.0 * self.l - 1.0).abs()) * self.s;
// find the point with 0 lightness that matches ours in the other two components
// intermediate value is the second-largest RGB value, where C is the largest because the
// smallest is 0: call this x
let x = chroma * (1.0 - ((self.h / 60.0) % 2.0 - 1.0).abs());
// now split based on which line of the hexagon we're on, i.e., which are the two largest
// components
let (r1, g1, b1) = if self.h <= 60.0 {
(chroma, x, 0.0)
} else if self.h <= 120.0 {
(x, chroma, 0.0)
} else if self.h <= 180.0 {
(0.0, chroma, x)
} else if self.h <= 240.0 {
(0.0, x, chroma)
} else if self.h <= 300.0 {
(x, 0.0, chroma)
} else {
(chroma, 0.0, x)
};
// now we add the right value to each component to get the correct lightness and scale back
// to 0-255
let offset = self.l - chroma / 2.0;
let r = r1 + offset;
let g = g1 + offset;
let b = b1 + offset;
RGBColor { r, g, b }.to_xyz(illuminant)
}
}
impl From<Coord> for HSLColor {
fn from(c: Coord) -> HSLColor {
HSLColor {
h: c.x,
s: c.y,
l: c.z,
}
}
}
impl From<HSLColor> for Coord {
fn from(val: HSLColor) -> Self {
Coord {
x: val.h,
y: val.s,
z: val.l,
}
}
}
impl Bound for HSLColor {
fn bounds() -> [(f64, f64); 3] {
[(0., 360.), (0., 1.), (0., 1.)]
}
}
impl FromStr for HSLColor {
type Err = CSSParseError;
fn from_str(s: &str) -> Result<HSLColor, CSSParseError> {
if !s.starts_with("hsl(") {
return Err(CSSParseError::InvalidColorSyntax);
}
let tup: String = s.chars().skip(3).collect::<String>();
match parse_hsl_hsv_tuple(&tup) {
Ok(res) => Ok(HSLColor {
h: res.0,
s: res.1,
l: res.2,
}),
Err(_e) => Err(_e),
}
}
}
#[cfg(test)]
mod tests {
#[allow(unused_imports)]
use super::*;
use consts::TEST_PRECISION;
#[test]
fn | test_hsl_rgb_conversion | identifier_name |
|
channel_layout.rs | ,
Channel::TopSideLeft,
Channel::TopSideRight,
Channel::TopBackCenter,
Channel::BottomFrontCenter,
Channel::BottomFrontLeft,
Channel::BottomFrontRight,
]);
pub const _2POINT1: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::LowFrequency]);
pub const _2_1: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::BackCenter]);
pub const _2_2: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::SideLeft, Channel::SideRight]);
pub const _3POINT1: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::LowFrequency]);
pub const _4POINT0: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::BackCenter]);
pub const _4POINT1: ChannelLayout = Self::_4POINT0.with_channels_native(&[Channel::LowFrequency]);
pub const _5POINT0: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::SideLeft, Channel::SideRight]);
pub const _5POINT0_BACK: ChannelLayout =
Self::SURROUND.with_channels_native(&[Channel::BackLeft, Channel::BackRight]);
pub const _5POINT1: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::LowFrequency]);
pub const _5POINT1_BACK: ChannelLayout = Self::_5POINT0_BACK.with_channels_native(&[Channel::LowFrequency]);
pub const _6POINT0: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::BackCenter]);
pub const _6POINT0_FRONT: ChannelLayout =
Self::_2_2.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]);
pub const _6POINT1: ChannelLayout = Self::_5POINT1.with_channels_native(&[Channel::BackCenter]);
pub const _6POINT1_BACK: ChannelLayout = Self::_5POINT1_BACK.with_channels_native(&[Channel::BackCenter]);
pub const _6POINT1_FRONT: ChannelLayout = Self::_6POINT0_FRONT.with_channels_native(&[Channel::LowFrequency]);
pub const _7POINT0: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::BackLeft, Channel::BackRight]);
pub const _7POINT0_FRONT: ChannelLayout =
Self::_5POINT0.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]);
pub const _7POINT1: ChannelLayout = Self::_5POINT1.with_channels_native(&[Channel::BackLeft, Channel::BackRight]);
pub const _7POINT1_TOP_BACK: ChannelLayout =
Self::_5POINT1_BACK.with_channels_native(&[Channel::TopFrontLeft, Channel::TopFrontRight]);
pub const _7POINT1_WIDE: ChannelLayout =
Self::_5POINT1.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]);
pub const _7POINT1_WIDE_BACK: ChannelLayout =
Self::_5POINT1_BACK.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]);
}
impl From<ChannelLayout> for AVChannelLayout {
fn from(v: ChannelLayout) -> AVChannelLayout {
v.0
}
}
impl From<AVChannelLayout> for ChannelLayout {
fn from(v: AVChannelLayout) -> ChannelLayout {
Self(v)
}
}
impl fmt::Debug for ChannelLayout {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut d = f.debug_struct("ChannelLayout");
d.field("order", &self.0.order);
d.field("nb_channels", &self.0.nb_channels);
if let Some(custom) = self.custom_channels() {
d.field("map", &custom);
}
else {
unsafe {
d.field("mask", &self.0.u.mask);
}
}
d.field("opaque", &self.0.opaque);
d.finish()
}
}
impl PartialEq for ChannelLayout {
fn eq(&self, other: &ChannelLayout) -> bool {
unsafe {
let ord = av_channel_layout_compare(self.as_ptr(), other.as_ptr());
match ord {
// negative return values for invalid layouts
..=-1 => false,
0 => true,
1 => false,
2.. => panic!("illegal return value"),
}
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[repr(transparent)]
pub struct CustomChannel(AVChannelCustom);
impl CustomChannel {
pub fn new(channel: Channel, name: Option<&str>) -> Self {
Self::new_raw(channel as i32, name)
}
pub fn new_raw(channel: i32, name: Option<&str>) -> Self {
let name = name.unwrap_or("").as_bytes();
let mut name_with_zero = [0; 16];
let len = name.len().min(15);
name_with_zero[..len].copy_from_slice(&name[..len]);
Self::custom(channel as i32, array::from_fn(|i| name_with_zero[i] as i8))
}
pub fn custom(channel: i32, name: [i8; 16]) -> Self {
assert_eq!(name[15], 0);
Self(AVChannelCustom {
id: AVChannel(channel as i32),
name,
opaque: ptr::null_mut(),
})
}
}
impl From<Channel> for CustomChannel {
fn from(v: Channel) -> CustomChannel {
CustomChannel::new(v, None)
}
}
impl From<CustomChannel> for AVChannelCustom {
fn from(v: CustomChannel) -> AVChannelCustom {
v.0
}
}
impl From<AVChannelCustom> for CustomChannel {
fn from(v: AVChannelCustom) -> CustomChannel {
Self(v)
}
}
#[cfg(feature = "serde")]
mod serde {
//! It is expected that `CustomChannel::name` contains human-readable names in
//! zero-terminated UTF-8. They are serialized as text instead of byte arrays
//! to make them easily readable in e.g. JSON output. You'll need a different
//! serde impl if you cleverly hid extra data after the null terminator, or
//! use the name field to smuggle non-UTF-8 data.
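    //!
    //! As an illustration, `CustomChannel::new(Channel::LowFrequency, Some("low-freq"))`
    //! would round-trip through JSON as something like `{"id":3,"name":"low-freq"}`;
    //! the numeric id shown here is an assumption based on FFmpeg's channel numbering,
    //! not a value guaranteed by this module.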
use std::{array, ffi::CStr, ptr, str};
use serde_::{
de::Error as _,
ser::{Error as _, SerializeStruct},
Deserialize, Deserializer, Serialize, Serializer,
};
use super::{alloc_custom_channels, ChannelData, ChannelLayout, CustomChannel};
use crate::ffi::{AVChannelLayout, AVChannelOrder};
impl Serialize for CustomChannel {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut s = serializer.serialize_struct("CustomChannel", 2)?;
s.serialize_field("id", &self.0.id.0)?;
if self.0.name[0] != 0 {
let u8_name = array::from_fn::<u8, 16, _>(|i| self.0.name[i] as u8);
let str_name = CStr::from_bytes_until_nul(&u8_name[..])
.map_err(|_| S::Error::custom("name is not a null-terminated string"))?
.to_str()
.map_err(|_| S::Error::custom("name is not valid UTF-8"))?;
s.serialize_field("name", &str_name)?;
}
s.end()
}
}
impl<'de> Deserialize<'de> for CustomChannel {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(crate = "serde_")]
struct Channel<'a> {
id: i32,
name: Option<&'a str>,
}
let Channel { id, name } = Channel::deserialize(deserializer)?;
Ok(CustomChannel::new_raw(id, name.as_deref()))
}
}
impl Serialize for ChannelLayout {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut s = serializer.serialize_struct("ChannelLayout", 2)?;
// provide type hints in order to get compile-time errors if ffmpeg
// changes the struct definition
s.serialize_field::<u32>("order", &self.0.order.0)?;
if let Some(custom) = self.custom_channels() {
s.serialize_field("map", &custom)?;
}
else {
s.serialize_field::<u64>("mask", unsafe { &self.0.u.mask })?;
}
s.end()
}
}
impl<'de> Deserialize<'de> for ChannelLayout {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(crate = "serde_")]
struct OldLayout {
bits: u64,
}
#[derive(Deserialize)]
#[serde(crate = "serde_")]
struct | NewLayout | identifier_name |
|
channel_layout.rs | : ChannelLayout =
Self::_5POINT1.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]);
pub const _7POINT1_WIDE_BACK: ChannelLayout =
Self::_5POINT1_BACK.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]);
}
impl From<ChannelLayout> for AVChannelLayout {
fn from(v: ChannelLayout) -> AVChannelLayout {
v.0
}
}
impl From<AVChannelLayout> for ChannelLayout {
fn from(v: AVChannelLayout) -> ChannelLayout {
Self(v)
}
}
impl fmt::Debug for ChannelLayout {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut d = f.debug_struct("ChannelLayout");
d.field("order", &self.0.order);
d.field("nb_channels", &self.0.nb_channels);
if let Some(custom) = self.custom_channels() {
d.field("map", &custom);
}
else {
unsafe {
d.field("mask", &self.0.u.mask);
}
}
d.field("opaque", &self.0.opaque);
d.finish()
}
}
impl PartialEq for ChannelLayout {
fn eq(&self, other: &ChannelLayout) -> bool {
unsafe {
let ord = av_channel_layout_compare(self.as_ptr(), other.as_ptr());
match ord {
// negative return values for invalid layouts
..=-1 => false,
0 => true,
1 => false,
2.. => panic!("illegal return value"),
}
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[repr(transparent)]
pub struct CustomChannel(AVChannelCustom);
impl CustomChannel {
pub fn new(channel: Channel, name: Option<&str>) -> Self {
Self::new_raw(channel as i32, name)
}
pub fn new_raw(channel: i32, name: Option<&str>) -> Self {
let name = name.unwrap_or("").as_bytes();
let mut name_with_zero = [0; 16];
let len = name.len().min(15);
name_with_zero[..len].copy_from_slice(&name[..len]);
Self::custom(channel as i32, array::from_fn(|i| name_with_zero[i] as i8))
}
pub fn custom(channel: i32, name: [i8; 16]) -> Self {
assert_eq!(name[15], 0);
Self(AVChannelCustom {
id: AVChannel(channel as i32),
name,
opaque: ptr::null_mut(),
})
}
}
impl From<Channel> for CustomChannel {
fn from(v: Channel) -> CustomChannel {
CustomChannel::new(v, None)
}
}
impl From<CustomChannel> for AVChannelCustom {
fn from(v: CustomChannel) -> AVChannelCustom {
v.0
}
}
impl From<AVChannelCustom> for CustomChannel {
fn from(v: AVChannelCustom) -> CustomChannel {
Self(v)
}
}
#[cfg(feature = "serde")]
mod serde {
//! It is expected that `CustomChannel::name` contains human-readable names in
//! zero-terminated UTF-8. They are serialized as text instead of byte arrays
//! to make them easily readable in e.g. JSON output. You'll need a different
//! serde impl if you cleverly hid extra data after the null terminator, or
//! use the name field to smuggle non-UTF-8 data.
use std::{array, ffi::CStr, ptr, str};
use serde_::{
de::Error as _,
ser::{Error as _, SerializeStruct},
Deserialize, Deserializer, Serialize, Serializer,
};
use super::{alloc_custom_channels, ChannelData, ChannelLayout, CustomChannel};
use crate::ffi::{AVChannelLayout, AVChannelOrder};
impl Serialize for CustomChannel {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut s = serializer.serialize_struct("CustomChannel", 2)?;
s.serialize_field("id", &self.0.id.0)?;
if self.0.name[0] != 0 {
let u8_name = array::from_fn::<u8, 16, _>(|i| self.0.name[i] as u8);
let str_name = CStr::from_bytes_until_nul(&u8_name[..])
.map_err(|_| S::Error::custom("name is not a null-terminated string"))?
.to_str()
.map_err(|_| S::Error::custom("name is not valid UTF-8"))?;
s.serialize_field("name", &str_name)?;
}
s.end()
}
}
impl<'de> Deserialize<'de> for CustomChannel {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(crate = "serde_")]
struct Channel<'a> {
id: i32,
name: Option<&'a str>,
}
let Channel { id, name } = Channel::deserialize(deserializer)?;
Ok(CustomChannel::new_raw(id, name.as_deref()))
}
}
impl Serialize for ChannelLayout {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut s = serializer.serialize_struct("ChannelLayout", 2)?;
// provide type hints in order to get compile-time errors if ffmpeg
// changes the struct definition
s.serialize_field::<u32>("order", &self.0.order.0)?;
if let Some(custom) = self.custom_channels() {
s.serialize_field("map", &custom)?;
}
else {
s.serialize_field::<u64>("mask", unsafe { &self.0.u.mask })?;
}
s.end()
}
}
impl<'de> Deserialize<'de> for ChannelLayout {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(crate = "serde_")]
struct OldLayout {
bits: u64,
}
#[derive(Deserialize)]
#[serde(crate = "serde_")]
struct NewLayout {
order: u32,
mask: Option<u64>,
map: Option<Vec<CustomChannel>>,
}
#[derive(Deserialize)]
#[serde(untagged, crate = "serde_")]
enum VersionedLayout {
Old(OldLayout),
New(NewLayout),
}
let (order, u, nb_channels);
match VersionedLayout::deserialize(deserializer)? {
VersionedLayout::Old(OldLayout { bits: mask }) => {
order = AVChannelOrder::AV_CHANNEL_ORDER_NATIVE;
u = ChannelData { mask };
nb_channels = mask.count_ones() as i32;
}
VersionedLayout::New(NewLayout {
order: num_order,
mask,
map,
}) => {
order = AVChannelOrder(num_order);
match (order, mask, map) {
(AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM, _, Some(map)) => {
u = ChannelData {
map: alloc_custom_channels(&map),
};
nb_channels = map.len() as i32;
}
(
AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC
| AVChannelOrder::AV_CHANNEL_ORDER_NATIVE
| AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC,
Some(mask),
_,
) => {
u = ChannelData { mask };
nb_channels = mask.count_ones() as i32
}
(_, _, _) => return Err(D::Error::missing_field("mask or map")),
}
}
}
Ok(ChannelLayout(AVChannelLayout {
order,
nb_channels,
u,
opaque: ptr::null_mut(),
}))
}
}
#[cfg(test)]
mod test {
use std::fmt::Debug;
use serde_::{de::DeserializeOwned, Serialize};
use super::super::{Channel, ChannelLayout, CustomChannel};
use crate::ffi::AVChannelOrder;
fn round_trip_debug<T>(x: T)
where
T: Serialize + DeserializeOwned + Debug,
{
let json = serde_json::to_string(&x).unwrap();
let y: T = serde_json::from_str(&json).unwrap();
assert_eq!(format!("{x:?}"), format!("{y:?}"));
}
#[test]
fn serde() | {
round_trip_debug(ChannelLayout::native(&[Channel::StereoRight, Channel::LowFrequency]));
round_trip_debug(ChannelLayout::custom(&[
CustomChannel::new(Channel::LowFrequency, Some("low-freq")),
CustomChannel::new(Channel::BackCenter, None),
]));
} | identifier_body |
|
channel_layout.rs | channels = self.custom_channels_unchecked();
Some(
channels
.iter()
.all(|ch| self.contains_avchannel(ch.0.id).unwrap_or(false)),
)
},
// no information about channels available
(AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC, _) | (_, AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC) => None,
(AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC, _) | (_, AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC) => None,
(self_order, layout_order) => panic!("invalid channel orders: {self_order:?}, {layout_order:?}"),
}
}
// this would need only one pass with the bprint API, but that's currently
// unwrapped
pub fn describe(&self) -> Result<String, Error> {
fn describe_into(buf: &mut [u8], layout: &ChannelLayout) -> Result<Result<String, usize>, Error> {
unsafe {
let bytes_needed = match av_channel_layout_describe(layout.as_ptr(), buf.as_mut_ptr() as *mut _, buf.len()) {
e if e < 0 => return Err(Error::from(e))?,
needed => needed as usize,
};
if bytes_needed <= buf.len() {
let s = String::from_utf8_lossy(&buf[..bytes_needed]);
Ok(Ok(s.into_owned()))
}
else {
Ok(Err(bytes_needed))
}
}
}
const BUF_SIZE: usize = 64;
let mut buf = [0u8; BUF_SIZE];
match describe_into(&mut buf[..], self)? {
Ok(s) => Ok(s),
Err(needed) => {
let mut buf = vec![0; needed + 1];
Ok(describe_into(&mut buf[..], self)?.expect("allocated buffer should have been big enough"))
}
}
}
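    // Sketch of expected usage (the exact string comes from libavutil, not this wrapper):
    // `ChannelLayout::STEREO.describe()` should yield a short layout name along the lines of "stereo".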
pub fn is_empty(&self) -> bool {
self.0.nb_channels == 0
}
pub fn order(&self) -> ChannelOrder {
match self.0.order {
AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC => ChannelOrder::Unspecified,
AVChannelOrder::AV_CHANNEL_ORDER_NATIVE => ChannelOrder::Native,
AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM => ChannelOrder::Custom,
AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC => ChannelOrder::Ambisonic,
order => panic!("invalid channel order: {order:?}"),
}
}
pub fn set_order(&mut self, order: ChannelOrder) {
self.0.order = AVChannelOrder(order as u32);
}
pub fn channels(&self) -> i32 {
self.0.nb_channels
}
pub fn as_ptr(&self) -> *const AVChannelLayout {
&self.0 as *const _
}
pub fn native_order_bits(&self) -> Option<u64> {
(self.0.order == AVChannelOrder::AV_CHANNEL_ORDER_NATIVE).then_some(unsafe { self.0.u.mask })
}
unsafe fn custom_channels_unchecked(&self) -> &[CustomChannel] {
slice::from_raw_parts(self.0.u.map.cast::<CustomChannel>(), self.0.nb_channels.max(0) as usize)
}
pub fn custom_channels(&self) -> Option<&[CustomChannel]> {
(self.0.order == AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM).then_some(unsafe { self.custom_channels_unchecked() })
}
}
impl ChannelLayout {
pub const CUBE: ChannelLayout = Self::QUAD.with_channels_native(&[
Channel::TopFrontLeft,
Channel::TopFrontRight,
Channel::TopBackLeft,
Channel::TopBackRight,
]);
pub const HEXADECAGONAL: ChannelLayout = Self::OCTAGONAL.with_channels_native(&[
Channel::WideLeft,
Channel::WideRight,
Channel::TopBackLeft,
Channel::TopBackRight,
Channel::TopBackCenter,
Channel::TopFrontCenter,
Channel::TopFrontLeft,
Channel::TopFrontRight,
]);
pub const HEXAGONAL: ChannelLayout = Self::_5POINT0_BACK.with_channels_native(&[Channel::BackCenter]);
pub const MONO: ChannelLayout = Self::native(&[Channel::FrontCenter]);
pub const OCTAGONAL: ChannelLayout =
Self::_5POINT0.with_channels_native(&[Channel::BackLeft, Channel::BackCenter, Channel::BackRight]);
pub const QUAD: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::BackLeft, Channel::BackRight]);
pub const STEREO: ChannelLayout = Self::native(&[Channel::FrontLeft, Channel::FrontRight]);
pub const STEREO_DOWNMIX: ChannelLayout = Self::native(&[Channel::StereoLeft, Channel::StereoRight]);
pub const SURROUND: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::FrontCenter]);
pub const _22POINT2: ChannelLayout = Self::_5POINT1_BACK.with_channels_native(&[
Channel::FrontLeftOfCenter,
Channel::FrontRightOfCenter,
Channel::BackCenter,
Channel::LowFrequency2,
Channel::SideLeft,
Channel::SideRight,
Channel::TopFrontLeft,
Channel::TopFrontRight,
Channel::TopFrontCenter,
Channel::TopCenter,
Channel::TopBackLeft,
Channel::TopBackRight,
Channel::TopSideLeft,
Channel::TopSideRight,
Channel::TopBackCenter,
Channel::BottomFrontCenter,
Channel::BottomFrontLeft,
Channel::BottomFrontRight,
]);
pub const _2POINT1: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::LowFrequency]);
pub const _2_1: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::BackCenter]);
pub const _2_2: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::SideLeft, Channel::SideRight]);
pub const _3POINT1: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::LowFrequency]);
pub const _4POINT0: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::BackCenter]);
pub const _4POINT1: ChannelLayout = Self::_4POINT0.with_channels_native(&[Channel::LowFrequency]);
pub const _5POINT0: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::SideLeft, Channel::SideRight]);
pub const _5POINT0_BACK: ChannelLayout =
Self::SURROUND.with_channels_native(&[Channel::BackLeft, Channel::BackRight]);
pub const _5POINT1: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::LowFrequency]);
pub const _5POINT1_BACK: ChannelLayout = Self::_5POINT0_BACK.with_channels_native(&[Channel::LowFrequency]);
pub const _6POINT0: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::BackCenter]);
pub const _6POINT0_FRONT: ChannelLayout =
Self::_2_2.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]);
pub const _6POINT1: ChannelLayout = Self::_5POINT1.with_channels_native(&[Channel::BackCenter]);
pub const _6POINT1_BACK: ChannelLayout = Self::_5POINT1_BACK.with_channels_native(&[Channel::BackCenter]);
pub const _6POINT1_FRONT: ChannelLayout = Self::_6POINT0_FRONT.with_channels_native(&[Channel::LowFrequency]);
pub const _7POINT0: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::BackLeft, Channel::BackRight]);
pub const _7POINT0_FRONT: ChannelLayout =
Self::_5POINT0.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]);
pub const _7POINT1: ChannelLayout = Self::_5POINT1.with_channels_native(&[Channel::BackLeft, Channel::BackRight]);
pub const _7POINT1_TOP_BACK: ChannelLayout =
Self::_5POINT1_BACK.with_channels_native(&[Channel::TopFrontLeft, Channel::TopFrontRight]);
pub const _7POINT1_WIDE: ChannelLayout =
Self::_5POINT1.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]);
pub const _7POINT1_WIDE_BACK: ChannelLayout =
Self::_5POINT1_BACK.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]);
}
impl From<ChannelLayout> for AVChannelLayout {
fn from(v: ChannelLayout) -> AVChannelLayout {
v.0
}
}
impl From<AVChannelLayout> for ChannelLayout {
fn from(v: AVChannelLayout) -> ChannelLayout {
Self(v)
}
}
impl fmt::Debug for ChannelLayout {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut d = f.debug_struct("ChannelLayout");
d.field("order", &self.0.order);
d.field("nb_channels", &self.0.nb_channels);
if let Some(custom) = self.custom_channels() {
d.field("map", &custom);
}
else {
unsafe {
d.field("mask", &self.0.u.mask); | } | random_line_split |
|
homework1.py | -star (only 1%), which means this dataset is extremely unbalanced.
#
# The dataset only has 6 rows containing NA values, and we decided to remove them from the data.
#%% [markdown]
# ## Problem 3
# Train a simple predictor to predict the star rating using two features:
#
# star_rating ≃ θ0 + θ1 × [review is verified] + θ2 × [review length].
#
# Report the values of θ0, θ1, and θ2. Briefly describe your interpretation of these values, i.e., what do θ0, θ1, and θ2 represent? Explain these in terms of the features and labels, e.g. if the coefficient of ‘review length’ is negative, what would that say about verified versus unverified reviews (1 mark)?
#%%
# Data preprocessing
data['verified_purchase_int'] = data.apply(lambda x: int(x['verified_purchase'] == "Y"), axis = 1)
data['review_body_length'] = data.apply(lambda x: len(x['review_body']), axis = 1)
data['theta_zero'] = 1
#%%
# Define My Own Regression
def myRegression(featureNames, labelName, data):
X, y = data[featureNames], data[labelName]
theta, residuals, rank, s = np.linalg.lstsq(X, y)
MSE = ((y - np.dot(X, theta))**2).mean()
for i in range(len(theta)):
print("Theta%1d: %.5f" % (i, theta[i]))
print("MSE: %.3f" % MSE)
featureNames = ['theta_zero', 'verified_purchase_int', 'review_body_length']
labelName = 'star_rating'
myRegression(featureNames, labelName, data)
#%% [markdown]
# We first convert "verified_purchase" from "Y"/"N" to 1/0 and compute the length of "review_body" in characters as features.
#
# After defining our own regression, we get the three theta values $\theta_0, \theta_1, \theta_2$ and the MSE.
#
# $\theta_0$ is a bias term: if the "verified_purchase" and "review_body" features are both zero, the predicted rating is simply $\theta_0$.
#
# $\theta_1$ captures the relationship between "verified_purchase" and "star_rating": if the purchase is verified, the predicted rating increases by $\theta_1$.
#
# $\theta_2$ captures the relationship between the length of "review_body" in characters and "star_rating": if the length increases by 1, the predicted rating changes by $\theta_2$.
#
# In this case, an interesting fact is that $\theta_2$ is negative, which means the more characters you write in your review, the lower the rating you are predicted to give the product. This is amusing but reasonable, since people tend to write long reviews to complain when they are unsatisfied more often than they write long, flattering reviews when they are satisfied.
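#%% [markdown]
# As a quick sanity check (a minimal sketch, not part of the original assignment), we can refit the same model and score one hypothetical review -- a verified purchase with a 100-character review body (both values are made up for illustration).
#%%
X, y = data[featureNames], data[labelName]
theta, _, _, _ = np.linalg.lstsq(X, y, rcond=None)
# [theta_zero, verified_purchase_int, review_body_length] for the hypothetical review
example = np.array([1, 1, 100])
print("Predicted rating for a verified, 100-character review: %.3f" % np.dot(example, theta))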
#%% [markdown]
# ## Problem 4
# Train another predictor that only uses one feature:
#
# star rating ≃ θ0 + θ1 × [review is verified]
#
# Report the values of θ0 and θ1. Note that coefficient you found here might be quite different (i.e., much larger or smaller) than the one from Question 3, even though these coefficients refer to the same feature. Provide an explanation as to why these coefficients might vary so significantly (1 mark).1
#%%
featureNames = ['theta_zero', 'verified_purchase_int']
labelName = 'star_rating'
myRegression(featureNames, labelName, data)
#%% [markdown]
# After removing the length of "review_body" features, compared with problem 3, the value of $\theta_0$ decreases from 4.845 to 4.578, and $\theta_1$ increases from 0.0499 to 0.1679.
#
# For $\theta_0$, this time it can be interpreted as the predicted rating when the "verified_purchase" feature is 0. Compared with problem 3: as we know, a longer "review_body" lowers the predicted rating, so $\theta_0$ in problem 3 should be bigger than the one in problem 4, since the review length is always greater than or equal to 0 and $\theta_0$ has to offset the decrease caused by the review-length term.
#
# For $\theta_1$, it still means the relationship between "verified_purchase" and "star_rating". If the purchase is verified, then the rating will increase by $\theta_1$. But, as we noticed, if "verified_purchase" is 1, the predicted rating is $\theta_0+\theta_1$, and since $\theta_0$ decreases a lot, to compensate for this, $\theta_1$ should increase accordingly.
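#
# Concretely, this simpler model can only output two values: $\theta_0 \approx 4.578$ for unverified reviews and $\theta_0 + \theta_1 \approx 4.578 + 0.168 = 4.746$ for verified ones, so the single coefficient has to absorb the whole verified/unverified gap on its own.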
#%% [markdown]
# ## Problem 5
# Split the data into two fractions – the first 90% for training, and the remaining 10% testing (based on the order they appear in the file). Train the same model as above on the training set only. What is the model’s MSE on the training and on the test set (1 mark)?
#%%
def myRegression(featureNames, labelName, dataTrain, dataTest):
X, y = dataTrain[featureNames], da | atureNames, labelName):
train = data[:int(len(data)*ratio)]
test = data[int(len(data)*ratio):]
print("================ For ratio ", ratio, "================")
myRegression(featureNames, labelName, train, test)
#%%
trainByRatio(0.9, data, featureNames, labelName)
#%% [markdown]
# ## Problem 6
# Repeat the above experiment, varying the size of the training and test fractions between
# 5% and 95% for training (using the complement for testing). Show how the training and test error vary
# as a function of the training set size (again using a simple plot or table). Does the size of the training
# set make a significant difference in testing performance? Comment on why it might or might not make
# a significant difference in this instance (2 marks).
#%%
# To plot a graph, let's revise the function slightly so that we can store the MSE in a list
def myRegression(featureNames, labelName, dataTrain, dataTest, trainMSE, testMSE):
# Training
X, y = dataTrain[featureNames], dataTrain[labelName]
theta, residuals, rank, s = np.linalg.lstsq(X, y)
MSE = ((y - np.dot(X, theta))**2).mean()
trainMSE.append(MSE)
# Testing
X, y = dataTest[featureNames], dataTest[labelName]
MSE = ((y - np.dot(X, theta))**2).mean()
testMSE.append(MSE)
def trainByRatio(ratio, data, featureNames, labelName, trainMSE, testMSE):
train = data[:int(len(data)*ratio)]
test = data[int(len(data)*ratio):]
myRegression(featureNames, labelName, train, test, trainMSE, testMSE)
trainMSE, testMSE = [], []
# ratio from 5% to 95%, step by 5%
ratios = [i/100 for i in list(range(5, 100, 5))]
for ratio in ratios:
trainByRatio(ratio, data, featureNames, labelName, trainMSE, testMSE)
# plot a graph
plt.plot(ratios, trainMSE, 'r^-', label='Train MSE')
plt.plot(ratios, testMSE, 'g*-', label='Test MSE')
plt.title('MSE with different ratio of Train-Test split')
plt.xlabel('Ratio of Training data')
plt.ylabel('MSE')
plt.legend()
plt.show()
#%% [markdown]
# Yes. As we can see from the plot, the size of the training set makes a significant difference in testing performance. As we increase the training size, however, the test performance decreases. This isn't normal and may be due to the extremely unbalanced nature of this dataset. The star rating label may vary a lot between the training set and testing set as the ratio increases.
#
# The following plot proves our thought.
#%%
def calculatePortionOfFiveStars(ratio, data, trainPortion, testPortion):
train = data[:int(len(data)*ratio)]
test = data[int(len(data)*ratio):]
trainPortion.append(len(train[train['star_rating']==i])/len(train)*1 | taTrain[labelName]
theta, residuals, rank, s = np.linalg.lstsq(X, y)
print("================ Training ================")
MSE = ((y - np.dot(X, theta))**2).mean()
for i in range(len(theta)):
print("Theta%1d: %.5f" % (i, theta[i]))
print("MSE: %.3f" % MSE)
print("================ Testing ================")
X, y = dataTest[featureNames], dataTest[labelName]
MSE = ((y - np.dot(X, theta))**2).mean()
print("MSE: %.3f" % MSE)
def trainByRatio(ratio, data, fe | identifier_body |
homework1.py | that only uses one feature:
#
# star rating ≃ θ0 + θ1 × [review is verified]
#
# Report the values of θ0 and θ1. Note that coefficient you found here might be quite different (i.e., much larger or smaller) than the one from Question 3, even though these coefficients refer to the same feature. Provide an explanation as to why these coefficients might vary so significantly (1 mark).1
#%%
featureNames = ['theta_zero', 'verified_purchase_int']
labelName = 'star_rating'
myRegression(featureNames, labelName, data)
#%% [markdown]
# After removing the length of "review_body" features, compared with problem 3, the value of $\theta_0$ decreases from 4.845 to 4.578, and $\theta_1$ increases from 0.0499 to 0.1679.
#
# For $\theta_0$, this time it can be interpreted as the predicted rating when the "verified_purchase" feature is 0. Compared with problem 3: as we know, a longer "review_body" lowers the predicted rating, so $\theta_0$ in problem 3 should be bigger than the one in problem 4, since the review length is always greater than or equal to 0 and $\theta_0$ has to offset the decrease caused by the review-length term.
#
# For $\theta_1$, it still means the relationship between "verified_purchase" and "star_rating". If the purchase is verified, then the rating will increase by $\theta_1$. But, as we noticed, if "verified_purchase" is 1, the predicted rating is $\theta_0+\theta_1$, and since $\theta_0$ decreases a lot, to compensate for this, $\theta_1$ should increase accordingly.
#%% [markdown]
# ## Problem 5
# Split the data into two fractions – the first 90% for training, and the remaining 10% testing (based on the order they appear in the file). Train the same model as above on the training set only. What is the model’s MSE on the training and on the test set (1 mark)?
#%%
def myRegression(featureNames, labelName, dataTrain, dataTest):
X, y = dataTrain[featureNames], dataTrain[labelName]
theta, residuals, rank, s = np.linalg.lstsq(X, y)
print("================ Training ================")
MSE = ((y - np.dot(X, theta))**2).mean()
for i in range(len(theta)):
print("Theta%1d: %.5f" % (i, theta[i]))
print("MSE: %.3f" % MSE)
print("================ Testing ================")
X, y = dataTest[featureNames], dataTest[labelName]
MSE = ((y - np.dot(X, theta))**2).mean()
print("MSE: %.3f" % MSE)
def trainByRatio(ratio, data, featureNames, labelName):
train = data[:int(len(data)*ratio)]
test = data[int(len(data)*ratio):]
print("================ For ratio ", ratio, "================")
myRegression(featureNames, labelName, train, test)
#%%
trainByRatio(0.9, data, featureNames, labelName)
#%% [markdown]
# ## Problem 6
# Repeat the above experiment, varying the size of the training and test fractions between
# 5% and 95% for training (using the complement for testing). Show how the training and test error vary
# as a function of the training set size (again using a simple plot or table). Does the size of the training
# set make a significant difference in testing performance? Comment on why it might or might not make
# a significant difference in this instance (2 marks).
#%%
# To plot a graph, let's revise the function slightly so that we can store the MSE in a list
def myRegression(featureNames, labelName, dataTrain, dataTest, trainMSE, testMSE):
# Training
X, y = dataTrain[featureNames], dataTrain[labelName]
theta, residuals, rank, s = np.linalg.lstsq(X, y)
MSE = ((y - np.dot(X, theta))**2).mean()
trainMSE.append(MSE)
# Testing
X, y = dataTest[featureNames], dataTest[labelName]
MSE = ((y - np.dot(X, theta))**2).mean()
testMSE.append(MSE)
def trainByRatio(ratio, data, featureNames, labelName, trainMSE, testMSE):
train = data[:int(len(data)*ratio)]
test = data[int(len(data)*ratio):]
myRegression(featureNames, labelName, train, test, trainMSE, testMSE)
trainMSE, testMSE = [], []
# ratio from 5% to 95%, step by 5%
ratios = [i/100 for i in list(range(5, 100, 5))]
for ratio in ratios:
trainByRatio(ratio, data, featureNames, labelName, trainMSE, testMSE)
# plot a graph
plt.plot(ratios, trainMSE, 'r^-', label='Train MSE')
plt.plot(ratios, testMSE, 'g*-', label='Test MSE')
plt.title('MSE with different ratio of Train-Test split')
plt.xlabel('Ratio of Training data')
plt.ylabel('MSE')
plt.legend()
plt.show()
#%% [markdown]
# Yes. As we can see from the plot, the size of the training set makes a significant difference in testing performance. As we increase the training size, however, the test performance decreases. This isn't normal and may be due to the extremely unbalanced nature of this dataset. The star rating label may vary a lot between the training set and testing set as the ratio increases.
#
# The following plot proves our thought.
#%%
def calculatePortionOfFiveStars(ratio, data, trainPortion, testPortion):
train = data[:int(len(data)*ratio)]
test = data[int(len(data)*ratio):]
    trainPortion.append(len(train[train['star_rating']==5])/len(train)*100)  # portion of 5-star ratings
    testPortion.append(len(test[test['star_rating']==5])/len(test)*100)
trainPortion, testPortion = [], []
# ratio from 5% to 95%, step by 5%
ratios = [i/100 for i in list(range(5, 100, 5))]
for ratio in ratios:
calculatePortionOfFiveStars(ratio, data, trainPortion, testPortion)
# plot a graph
plt.plot(ratios, trainPortion, 'r^-', label='Training data')
plt.plot(ratios, testPortion, 'g*-', label='Testing data')
plt.title('% of 5-star ratings in Training/Testing data as ratio varies')
plt.xlabel('Ratio')
plt.ylabel('% of 5-star ratings')
plt.legend()
plt.show()
#%% [markdown]
# # Task -- Classification
# In this question we’ll alter the prediction from our regression task, so that we are now classifying whether a review is verified. Continue using the 90%/10% training and test sets you constructed previously, i.e., train on the training set and report the error/accuracy on the testing set.
#%% [markdown]
# ## Problem 8
# First, let’s train a predictor that estimates whether a review is verified using the rating and the length:
#
# p(review is verified) ≃ σ(θ0 + θ1 × [star rating] + θ2 × [review length])
#
# Train a logistic regressor to make the above prediction (you may use a logistic regression library with default parameters, e.g. linear model.LogisticRegression() from sklearn). Report the classification accuracy of this predictor. Report also the proportion of labels that are positive (i.e., the proportion of reviews that are verified) and the proportion of predictions that are positive (1 mark).
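#
# (For reference, $\sigma(x) = 1 / (1 + e^{-x})$ squashes the linear score into a probability between 0 and 1; scikit-learn's `predict` then labels a review as verified when that probability exceeds 0.5.)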
#%%
# Define My Own Classification
from sklearn.linear_model import LogisticRegression
def myClassification(featureNames, labelName, dataTrain, dataTest):
X, y = dataTrain[featureNames], dataTrain[labelName]
clf = LogisticRegression().fit(X, y)
y_ = clf.predict(X)
print("================ Training ================")
print("Accuracy: ", clf.score(X, y))
print("Proportion of reviews that are verified: %.2f%%" % (len(dataTrain[dataTrain[featureNames]==1])/len(dataTrain)*100))
print("Proportion of predictions that are positive: %.2f%%" % (np.mean(y_==1)*100))
print("================ Testing ================")
X, y = dataTest[featureNames], dataTest[labelName]
y_ = clf.predict(X)
print("Accuracy: ", clf.score(X, y))
print("Proportion of reviews that are verified: %.2f%%" % (len(dataTest[dataTest[featureNames]==1])/len(dataTest)*100))
print("Proportion of predictions that are positive: %.2f%%" % (np.mean(y_==1)*100))
def trainByRatio(ratio, data, featureNames, labelN | ame):
tr | identifier_name |
|
homework1.py |
#%% [markdown]
# ## Problem 3
# Train a simple predictor to predict the star rating using two features:
#
# star_rating ≃ θ0 + θ1 × [review is verified] + θ2 × [review length].
#
# Report the values of θ0, θ1, and θ2. Briefly describe your interpretation of these values, i.e., what do θ0, θ1, and θ2 represent? Explain these in terms of the features and labels, e.g. if the coefficient of ‘review length’ is negative, what would that say about verified versus unverified reviews (1 mark)?
#%%
# Data preprocessing
data['verified_purchase_int'] = data.apply(lambda x: int(x['verified_purchase'] == "Y"), axis = 1)
data['review_body_length'] = data.apply(lambda x: len(x['review_body']), axis = 1)
data['theta_zero'] = 1
#%%
# Define My Own Regression
def myRegression(featureNames, labelName, data):
X, y = data[featureNames], data[labelName]
theta, residuals, rank, s = np.linalg.lstsq(X, y)
MSE = ((y - np.dot(X, theta))**2).mean()
for i in range(len(theta)):
print("Theta%1d: %.5f" % (i, theta[i]))
print("MSE: %.3f" % MSE)
featureNames = ['theta_zero', 'verified_purchase_int', 'review_body_length']
labelName = 'star_rating'
myRegression(featureNames, labelName, data)
#%% [markdown]
# We first convert "verified_purchase" from "Y"/"N" to 1/0 and compute the length of "review_body" in characters as features.
#
# After defining our own regression, we get the three theta values $\theta_0, \theta_1, \theta_2$ and the MSE.
#
# $\theta_0$ is a bias term: if the "verified_purchase" and "review_body" features are both zero, the predicted rating is simply $\theta_0$.
#
# $\theta_1$ captures the relationship between "verified_purchase" and "star_rating": if the purchase is verified, the predicted rating increases by $\theta_1$.
#
# $\theta_2$ captures the relationship between the length of "review_body" in characters and "star_rating": if the length increases by 1, the predicted rating changes by $\theta_2$.
#
# In this case, an interesting fact is that $\theta_2$ is negative, which means the more characters you write in your review, the lower the rating you are predicted to give the product. This is amusing but reasonable, since people tend to write long reviews to complain when they are unsatisfied more often than they write long, flattering reviews when they are satisfied.
#%% [markdown]
# ## Problem 4
# Train another predictor that only uses one feature:
#
# star rating ≃ θ0 + θ1 × [review is verified]
#
# Report the values of θ0 and θ1. Note that coefficient you found here might be quite different (i.e., much larger or smaller) than the one from Question 3, even though these coefficients refer to the same feature. Provide an explanation as to why these coefficients might vary so significantly (1 mark).1
#%%
featureNames = ['theta_zero', 'verified_purchase_int']
labelName = 'star_rating'
myRegression(featureNames, labelName, data)
#%% [markdown]
# After removing the length of "review_body" features, compared with problem 3, the value of $\theta_0$ decreases from 4.845 to 4.578, and $\theta_1$ increases from 0.0499 to 0.1679.
#
# For $\theta_0$, this time it can be interpreted as the predicted rating when the "verified_purchase" feature is 0. Compared with problem 3: as we know, a longer "review_body" lowers the predicted rating, so $\theta_0$ in problem 3 should be bigger than the one in problem 4, since the review length is always greater than or equal to 0 and $\theta_0$ has to offset the decrease caused by the review-length term.
#
# For $\theta_1$, it still means the relationship between "verified_purchase" and "star_rating". If the purchase is verified, then the rating will increase by $\theta_1$. But, as we noticed, if "verified_purchase" is 1, the predicted rating is $\theta_0+\theta_1$, and since $\theta_0$ decreases a lot, to compensate for this, $\theta_1$ should increase accordingly.
#%% [markdown]
# ## Problem 5
# Split the data into two fractions – the first 90% for training, and the remaining 10% testing (based on the order they appear in the file). Train the same model as above on the training set only. What is the model’s MSE on the training and on the test set (1 mark)?
#%%
def myRegression(featureNames, labelName, dataTrain, dataTest):
X, y = dataTrain[featureNames], dataTrain[labelName]
theta, residuals, rank, s = np.linalg.lstsq(X, y)
print("================ Training ================")
MSE = ((y - np.dot(X, theta))**2).mean()
for i in range(len(theta)):
print("Theta%1d: %.5f" % (i, theta[i]))
print("MSE: %.3f" % MSE)
print("================ Testing ================")
X, y = dataTest[featureNames], dataTest[labelName]
MSE = ((y - np.dot(X, theta))**2).mean()
print("MSE: %.3f" % MSE)
def trainByRatio(ratio, data, featureNames, labelName):
train = data[:int(len(data)*ratio)]
test = data[int(len(data)*ratio):]
print("================ For ratio ", ratio, "================")
myRegression(featureNames, labelName, train, test)
#%%
trainByRatio(0.9, data, featureNames, labelName)
#%% [markdown]
# ## Problem 6
# Repeat the above experiment, varying the size of the training and test fractions between
# 5% and 95% for training (using the complement for testing). Show how the training and test error vary
# as a function of the training set size (again using a simple plot or table). Does the size of the training
# set make a significant difference in testing performance? Comment on why it might or might not make
# a significant difference in this instance (2 marks).
#%%
# To plot a graph, let's revise the function slightly so that we can store the MSE in a list
def myRegression(featureNames, labelName, dataTrain, dataTest, trainMSE, testMSE):
# Training
X, y = dataTrain[featureNames], dataTrain[labelName]
theta, residuals, rank, s = np.linalg.lstsq(X, y)
MSE = ((y - np.dot(X, theta))**2).mean()
trainMSE.append(MSE)
# Testing
X, y = dataTest[featureNames], dataTest[labelName]
MSE = ((y - np.dot(X, theta))**2).mean()
testMSE.append(MSE)
def trainByRatio(ratio, data, featureNames, labelName, trainMSE, testMSE):
train = data[:int(len(data)*ratio)]
test = data[int(len(data)*ratio):]
myRegression(featureNames, labelName, train, test, trainMSE, testMSE)
trainMSE, testMSE = [], []
# ratio from 5% to 95%, step by 5%
ratios = [i/100 for i in list(range(5, 100, 5))]
for ratio in ratios:
trainByRatio(ratio, data, featureNames, labelName, trainMSE, testMSE)
# plot a graph
plt.plot(ratios, trainMSE, 'r^-', label='Train MSE')
plt.plot(ratios, testMSE, 'g*-', label='Test MSE')
plt.title('MSE with different ratio of Train-Test split')
plt.xlabel('Ratio of Training data')
plt.ylabel('MSE')
plt.legend()
plt.show()
#%% [markdown]
# Yes. As we can see from the plot, the size of the training set makes a significant difference in testing performance. As we increase the training size, however, the test performance decreases. This isn't normal and may be due to the extremely unbalanced nature of this dataset. The star rating label may vary a lot between the training set and testing set as the ratio increases.
#
# The following plot proves our thought.
#%%
def calculatePortionOfFiveStars(ratio, data, trainPortion, testPortion):
train = data[:int |
#%% [markdown]
# We can see from the above plot and printout that most of the ratings are 5-star (87%), while the rarest rating is 2-star (only 1%), which means this dataset is extremely unbalanced.
#
# The dataset only has 6 rows containing NA values, and we decided to remove them from the data.
|
homework1.py | [markdown]
# We can see from the above plot and printout that most of the ratings are 5-star (87%), while the rarest rating is 2-star (only 1%), which means this dataset is extremely unbalanced.
#
# The dataset only has 6 rows containing NA values, and we decided to remove them from the data.
#%% [markdown]
# ## Problem 3
# Train a simple predictor to predict the star rating using two features:
#
# star_rating ≃ θ0 + θ1 × [review is verified] + θ2 × [review length].
#
# Report the values of θ0, θ1, and θ2. Briefly describe your interpretation of these values, i.e., what do θ0, θ1, and θ2 represent? Explain these in terms of the features and labels, e.g. if the coefficient of ‘review length’ is negative, what would that say about verified versus unverified reviews (1 mark)?
#%%
# Data preprocessing
data['verified_purchase_int'] = data.apply(lambda x: int(x['verified_purchase'] == "Y"), axis = 1)
data['review_body_length'] = data.apply(lambda x: len(x['review_body']), axis = 1)
data['theta_zero'] = 1
#%%
# Define My Own Regression
def myRegression(featureNames, labelName, data):
X, y = data[featureNames], data[labelName]
theta, residuals, rank, s = np.linalg.lstsq(X, y)
MSE = ((y - np.dot(X, theta))**2).mean()
for i in range(len(theta)):
print("Theta%1d: %.5f" % (i, theta[i]))
print("MSE: %.3f" % MSE)
featureNames = ['theta_zero', 'verified_purchase_int', 'review_body_length']
labelName = 'star_rating'
myRegression(featureNames, labelName, data)
#%% [markdown]
# We first convert "verified_purchase" from "Y"/"N" to 1/0 and compute the length of "review_body" in characters as features.
#
# After defining our own regression, we get the three theta values $\theta_0, \theta_1, \theta_2$ and the MSE.
#
# $\theta_0$ is a bias term: if the "verified_purchase" and "review_body" features are both zero, the predicted rating is simply $\theta_0$.
#
# $\theta_1$ captures the relationship between "verified_purchase" and "star_rating": if the purchase is verified, the predicted rating increases by $\theta_1$.
#
# $\theta_2$ captures the relationship between the length of "review_body" in characters and "star_rating": if the length increases by 1, the predicted rating changes by $\theta_2$.
#
# In this case, an interesting fact is that $\theta_2$ is negative, which means the more characters you write in your review, the lower the rating you are predicted to give the product. This is amusing but reasonable, since people tend to write long reviews to complain when they are unsatisfied more often than they write long, flattering reviews when they are satisfied.
#%% [markdown]
# ## Problem 4
# Train another predictor that only uses one feature:
#
# star rating ≃ θ0 + θ1 × [review is verified]
#
# Report the values of θ0 and θ1. Note that coefficient you found here might be quite different (i.e., much larger or smaller) than the one from Question 3, even though these coefficients refer to the same feature. Provide an explanation as to why these coefficients might vary so significantly (1 mark).1
#%%
featureNames = ['theta_zero', 'verified_purchase_int']
labelName = 'star_rating'
myRegression(featureNames, labelName, data)
#%% [markdown]
# After removing the length of "review_body" features, compared with problem 3, the value of $\theta_0$ decreases from 4.845 to 4.578, and $\theta_1$ increases from 0.0499 to 0.1679.
#
# For $\theta_0$, this time it can be interpreted as the predicted rating when the "verified_purchase" feature is 0. Compared with problem 3: as we know, a longer "review_body" lowers the predicted rating, so $\theta_0$ in problem 3 should be bigger than the one in problem 4, since the review length is always greater than or equal to 0 and $\theta_0$ has to offset the decrease caused by the review-length term.
#
# For $\theta_1$, it still means the relationship between "verified_purchase" and "star_rating". If the purchase is verified, then the rating will increase by $\theta_1$. But, as we noticed, if "verified_purchase" is 1, the predicted rating is $\theta_0+\theta_1$, and since $\theta_0$ decreases a lot, to compensate for this, $\theta_1$ should increase accordingly.
#%% [markdown]
# ## Problem 5
# Split the data into two fractions – the first 90% for training, and the remaining 10% testing (based on the order they appear in the file). Train the same model as above on the training set only. What is the model’s MSE on the training and on the test set (1 mark)?
#%%
def myRegression(featureNames, labelName, dataTrain, dataTest):
X, y = dataTrain[featureNames], dataTrain[labelName]
theta, residuals, rank, s = np.linalg.lstsq(X, y)
print("================ Training ================")
MSE = ((y - np.dot(X, theta))**2).mean()
for i in range(len(theta)):
print("Theta%1d: %.5f" % (i, theta[i]))
print("MSE: %.3f" % MSE)
print("================ Testing ================")
X, y = dataTest[featureNames], dataTest[labelName]
MSE = ((y - np.dot(X, theta))**2).mean()
print("MSE: %.3f" % MSE)
def trainByRatio(ratio, data, featureNames, labelName):
train = data[:int(len(data)*ratio)]
test = data[int(len(data)*ratio):]
print("================ For ratio ", ratio, "================")
myRegression(featureNames, labelName, train, test)
#%%
trainByRatio(0.9, data, featureNames, labelName)
#%% [markdown]
# ## Problem 6
# Repeat the above experiment, varying the size of the training and test fractions between
# 5% and 95% for training (using the complement for testing). Show how the training and test error vary
# as a function of the training set size (again using a simple plot or table). Does the size of the training
# set make a significant difference in testing performance? Comment on why it might or might not make
# a significant difference in this instance (2 marks).
#%%
# To plot a graph, let's revise the function slightly so that we can store the MSE in a list
def myRegression(featureNames, labelName, dataTrain, dataTest, trainMSE, testMSE):
# Training
X, y = dataTrain[featureNames], dataTrain[labelName]
theta, residuals, rank, s = np.linalg.lstsq(X, y)
MSE = ((y - np.dot(X, theta))**2).mean()
trainMSE.append(MSE)
# Testing
X, y = dataTest[featureNames], dataTest[labelName]
MSE = ((y - np.dot(X, theta))**2).mean()
testMSE.append(MSE)
def trainByRatio(ratio, data, featureNames, labelName, trainMSE, testMSE):
train = data[:int(len(data)*ratio)]
test = data[int(len(data)*ratio):]
myRegression(featureNames, labelName, train, test, trainMSE, testMSE)
trainMSE, testMSE = [], []
# ratio from 5% to 95%, step by 5%
ratios = [i/100 for i in list(range(5, 100, 5))]
for ratio in ratios:
trainByRatio(ratio, data, featureNames, labelName, trainMSE, testMSE)
# plot a graph
plt.plot(ratios, trainMSE, 'r^-', label='Train MSE')
plt.plot(ratios, testMSE, 'g*-', label='Test MSE')
plt.title('MSE with different ratio of Train-Test split')
plt.xlabel('Ratio of Training data')
plt.ylabel('MSE')
plt.legend()
plt.show()
#%% [markdown]
# Yes. As we can see from the plot, the size of the training set makes a significant difference in testing performance. As we increase the training size, however, the test performance decreases. This isn't normal and may be due to the extremely unbalanced nature of this dataset. The star rating label may vary a lot between the training set and testing set as the ratio increases.
#
# The following | "The percent of %1d-star: %.1f%%" % (i, (len(data[data['star_rating']==i])/len(data)*100)))
#%% | conditional_block |
|
page-email-management.js | enable: true,
createddate: '',
lastupdate: '',
createdby: '',
updatedby: ''
};
em.CategoryMailList = ko.observableArray([]);
em.UserMailList = ko.observableArray([]);
em.AlarmCodesMailList = ko.observableArray([]);
em.TemplateMailList = ko.observable();
em.isAlarmCode = ko.observable(false);
em.isInterval = ko.observable(false);
em.templateFilter = {
search: ""
};
em.contentIsLoading = ko.observable(false);
em.TableColumns = ko.observableArray([{ headerTemplate: "<center><input type='checkbox' class='deletecheckall' onclick=\"em.checkDeleteData(this, 'deleteall', 'all')\"/></center>", attributes: { style: "text-align: center;" }, width: 40, template: function template(d) {
return ["<input type='checkbox' class='deletecheck' idcheck='" + d._id + "' onclick=\"em.checkDeleteData(this, 'delete')\" />"].join(" ");
} }, {
field: "_id",
title: "ID",
headerAttributes: { style: "text-align: center;" }
}, {
field: "subject",
title: "Subject",
headerAttributes: { style: "text-align: center;" }
}, {
field: "category",
title: "Category",
headerAttributes: { style: "text-align: center;" },
attributes: { style: "text-align: center;" }
}, {
field: "enable",
title: "Enable",
headerAttributes: { style: "text-align: center;" },
attributes: { style: "text-align: center;" }
},
{
headerTemplate: "<center>Action</center>", width: 100,
template: function template(d) {
return ["<button class='btn btn-sm btn-warning' onclick='em.editData(\"" + d._id + "\")'><span class='fa fa-pencil' ></span></button>"].join(" ");
},
attributes: { style: "text-align: center;" }
}
]);
em.filter = ko.mapping.fromJS(em.templateFilter);
em.config = ko.mapping.fromJS(em.templateEmail);
em.isNew = ko.observable(false);
em.tempCheckIdDelete = ko.observableArray([]);
em.selectedTableID = ko.observable("");
em.checkDeleteData = function (elem, e) {
if (e === 'delete') {
if ($(elem).prop('checked') === true) em.tempCheckIdDelete.push($(elem).attr('idcheck'));else em.tempCheckIdDelete.remove(function (item) {
return item === $(elem).attr('idcheck');
});
}
if (e === 'deleteall') {
if ($(elem).prop('checked') === true) {
$('.deletecheck').each(function (index) {
$(this).prop("checked", true);
em.tempCheckIdDelete.push($(this).attr('idcheck'));
});
} else {
(function () {
var idtemp = '';
$('.deletecheck').each(function (index) {
$(this).prop("checked", false);
idtemp = $(this).attr('idcheck');
em.tempCheckIdDelete.remove(function (item) {
return item === idtemp;
});
});
})();
}
}
};
em.checkCategory = function() {
em.showHide($('#categoryList').data('kendoDropDownList').value());
}
em.showHide = function(category) {
var resObj = em.CategoryMailList().filter(function(obj) {
return obj.value == category;
});
var condition = resObj[0].condition.split(",");
em.isAlarmCode(false);
em.isInterval(false);
$.each(condition, function(idx, val){
if(val.indexOf("isAlarmCode") >= 0) { | em.isInterval(true);
}
});
var catVal = $('#categoryList').data('kendoDropDownList').value();
if(catVal == "alarm01") {
$('#templateMail').html(em.TemplateMailList().alarmTemplate)
} else {
$('#templateMail').html(em.TemplateMailList().dataTemplate)
}
}
em.resetDDL = function() {
$('#categoryList').data('kendoDropDownList').select(0);
$('#userList').data('kendoMultiSelect').value([]);
$('#alarmcodesList').data('kendoMultiSelect').value([]);
}
em.setDDL = function(data) {
$('#categoryList').data('kendoDropDownList').value(data.category);
$('#userList').data('kendoMultiSelect').value(data.receivers);
$('#alarmcodesList').data('kendoMultiSelect').value(data.alarmcodes);
em.showHide(data.category);
}
em.newData = function () {
em.isNew(true);
ko.mapping.fromJS(em.templateEmail, em.config);
$('#editor').data('kendoEditor').value("");
em.resetDDL();
em.checkCategory();
setTimeout(function(){
$('#modalUpdate').modal('show');
}, 100);
};
em.editData = function (id) {
em.isNew(false);
toolkit.ajaxPost(viewModel.appName + 'email/editemail', { _id: id }, function (res) {
if (!app.isFine(res)) {
return;
}
ko.mapping.fromJS(res.data, em.config);
em.setDDL(res.data);
$('#editor').data('kendoEditor').value(res.data.template);
setTimeout(function(){
$('#modalUpdate').modal('show');
}, 100);
});
};
em.setEditor = function() {
$("#editor").html("");
$("#editor").kendoEditor({
resizable: {
content: true,
toolbar: true,
},
messages: {
// fontName: "Source Sans Pro, Lato , Open Sans , Helvetica Neue, Arial, sans-serif"
fontNameInherit: "Source Sans Pro, Lato , Open Sans , Helvetica Neue, Arial, sans-serif",
fontSize: 12
}
});
}
em.saveChanges = function () {
if (!toolkit.isFormValid(".form-group")) {
return;
}
var param = ko.mapping.toJS(em.config);
param.id = param._id;
param.intervaltime = parseInt(param.intervaltime);
param.category = $('#categoryList').data('kendoDropDownList').value();
param.receivers = $('#userList').data('kendoMultiSelect').value();
param.alarmcodes = $('#alarmcodesList').data('kendoMultiSelect').value();
param.template = $('#editor').data('kendoEditor').value();
param.lastupdate = new Date();
if(em.isNew()) {
param.createddate = new Date();
}
toolkit.ajaxPost(viewModel.appName + 'email/saveemail', param, function (res) {
if (!app.isFine(res)) {
return;
}
var dataEmail = res.data;
var resCreate = em.UserMailList().filter(function(obj) {
return obj.value == dataEmail.createdby;
});
dataEmail.createdby = resCreate[0].text;
var resUpdate = em.UserMailList().filter(function(obj) {
return obj.value == dataEmail.updatedby;
});
dataEmail.updatedby = resUpdate[0].text;
var ajaxToFile = $.ajax({
url: "http://ostrowfm-realtime.eaciitapp.com/email/mailtofile",
data: dataEmail,
contentType: false,
dataType: "json",
type: 'GET',
success: function (data) {
}
});
$('#modalUpdate').modal('hide');
em.refreshData();
swal({ title: res.message, type: "success" });
}, function (err) {
toolkit.showError(err.responseText);
});
};
em.refreshData = function () {
em.contentIsLoading(true);
em.generateGrid();
$('.grid-email').data('kendoGrid').dataSource.read();
em.tempCheckIdDelete([]);
ko.mapping.fromJS(em.templateEmail, em.config);
};
em.deleteemail = function () {
if (em.tempCheckIdDelete().length === 0) {
swal({
title: "",
text: 'You havent choose any email to delete',
type: "warning",
confirmButtonColor: "#DD6B55",
confirmButtonText: "OK",
closeOnConfirm: true
});
} else {
swal({
title: "Are you sure?",
text: 'Data email(s) ' + em.tempCheckIdDelete().toString() + ' will be deleted',
type: "warning",
showCancelButton: true,
confirmButtonColor: "#DD6B55",
confirmButtonText: "Delete",
closeOnConfirm: true
}, function () {
setTimeout(function () {
toolkit.ajaxPost(viewModel.appName + "email/deleteemail", { _id: em.tempCheckIdDelete() }, function (res) {
if (!app.isFine(res)) {
return;
}
em.refreshData();
swal({ title: "Email(s) successfully deleted", type: "success" });
});
}, 1000);
});
}
};
em.generateGrid = function () {
$(".grid-email").html("");
$('.grid-email').kendoGrid({
dataSource: {
transport: {
read: {
url: viewModel.appName + "email/search",
type: "POST",
data: ko.mapping.toJS(em.filter),
dataType | em.isAlarmCode(true);
} else if(val.indexOf("isInterval") >= 0) { | random_line_split |
page-email-management.js | : true,
createddate: '',
lastupdate: '',
createdby: '',
updatedby: ''
};
em.CategoryMailList = ko.observableArray([]);
em.UserMailList = ko.observableArray([]);
em.AlarmCodesMailList = ko.observableArray([]);
em.TemplateMailList = ko.observable();
em.isAlarmCode = ko.observable(false);
em.isInterval = ko.observable(false);
em.templateFilter = {
search: ""
};
em.contentIsLoading = ko.observable(false);
em.TableColumns = ko.observableArray([{ headerTemplate: "<center><input type='checkbox' class='deletecheckall' onclick=\"em.checkDeleteData(this, 'deleteall', 'all')\"/></center>", attributes: { style: "text-align: center;" }, width: 40, template: function template(d) {
return ["<input type='checkbox' class='deletecheck' idcheck='" + d._id + "' onclick=\"em.checkDeleteData(this, 'delete')\" />"].join(" ");
} }, {
field: "_id",
title: "ID",
headerAttributes: { style: "text-align: center;" }
}, {
field: "subject",
title: "Subject",
headerAttributes: { style: "text-align: center;" }
}, {
field: "category",
title: "Category",
headerAttributes: { style: "text-align: center;" },
attributes: { style: "text-align: center;" }
}, {
field: "enable",
title: "Enable",
headerAttributes: { style: "text-align: center;" },
attributes: { style: "text-align: center;" }
},
{
headerTemplate: "<center>Action</center>", width: 100,
template: function template(d) {
return ["<button class='btn btn-sm btn-warning' onclick='em.editData(\"" + d._id + "\")'><span class='fa fa-pencil' ></span></button>"].join(" ");
},
attributes: { style: "text-align: center;" }
}
]);
em.filter = ko.mapping.fromJS(em.templateFilter);
em.config = ko.mapping.fromJS(em.templateEmail);
em.isNew = ko.observable(false);
em.tempCheckIdDelete = ko.observableArray([]);
em.selectedTableID = ko.observable("");
em.checkDeleteData = function (elem, e) {
if (e === 'delete') {
if ($(elem).prop('checked') === true) em.tempCheckIdDelete.push($(elem).attr('idcheck'));else em.tempCheckIdDelete.remove(function (item) {
return item === $(elem).attr('idcheck');
});
}
if (e === 'deleteall') {
if ($(elem).prop('checked') === true) {
$('.deletecheck').each(function (index) {
$(this).prop("checked", true);
em.tempCheckIdDelete.push($(this).attr('idcheck'));
});
} else |
}
};
em.checkCategory = function() {
em.showHide($('#categoryList').data('kendoDropDownList').value());
}
em.showHide = function(category) {
var resObj = em.CategoryMailList().filter(function(obj) {
return obj.value == category;
});
var condition = resObj[0].condition.split(",");
em.isAlarmCode(false);
em.isInterval(false);
$.each(condition, function(idx, val){
if(val.indexOf("isAlarmCode") >= 0) {
em.isAlarmCode(true);
} else if(val.indexOf("isInterval") >= 0) {
em.isInterval(true);
}
});
var catVal = $('#categoryList').data('kendoDropDownList').value();
if(catVal == "alarm01") {
$('#templateMail').html(em.TemplateMailList().alarmTemplate)
} else {
$('#templateMail').html(em.TemplateMailList().dataTemplate)
}
}
em.resetDDL = function() {
$('#categoryList').data('kendoDropDownList').select(0);
$('#userList').data('kendoMultiSelect').value([]);
$('#alarmcodesList').data('kendoMultiSelect').value([]);
}
em.setDDL = function(data) {
$('#categoryList').data('kendoDropDownList').value(data.category);
$('#userList').data('kendoMultiSelect').value(data.receivers);
$('#alarmcodesList').data('kendoMultiSelect').value(data.alarmcodes);
em.showHide(data.category);
}
em.newData = function () {
em.isNew(true);
ko.mapping.fromJS(em.templateEmail, em.config);
$('#editor').data('kendoEditor').value("");
em.resetDDL();
em.checkCategory();
setTimeout(function(){
$('#modalUpdate').modal('show');
}, 100);
};
em.editData = function (id) {
em.isNew(false);
toolkit.ajaxPost(viewModel.appName + 'email/editemail', { _id: id }, function (res) {
if (!app.isFine(res)) {
return;
}
ko.mapping.fromJS(res.data, em.config);
em.setDDL(res.data);
$('#editor').data('kendoEditor').value(res.data.template);
setTimeout(function(){
$('#modalUpdate').modal('show');
}, 100);
});
};
em.setEditor = function() {
$("#editor").html("");
$("#editor").kendoEditor({
resizable: {
content: true,
toolbar: true,
},
messages: {
// fontName: "Source Sans Pro, Lato , Open Sans , Helvetica Neue, Arial, sans-serif"
fontNameInherit: "Source Sans Pro, Lato , Open Sans , Helvetica Neue, Arial, sans-serif",
fontSize: 12
}
});
}
em.saveChanges = function () {
if (!toolkit.isFormValid(".form-group")) {
return;
}
var param = ko.mapping.toJS(em.config);
param.id = param._id;
param.intervaltime = parseInt(param.intervaltime);
param.category = $('#categoryList').data('kendoDropDownList').value();
param.receivers = $('#userList').data('kendoMultiSelect').value();
param.alarmcodes = $('#alarmcodesList').data('kendoMultiSelect').value();
param.template = $('#editor').data('kendoEditor').value();
param.lastupdate = new Date();
if(em.isNew()) {
param.createddate = new Date();
}
toolkit.ajaxPost(viewModel.appName + 'email/saveemail', param, function (res) {
if (!app.isFine(res)) {
return;
}
var dataEmail = res.data;
var resCreate = em.UserMailList().filter(function(obj) {
return obj.value == dataEmail.createdby;
});
dataEmail.createdby = resCreate[0].text;
var resUpdate = em.UserMailList().filter(function(obj) {
return obj.value == dataEmail.updatedby;
});
dataEmail.updatedby = resUpdate[0].text;
var ajaxToFile = $.ajax({
url: "http://ostrowfm-realtime.eaciitapp.com/email/mailtofile",
data: dataEmail,
contentType: false,
dataType: "json",
type: 'GET',
success: function (data) {
}
});
$('#modalUpdate').modal('hide');
em.refreshData();
swal({ title: res.message, type: "success" });
}, function (err) {
toolkit.showError(err.responseText);
});
};
em.refreshData = function () {
em.contentIsLoading(true);
em.generateGrid();
$('.grid-email').data('kendoGrid').dataSource.read();
em.tempCheckIdDelete([]);
ko.mapping.fromJS(em.templateEmail, em.config);
};
em.deleteemail = function () {
if (em.tempCheckIdDelete().length === 0) {
swal({
title: "",
text: 'You havent choose any email to delete',
type: "warning",
confirmButtonColor: "#DD6B55",
confirmButtonText: "OK",
closeOnConfirm: true
});
} else {
swal({
title: "Are you sure?",
text: 'Data email(s) ' + em.tempCheckIdDelete().toString() + ' will be deleted',
type: "warning",
showCancelButton: true,
confirmButtonColor: "#DD6B55",
confirmButtonText: "Delete",
closeOnConfirm: true
}, function () {
setTimeout(function () {
toolkit.ajaxPost(viewModel.appName + "email/deleteemail", { _id: em.tempCheckIdDelete() }, function (res) {
if (!app.isFine(res)) {
return;
}
em.refreshData();
swal({ title: "Email(s) successfully deleted", type: "success" });
});
}, 1000);
});
}
};
em.generateGrid = function () {
$(".grid-email").html("");
$('.grid-email').kendoGrid({
dataSource: {
transport: {
read: {
url: viewModel.appName + "email/search",
type: "POST",
data: ko.mapping.toJS(em.filter),
| {
(function () {
var idtemp = '';
$('.deletecheck').each(function (index) {
$(this).prop("checked", false);
idtemp = $(this).attr('idcheck');
em.tempCheckIdDelete.remove(function (item) {
return item === idtemp;
});
});
})();
} | conditional_block |
binary_search_tree1.py | time
logging.basicConfig(
level=logging.ERROR,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Node():
def __init__(self, data=None):
self._data = data
self._left, self._right = None, None
def __str__(self):
return 'Node:<data:%s>, <left:%s>, <right:%s>' % (
str(self._data), str(self._left), str(self._right))
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def left(self):
return self._left
@left.setter
def left(self, value):
self._left = value
@property
def right(self):
return self._right
@right.setter
def right(self, value):
self._right = value
def check_null(func):
@functools.wraps(func)
def wrapper(self, *args, **kw):
if self.__bool__(): # check if the BinarySearchTree() object is None
return func(self, *args, **kw)
else:
if func.__name__ in ['_insert', '_insert2']:
self._root = Node(args[0])
else:
print('The tree is empty')
return wrapper
# class Ad():
# def nam(self):
# pass
#
# print(Ad().nam.__name__)
# # nam
class BinarySearchTree():
"""
如果非空,那么左子树的所有节点都小于根节点,右子树的所有节点都大于根节点,数为二叉搜索树。
左右子树都为二叉搜索树。
"""
def __init__(self):
self._root = None
def __str__(self):
""" yield 迭代器 """
tree2list = [x.data for x in self._generate_node()]
return 'the BinarySearchTree is %s' % tree2list
def __bool__(self):
if self._root is not None:
return True
else:
return False
@staticmethod
def _redirect(pre_node, is_left, target): # staticmethod no need pass self 与类对象无关
"""
将target节点赋值成 pre_node的is_left/right子节点
:param is_left: 将target赋成父节点 pre_node 的 left 还是 right 子节点
"""
if is_left:
pre_node.left = target
else:
pre_node.right = target
def _generate_node(self):
queue = [self._root]
while queue:
node = queue.pop(0)
yield node
queue.extend([x for x in (node.left, node.right) if x != None])
# (node.left, node.right) is tuple
@check_null
def _metal_find(self, value, node, alert=True):
"""
内部接口: 实现了基本的查找功能,并且实现了跟踪父节点和判断是否为左右子节点的功能
思 路: 比较简单
:param value:
:param node:
:param alert:
:return: node, _pre_node, is_left
找到的node, 该节点的父节点_pre_node, 该节点是_pre_node的左还是右节点bool(is_left)
"""
# if you want the pre_node and is_left get the specific value, let the node=root
is_left, _pre_node = None, None
while node and value != node.data:
# _pre_node 作用跟踪父节点
_pre_node = node
if value < node.data:
node = node.left
# is_left 作用跟踪是否为左子节点
is_left = True
elif value > node.data:
node = node.right
is_left = False
# while 循环完没找到,则node is None
# while 循环完找到的话,则node is not None 跳过if,return 找到的node
if alert and node is None: # alert and (node is None)
print('There is no node<%s>' % value)
return node, _pre_node, is_left
def find(self, value):
"""暴露给外面的接口,按值查找,返回节点"""
# *_ 除第一个外的其他返回值
result, *_ = self._metal_find(value, self._root)
return result
@check_null
def _insert(self, value, node): # node 实际往往是root
"""
recursive insert method
:param node: 树中存在的某个节点
:return: node: 插入的节点node 这样其实插入的node(value) 是叶节点
"""
# _insert函数最终结果是
# 1 找到value==node.data的节点即已有这个节点,执行print(),再返回这个节点
# 2 node is None,然后将此节点新建出来,执行node = Node(value)
if node is None:
node = Node(value)
else:
if value < node.data:
# _insert()返回待插入的节点 当前节点的左子节点 指向待插入的节点
node.left = self._insert(value, node.left)
elif value > node.data:
# _insert()返回待插入的节点 当前节点的右子节点 指向待插入的节点
node.right = self._insert(value, node.right)
else:
print('have the same value')
return node # 注意将node返回
@check_null
def _insert2(self, value):
"""
Iterative insert method
先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点
"""
result, pre_node, is_left = self._metal_find(value, self._root, False) # 查找
if result is None: # 没找到通过self._redirect() 赋值
self._redirect(pre_node, is_left, Node(value))
else: # 找到说明已经存在
print('already have the value')
# 默认走循环的实现, 递归的程序栈很容易爆掉,并且test_insert()测试了下循环比递归快很多
def insert(self, value, isrecursion=False):
if isrecursion:
self._insert(value, self._root)
else:
self._insert2(value)
@check_null
def _find_extremum(self, node, by='max'):
"""
找 max min 节点
:return node:
"""
if by == 'max':
while node.right:
node = node.right
elif by == 'min':
while node.left:
node = node.left
return node
def findmax(self):
return self._find_extremum(self._root)
def findmin(self):
return self._find_extremum(self._root, by='min')
@check_null
def _delete(self, value, node):
""" recursion delete
step1: 通过value 与 node.data比较来找到要删除的节点
step2: 要删除的节点又有三种situations
situation1: 要删除的节点 是叶节点,没有子节点。
situation2: 要删除的节点 只有一个子节点。
situation3: 要删除的节点 有两个子节点。
:return: 删除完value以后的新的node
"""
if not node:
print('can\'t find')
else: # step1
# If the key to be deleted is smaller than the root's
# key then it lies in left subtree
if value < node.data:
node.left = self._delete(value, node.left)
# If the kye to be delete is greater than the root's key
# then it lies in right subtree
elif value > node.data:
node.right = self._delete(value, node.right)
# If key is same as root's key, then this is the node
# to be deleted
else: # step2
# Node with two children: Get the inorder successor 中序继承者
# 最后node.left = self._delete(tmp.data, node.left)其实转化成了
# 后边 Node with only one child or no child 的情形
### 可以找左子树的最大值或者右子树的最小值作为successor
### 而左子树的最大值或者右子树的最小值必然只有一 | ly one child or no child 的情形
if node.left and node.right:
# find the largest in the left subtree as successor
tmp = self._find_extremum(node.left) # default by max
# Copy the inorder successor's content to this node
node.data = tmp.data
# Delete the inorder successor
node.left = self._delete | 个或零个节点
### 所以转化成了前边 Node with on | identifier_body |
binary_search_tree1.py | time
logging.basicConfig(
level=logging.ERROR,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Node():
def __init__(self, data=None):
self._data = data
self._left, self._right = None, None
def __str__(self):
return 'Node:<data:%s>, <left:%s>, <right:%s>' % (
str(self._data), str(self._left), str(self._right))
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def left(self):
return self._left
@left.setter
def left(self, value):
self._left = value
@property
def right(self):
return self._right
@right.setter
def right(self, value):
self._right = value
def check_null(func):
@functools.wraps(func)
def wrapper(self, *args, **kw):
if self.__bool__(): # check if the BinarySearchTree() object is None
return func(self, *args, **kw)
else:
if func.__name__ in ['_insert', '_insert2']:
self._root = Node(args[0])
else:
print('The tree is empty')
return wrapper
# class Ad():
# def nam(self):
# pass
#
# print(Ad().nam.__name__)
# # nam
class BinarySearchTree():
"""
如果非空,那么左子树的所有节点都小于根节点,右子树的所有节点都大于根节点,数为二叉搜索树。
左右子树都为二叉搜索树。
"""
def __init__(self):
self._root = None
def __str__(self):
""" yield 迭代器 """
tree2list = [x.data for x in self._generate_node()]
return 'the BinarySearchTree is %s' % tree2list
def __bool__(self):
if self._root is not None:
return True
else:
return False
@staticmethod
def _redirect(pre_node, is_left, target): # staticmethod no need pass self 与类对象无关
"""
将target节点赋值成 pre_node的is_left/right子节点
:param is_left: 将target赋成父节点 pre_node 的 left 还是 right 子节点
"""
if is_left:
pre_node.left = target
else:
pre_node.right = target
def _generate_node(self):
queue = [self._root]
while queue:
node = queue.pop(0)
yield node
queue.extend([x for x in (node.left, node.right) if x != None])
# (node.left, node.right) is tuple
@check_null
def _metal_find(self, value, node, alert=True):
"""
内部接口: 实现了基本的查找功能,并且实现了跟踪父节点和判断是否为左右子节点的功能
思 路: 比较简单
:param value:
:param node:
:param alert:
:return: node, _pre_node, is_left
找到的node, 该节点的父节点_pre_node, 该节点是_pre_node的左还是右节点bool(is_left)
"""
# if you want the pre_node and is_left get the specific value, let the node=root
is_left, _pre_node = None, None
while node and value != node.data:
# _pre_node 作用跟踪父节点
_pre_node = node
if value < node.data:
node = node.left
# is_left 作用跟踪是否为左子节点
is_left = True
elif value > node.data:
node = node.right
is_left = False
# while 循环完没找到,则node is None
# while 循环完找到的话,则node is not None 跳过if,return 找到的node
if alert and node is None: # alert and (node is None)
print('There is no node<%s>' % value)
return node, _pre_node, is_left
def find(self, value):
"""暴露给外面的接口,按值查找,返回节点"""
# *_ 除第一个外的其他返回值
result, *_ = self._metal_find(value, self._root)
return result
@check_null
def _insert(self, value, node): # node 实际往往是root
"""
recursive insert method
:param node: 树中存在的某个节点
:return: node: 插入的节点node 这样其实插入的node(value) 是叶节点
"""
# _insert函数最终结果是
# 1 找到value==node.data的节点即已有这个节点,执行print(),再返回这个节点
# 2 node is None,然后将此节点新建出来,执行node = Node(value)
if node is None:
node = Node(value)
else:
if value < node.data:
# _insert()返回待插入的节点 当前节点的左子节点 指向待插入的节点
node.left = self._insert(value, node.left)
elif value > node.data:
# _insert()返回待插入的节点 当前节点的右子节点 指向待插入的节点
node.right = self._insert(value, node.right)
else:
print('have the same value')
return node # 注意将node返回
@check_null
def _insert2(self, value):
"""
Iterative insert method
先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点
"""
result, pre_node, is_left = self._metal_find(value, self._root, False) # 查找
if result is None: # 没找到通过self._redirect() 赋值
self._redirect(pre_node, is_left, Node(value))
else: # 找到说明已经存在
print('already have the value')
# 默认走循环的实现, 递归的程序栈很容易爆掉,并且test_insert()测试了下循环比递归快很多
def insert(self, value, isrecursion=False):
if isrecursion:
self._insert(value, self._root)
else:
self._insert2(value)
@check_null
def _find_extremum(self, node, by='max'):
"""
找 max min 节点
:return node:
"""
if by == 'max':
while node.right:
node = node.right
elif by == 'min':
while node.left:
node = node.left
return node
def findmax(self):
return self._find_extremum(self._root)
def findmin(self):
return self._find_extremum(self._root, by='min')
@check_null
def _delete(self, value, node):
""" recursion delete
step1: 通过value 与 node.data比较来找到要删除的节点
step2: 要删除的节点又有三种situations
situation1: 要删除的节点 是叶节点,没有子节点。
situation2: 要删除的节点 只有一个子节点。
situation3: 要删除的节点 有两个子节点。
:return: 删除完value以后的新的node
"""
if not node:
print('can\'t find')
else: # step1
# If the key to be deleted is smaller than the root's
# key then it lies in left subtree
if value < node.data:
node.left = self._delete(value, node.left)
# If the kye to be delete is grea | an the root's key
# then it lies in right subtree
elif value > node.data:
node.right = self._delete(value, node.right)
# If key is same as root's key, then this is the node
# to be deleted
else: # step2
# Node with two children: Get the inorder successor 中序继承者
# 最后node.left = self._delete(tmp.data, node.left)其实转化成了
# 后边 Node with only one child or no child 的情形
### 可以找左子树的最大值或者右子树的最小值作为successor
### 而左子树的最大值或者右子树的最小值必然只有一个或零个节点
### 所以转化成了前边 Node with only one child or no child 的情形
if node.left and node.right:
# find the largest in the left subtree as successor
tmp = self._find_extremum(node.left) # default by max
# Copy the inorder successor's content to this node
node.data = tmp.data
# Delete the inorder successor
node.left = self._delete | ter th | identifier_name |
binary_search_tree1.py | time
logging.basicConfig(
level=logging.ERROR,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Node():
def __init__(self, data=None):
self._data = data
self._left, self._right = None, None
def __str__(self):
return 'Node:<data:%s>, <left:%s>, <right:%s>' % (
str(self._data), str(self._left), str(self._right))
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def left(self):
return self._left
@left.setter
def left(self, value):
self._left = value
@property
def right(self):
return self._right
@right.setter
def right(self, value):
self._right = value
def check_null(func):
@functools.wraps(func)
def wrapper(self, *args, **kw):
if self.__bool__(): # check if the BinarySearchTree() object is None
return func(self, *args, **kw)
else:
if func.__name__ in ['_insert', '_insert2']:
self._root = Node(args[0])
else:
print('The tree is empty')
return wrapper
# class Ad():
# def nam(self):
# pass
#
# print(Ad().nam.__name__)
# # nam
class BinarySearchTree():
"""
如果非空,那么左子树的所有节点都小于根节点,右子树的所有节点都大于根节点,数为二叉搜索树。
左右子树都为二叉搜索树。
"""
def __init__(self):
self._root = None
def __str__(self):
""" yield 迭代器 """
tree2list = [x.data for x in self._generate_node()]
return 'the BinarySearchTree is %s' % tree2list
def __bool__(self):
if self._root is not None:
return True
else:
return False
@staticmethod
def _redirect(pre_node, is_left, target): # staticmethod no need pass self 与类对象无关
"""
将target节点赋值成 pre_node的is_left/right子节点
:param is_left: 将target赋成父节点 pre_node 的 left 还是 right 子节点
"""
if is_left:
pre_node.left = target
else:
pre_node.right = target
def _generate_node(self):
queue = [self._root]
while queue:
node = queue.pop(0)
yield node
queue.extend([x for x in (node.left, node.right) if x != None])
# (node.left, node.right) is tuple
@check_null
def _metal_find(self, value, node, alert=True):
"""
内部接口: 实现了基本的查找功能,并且实现了跟踪父节点和判断是否为左右子节点的功能
思 路: 比较简单
:param value:
:param node:
:param alert:
:return: node, _pre_node, is_left
找到的node, 该节点的父节点_pre_node, 该节点是_pre_node的左还是右节点bool(is_left)
"""
# if you want the pre_node and is_left get the specific value, let the node=root
is_left, _pre_node = None, None
while node | # is_left 作用跟踪是否为左子节点
is_left = True
elif value > node.data:
node = node.right
is_left = False
# while 循环完没找到,则node is None
# while 循环完找到的话,则node is not None 跳过if,return 找到的node
if alert and node is None: # alert and (node is None)
print('There is no node<%s>' % value)
return node, _pre_node, is_left
def find(self, value):
"""暴露给外面的接口,按值查找,返回节点"""
# *_ 除第一个外的其他返回值
result, *_ = self._metal_find(value, self._root)
return result
@check_null
def _insert(self, value, node): # node 实际往往是root
"""
recursive insert method
:param node: 树中存在的某个节点
:return: node: 插入的节点node 这样其实插入的node(value) 是叶节点
"""
# _insert函数最终结果是
# 1 找到value==node.data的节点即已有这个节点,执行print(),再返回这个节点
# 2 node is None,然后将此节点新建出来,执行node = Node(value)
if node is None:
node = Node(value)
else:
if value < node.data:
# _insert()返回待插入的节点 当前节点的左子节点 指向待插入的节点
node.left = self._insert(value, node.left)
elif value > node.data:
# _insert()返回待插入的节点 当前节点的右子节点 指向待插入的节点
node.right = self._insert(value, node.right)
else:
print('have the same value')
return node # 注意将node返回
@check_null
def _insert2(self, value):
"""
Iterative insert method
先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点
"""
result, pre_node, is_left = self._metal_find(value, self._root, False) # 查找
if result is None: # 没找到通过self._redirect() 赋值
self._redirect(pre_node, is_left, Node(value))
else: # 找到说明已经存在
print('already have the value')
# 默认走循环的实现, 递归的程序栈很容易爆掉,并且test_insert()测试了下循环比递归快很多
def insert(self, value, isrecursion=False):
if isrecursion:
self._insert(value, self._root)
else:
self._insert2(value)
@check_null
def _find_extremum(self, node, by='max'):
"""
找 max min 节点
:return node:
"""
if by == 'max':
while node.right:
node = node.right
elif by == 'min':
while node.left:
node = node.left
return node
def findmax(self):
return self._find_extremum(self._root)
def findmin(self):
return self._find_extremum(self._root, by='min')
@check_null
def _delete(self, value, node):
""" recursion delete
step1: 通过value 与 node.data比较来找到要删除的节点
step2: 要删除的节点又有三种situations
situation1: 要删除的节点 是叶节点,没有子节点。
situation2: 要删除的节点 只有一个子节点。
situation3: 要删除的节点 有两个子节点。
:return: 删除完value以后的新的node
"""
if not node:
print('can\'t find')
else: # step1
# If the key to be deleted is smaller than the root's
# key then it lies in left subtree
if value < node.data:
node.left = self._delete(value, node.left)
# If the kye to be delete is greater than the root's key
# then it lies in right subtree
elif value > node.data:
node.right = self._delete(value, node.right)
# If key is same as root's key, then this is the node
# to be deleted
else: # step2
# Node with two children: Get the inorder successor 中序继承者
# 最后node.left = self._delete(tmp.data, node.left)其实转化成了
# 后边 Node with only one child or no child 的情形
### 可以找左子树的最大值或者右子树的最小值作为successor
### 而左子树的最大值或者右子树的最小值必然只有一个或零个节点
### 所以转化成了前边 Node with only one child or no child 的情形
if node.left and node.right:
# find the largest in the left subtree as successor
tmp = self._find_extremum(node.left) # default by max
# Copy the inorder successor's content to this node
node.data = tmp.data
# Delete the inorder successor
node.left = self._delete(tmp.data | and value != node.data:
# _pre_node 作用跟踪父节点
_pre_node = node
if value < node.data:
node = node.left
| conditional_block |
binary_search_tree1.py | 那么左子树的所有节点都小于根节点,右子树的所有节点都大于根节点,数为二叉搜索树。
左右子树都为二叉搜索树。
"""
def __init__(self):
self._root = None
def __str__(self):
""" yield 迭代器 """
tree2list = [x.data for x in self._generate_node()]
return 'the BinarySearchTree is %s' % tree2list
def __bool__(self):
if self._root is not None:
return True
else:
return False
@staticmethod
def _redirect(pre_node, is_left, target): # staticmethod no need pass self 与类对象无关
"""
将target节点赋值成 pre_node的is_left/right子节点
:param is_left: 将target赋成父节点 pre_node 的 left 还是 right 子节点
"""
if is_left:
pre_node.left = target
else:
pre_node.right = target
def _generate_node(self):
queue = [self._root]
while queue:
node = queue.pop(0)
yield node
queue.extend([x for x in (node.left, node.right) if x != None])
# (node.left, node.right) is tuple
@check_null
def _metal_find(self, value, node, alert=True):
"""
内部接口: 实现了基本的查找功能,并且实现了跟踪父节点和判断是否为左右子节点的功能
思 路: 比较简单
:param value:
:param node:
:param alert:
:return: node, _pre_node, is_left
找到的node, 该节点的父节点_pre_node, 该节点是_pre_node的左还是右节点bool(is_left)
"""
# if you want the pre_node and is_left get the specific value, let the node=root
is_left, _pre_node = None, None
while node and value != node.data:
# _pre_node 作用跟踪父节点
_pre_node = node
if value < node.data:
node = node.left
# is_left 作用跟踪是否为左子节点
is_left = True
elif value > node.data:
node = node.right
is_left = False
# while 循环完没找到,则node is None
# while 循环完找到的话,则node is not None 跳过if,return 找到的node
if alert and node is None: # alert and (node is None)
print('There is no node<%s>' % value)
return node, _pre_node, is_left
def find(self, value):
"""暴露给外面的接口,按值查找,返回节点"""
# *_ 除第一个外的其他返回值
result, *_ = self._metal_find(value, self._root)
return result
@check_null
def _insert(self, value, node): # node 实际往往是root
"""
recursive insert method
:param node: 树中存在的某个节点
:return: node: 插入的节点node 这样其实插入的node(value) 是叶节点
"""
# _insert函数最终结果是
# 1 找到value==node.data的节点即已有这个节点,执行print(),再返回这个节点
# 2 node is None,然后将此节点新建出来,执行node = Node(value)
if node is None:
node = Node(value)
else:
if value < node.data:
# _insert()返回待插入的节点 当前节点的左子节点 指向待插入的节点
node.left = self._insert(value, node.left)
elif value > node.data:
# _insert()返回待插入的节点 当前节点的右子节点 指向待插入的节点
node.right = self._insert(value, node.right)
else:
print('have the same value')
return node # 注意将node返回
@check_null
def _insert2(self, value):
"""
Iterative insert method
先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点
"""
result, pre_node, is_left = self._metal_find(value, self._root, False) # 查找
if result is None: # 没找到通过self._redirect() 赋值
self._redirect(pre_node, is_left, Node(value))
else: # 找到说明已经存在
print('already have the value')
# 默认走循环的实现, 递归的程序栈很容易爆掉,并且test_insert()测试了下循环比递归快很多
def insert(self, value, isrecursion=False):
if isrecursion:
self._insert(value, self._root)
else:
self._insert2(value)
@check_null
def _find_extremum(self, node, by='max'):
"""
找 max min 节点
:return node:
"""
if by == 'max':
while node.right:
node = node.right
elif by == 'min':
while node.left:
node = node.left
return node
def findmax(self):
return self._find_extremum(self._root)
def findmin(self):
return self._find_extremum(self._root, by='min')
@check_null
def _delete(self, value, node):
""" recursion delete
step1: 通过value 与 node.data比较来找到要删除的节点
step2: 要删除的节点又有三种situations
situation1: 要删除的节点 是叶节点,没有子节点。
situation2: 要删除的节点 只有一个子节点。
situation3: 要删除的节点 有两个子节点。
:return: 删除完value以后的新的node
"""
if not node:
print('can\'t find')
else: # step1
# If the key to be deleted is smaller than the root's
# key then it lies in left subtree
if value < node.data:
node.left = self._delete(value, node.left)
# If the kye to be delete is greater than the root's key
# then it lies in right subtree
elif value > node.data:
node.right = self._delete(value, node.right)
# If key is same as root's key, then this is the node
# to be deleted
else: # step2
# Node with two children: Get the inorder successor 中序继承者
# 最后node.left = self._delete(tmp.data, node.left)其实转化成了
# 后边 Node with only one child or no child 的情形
### 可以找左子树的最大值或者右子树的最小值作为successor
### 而左子树的最大值或者右子树的最小值必然只有一个或零个节点
### 所以转化成了前边 Node with only one child or no child 的情形
if node.left and node.right:
# find the largest in the left subtree as successor
tmp = self._find_extremum(node.left) # default by max
# Copy the inorder successor's content to this node
node.data = tmp.data
# Delete the inorder successor
node.left = self._delete(tmp.data, node.left)
# Node with only one child or no child
else:
if node.left is None:
node = node.right
else:
node = node.left
return node # 最后层层返回
@check_null
def _delete2(self, value, node):
"""非递归删除
首先: 找到要删除的节点result
再次: 找到并删除result的successor,再将successor的data赋给要删除的节点result
讨论复杂的2个节点的情况:
1 找到value所在的节点result,该节点有两个子节点
2 找到result的左子节点的max记为tmp,tmp只有0或1个节点
3 从result中删除tmp,tmp只有0或1个节点,
4 ...
"""
# 首先: 找到要删除的节点result
result, pre_node, is_left = self._metal_find(value, node)
if result is None:
return
# 有2个节点的情况
if result.left and result.right:
tmp = self._find_extremum(result.left) # 再次: 找到result的successor
self._delete2(tmp.data, result) # 再次: 删除result的successor 这步会走后边else里 "# 有1个或者没有" 的情形
result.data = tmp.data # 再将successor的data赋给要删除的节点result | # 有1个或者没有
else: | random_line_split |
|
new_solver_4.py | ridge = ridge_vertices[i]
partname = 'Part-' + str(i+1)
myModel.Part(dimensionality=THREE_D, name=partname, type=
DEFORMABLE_BODY)
try:
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[ridge[0]][0],vertices[ridge[0]][1], vertices[ridge[0]][2]))
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[ridge[1]][0],vertices[ridge[1]][1], vertices[ridge[1]][2]))
except IndexError:
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[i][0],vertices[i][1], vertices[i][2]))
myModel.parts[partname].WirePolyLine(mergeType=IMPRINT, meshable=
ON, points=((myModel.parts[partname].datums[1],
myModel.parts[partname].datums[2]), ))
#### MATERIAL AND SECTION DEFINITION ####
# Truss section
def define_material(network):
myModel.Material(name='Material-2')
myModel.materials['Material-2'].Elastic(table=((network.beam_young, network.beam_poisson), ))
myModel.CircularProfile(name='Profile-1', r=network.beam_profile)
myModel.BeamSection(consistentMassMatrix=False, integration=
DURING_ANALYSIS, material='Material-2', name='Section-2', poissonRatio=0.0,
profile='Profile-1', temperatureVar=LINEAR)
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
myModel.parts[partname].SectionAssignment(offset=0.0,
offsetField='', offsetType=MIDDLE_SURFACE, region=Region(
edges=myModel.parts[partname].edges.getSequenceFromMask(mask=('[#1 ]', ), )),
sectionName='Section-2', thicknessAssignment=
FROM_SECTION)
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
myModel.parts[partname].assignBeamSectionOrientation(method=
N1_COSINES, n1=(0.0, 0.0, -1.0), region=Region(
edges=myModel.parts[partname].edges.getSequenceFromMask(mask=('[#1 ]', ), )))
mdb.models['Model-1'].rootAssembly.DatumCsysByDefault(CARTESIAN)
#### ASSEMBLY ####
# Creation of instances
def assembly(network):
list_node_label=[]
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
instancename = 'Part-' + str(i+1) + '-1'
myModel.rootAssembly.Instance(dependent=OFF, name=instancename,
part=myModel.parts[partname])
for k in range(len(vertices)):
ridge=network.list_nodes_ridges[k]
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_node_label.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords).index)
filename = 'node_label_%09d.csv' % test_number
with open(filename,'w') as writeFile:
writer = csv.writer(writeFile,delimiter=',')
writer.writerow(list_node_label)
return list_node_label
# Step Creation
def set_steps(network):
myModel.StaticStep(name='Step-1', previous='Initial',maxNumInc=1000, minInc=1e-10, nlgeom=ON)
#myModel.FieldOutputRequest(name='F-Output-3',createStepName='Step-1', variables=('COORD', 'S','E','SE'),numIntervals=
# iterations)
myModel.fieldOutputRequests['F-Output-1'].setValues(variables=('S', 'E', 'U', 'RF', 'CF', 'COORD'))
myModel.fieldOutputRequests['F-Output-1'].setValues(
numIntervals=50)
myModel.steps['Step-1'].setValues(stabilizationMethod=DISSIPATED_ENERGY_FRACTION,
continueDampingFactors=True, adaptiveDampingRatio=0.1)
"""
def set_boundary_conditions(network): ## can be changed with the node label list
list_vertices_right = []
for k in network.boundary_nodes_right:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_vertices_right.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))
list_vertices_left = []
for k in network.boundary_nodes_left:
ridge=list_nodes_ridges[k]
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_vertices_left.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))
myModel.PinnedBC(createStepName='Initial', localCsys=None, name=
'BC-1', region=Region(vertices=VertexArray(list_vertices_left)))
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1',
distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name=
'BC-2', region=Region(vertices=VertexArray(list_vertices_right)), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
"""
def set_boundary_conditions(network): ## can be changed with the node label list
#list_vertices_right = []
for k in network.boundary_nodes_right:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
vertice = myModel.rootAssembly.instances[instancename].vertices.findAt(coords)
if vertice.index==0:
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[:1]), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
else:
|
#list_vertices_left = []
for k in network.boundary_nodes_left:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
vertice = myModel.rootAssembly.instances[instancename].vertices.findAt(coords)
if vertice.index==0:
myModel.PinnedBC(createStepName='Initial', localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[:1]))
else:
myModel.PinnedBC(createStepName='Initial', localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[1:]))
## to be deleted and adapted with network
def define_mesh(mask):
number_elements = []
for i in range(len(ridge_vertices)):
instancename = 'Part-' + str(i+1) + '-1'
#myModel.rootAssembly.setElementType(elemTypes=(ElemType(
# elemCode=B21, elemLibrary=STANDARD), ), regions=(
# myModel.rootAssembly.instances[instancename].edges.getSequenceFromMask(
# mask=('[#1 ]', ), ), ))
if int(network.dimension)==2:
myModel.rootAssembly.setElementType(elemTypes=( | myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[1:]), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
"""
list_vertices_right.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))""" | conditional_block |
new_solver_4.py | ridge = ridge_vertices[i]
partname = 'Part-' + str(i+1)
myModel.Part(dimensionality=THREE_D, name=partname, type=
DEFORMABLE_BODY)
try:
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[ridge[0]][0],vertices[ridge[0]][1], vertices[ridge[0]][2]))
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[ridge[1]][0],vertices[ridge[1]][1], vertices[ridge[1]][2]))
except IndexError:
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[i][0],vertices[i][1], vertices[i][2]))
myModel.parts[partname].WirePolyLine(mergeType=IMPRINT, meshable=
ON, points=((myModel.parts[partname].datums[1],
myModel.parts[partname].datums[2]), ))
#### MATERIAL AND SECTION DEFINITION ####
# Truss section
def define_material(network):
myModel.Material(name='Material-2')
myModel.materials['Material-2'].Elastic(table=((network.beam_young, network.beam_poisson), ))
myModel.CircularProfile(name='Profile-1', r=network.beam_profile)
myModel.BeamSection(consistentMassMatrix=False, integration=
DURING_ANALYSIS, material='Material-2', name='Section-2', poissonRatio=0.0,
profile='Profile-1', temperatureVar=LINEAR)
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
myModel.parts[partname].SectionAssignment(offset=0.0,
offsetField='', offsetType=MIDDLE_SURFACE, region=Region(
edges=myModel.parts[partname].edges.getSequenceFromMask(mask=('[#1 ]', ), )),
sectionName='Section-2', thicknessAssignment=
FROM_SECTION)
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
myModel.parts[partname].assignBeamSectionOrientation(method=
N1_COSINES, n1=(0.0, 0.0, -1.0), region=Region(
edges=myModel.parts[partname].edges.getSequenceFromMask(mask=('[#1 ]', ), )))
mdb.models['Model-1'].rootAssembly.DatumCsysByDefault(CARTESIAN)
#### ASSEMBLY ####
# Creation of instances
def assembly(network):
list_node_label=[]
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
instancename = 'Part-' + str(i+1) + '-1'
myModel.rootAssembly.Instance(dependent=OFF, name=instancename,
part=myModel.parts[partname])
for k in range(len(vertices)):
ridge=network.list_nodes_ridges[k]
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_node_label.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords).index)
filename = 'node_label_%09d.csv' % test_number
with open(filename,'w') as writeFile:
writer = csv.writer(writeFile,delimiter=',')
writer.writerow(list_node_label)
return list_node_label
# Step Creation
def set_steps(network):
myModel.StaticStep(name='Step-1', previous='Initial',maxNumInc=1000, minInc=1e-10, nlgeom=ON)
#myModel.FieldOutputRequest(name='F-Output-3',createStepName='Step-1', variables=('COORD', 'S','E','SE'),numIntervals=
# iterations)
myModel.fieldOutputRequests['F-Output-1'].setValues(variables=('S', 'E', 'U', 'RF', 'CF', 'COORD'))
myModel.fieldOutputRequests['F-Output-1'].setValues(
numIntervals=50)
myModel.steps['Step-1'].setValues(stabilizationMethod=DISSIPATED_ENERGY_FRACTION,
continueDampingFactors=True, adaptiveDampingRatio=0.1)
"""
def set_boundary_conditions(network): ## can be changed with the node label list
list_vertices_right = []
for k in network.boundary_nodes_right:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_vertices_right.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))
list_vertices_left = []
for k in network.boundary_nodes_left:
ridge=list_nodes_ridges[k]
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_vertices_left.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))
myModel.PinnedBC(createStepName='Initial', localCsys=None, name=
'BC-1', region=Region(vertices=VertexArray(list_vertices_left)))
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1',
distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name=
'BC-2', region=Region(vertices=VertexArray(list_vertices_right)), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
"""
def | (network): ## can be changed with the node label list
#list_vertices_right = []
for k in network.boundary_nodes_right:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
vertice = myModel.rootAssembly.instances[instancename].vertices.findAt(coords)
if vertice.index==0:
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[:1]), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
else:
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[1:]), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
"""
list_vertices_right.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))"""
#list_vertices_left = []
for k in network.boundary_nodes_left:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
vertice = myModel.rootAssembly.instances[instancename].vertices.findAt(coords)
if vertice.index==0:
myModel.PinnedBC(createStepName='Initial', localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[:1]))
else:
myModel.PinnedBC(createStepName='Initial', localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[1:]))
## to be deleted and adapted with network
def define_mesh(mask):
number_elements = []
for i in range(len(ridge_vertices)):
instancename = 'Part-' + str(i+1) + '-1'
#myModel.rootAssembly.setElementType(elemTypes=(ElemType(
# elemCode=B21, elemLibrary=STANDARD), ), regions=(
# myModel.rootAssembly.instances[instancename].edges.getSequenceFromMask(
# mask=('[#1 ]', ), ), ))
if int(network.dimension)==2:
myModel.rootAssembly.setElementType(elemTypes=( | set_boundary_conditions | identifier_name |
new_solver_4.py | range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
myModel.parts[partname].assignBeamSectionOrientation(method=
N1_COSINES, n1=(0.0, 0.0, -1.0), region=Region(
edges=myModel.parts[partname].edges.getSequenceFromMask(mask=('[#1 ]', ), )))
mdb.models['Model-1'].rootAssembly.DatumCsysByDefault(CARTESIAN)
#### ASSEMBLY ####
# Creation of instances
def assembly(network):
list_node_label=[]
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
instancename = 'Part-' + str(i+1) + '-1'
myModel.rootAssembly.Instance(dependent=OFF, name=instancename,
part=myModel.parts[partname])
for k in range(len(vertices)):
ridge=network.list_nodes_ridges[k]
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_node_label.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords).index)
filename = 'node_label_%09d.csv' % test_number
with open(filename,'w') as writeFile:
writer = csv.writer(writeFile,delimiter=',')
writer.writerow(list_node_label)
return list_node_label
# Step Creation
def set_steps(network):
myModel.StaticStep(name='Step-1', previous='Initial',maxNumInc=1000, minInc=1e-10, nlgeom=ON)
#myModel.FieldOutputRequest(name='F-Output-3',createStepName='Step-1', variables=('COORD', 'S','E','SE'),numIntervals=
# iterations)
myModel.fieldOutputRequests['F-Output-1'].setValues(variables=('S', 'E', 'U', 'RF', 'CF', 'COORD'))
myModel.fieldOutputRequests['F-Output-1'].setValues(
numIntervals=50)
myModel.steps['Step-1'].setValues(stabilizationMethod=DISSIPATED_ENERGY_FRACTION,
continueDampingFactors=True, adaptiveDampingRatio=0.1)
"""
def set_boundary_conditions(network): ## can be changed with the node label list
list_vertices_right = []
for k in network.boundary_nodes_right:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_vertices_right.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))
list_vertices_left = []
for k in network.boundary_nodes_left:
ridge=list_nodes_ridges[k]
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_vertices_left.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))
myModel.PinnedBC(createStepName='Initial', localCsys=None, name=
'BC-1', region=Region(vertices=VertexArray(list_vertices_left)))
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1',
distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name=
'BC-2', region=Region(vertices=VertexArray(list_vertices_right)), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
"""
def set_boundary_conditions(network): ## can be changed with the node label list
#list_vertices_right = []
for k in network.boundary_nodes_right:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
vertice = myModel.rootAssembly.instances[instancename].vertices.findAt(coords)
if vertice.index==0:
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[:1]), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
else:
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[1:]), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
"""
list_vertices_right.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))"""
#list_vertices_left = []
for k in network.boundary_nodes_left:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
vertice = myModel.rootAssembly.instances[instancename].vertices.findAt(coords)
if vertice.index==0:
myModel.PinnedBC(createStepName='Initial', localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[:1]))
else:
myModel.PinnedBC(createStepName='Initial', localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[1:]))
## to be deleted and adapted with network
def define_mesh(mask):
number_elements = []
for i in range(len(ridge_vertices)):
instancename = 'Part-' + str(i+1) + '-1'
#myModel.rootAssembly.setElementType(elemTypes=(ElemType(
# elemCode=B21, elemLibrary=STANDARD), ), regions=(
# myModel.rootAssembly.instances[instancename].edges.getSequenceFromMask(
# mask=('[#1 ]', ), ), ))
if int(network.dimension)==2:
myModel.rootAssembly.setElementType(elemTypes=(ElemType(
elemCode=B22, elemLibrary=EXPLICIT), ), regions=(
myModel.rootAssembly.instances[instancename].edges.getSequenceFromMask(
mask=('[#1 ]', ), ), ))
elif int(network.dimension)==3:
myModel.rootAssembly.setElementType(elemTypes=(ElemType(
elemCode=B32, elemLibrary=EXPLICIT), ), regions=(
myModel.rootAssembly.instances[instancename].edges.getSequenceFromMask(
mask=('[#1 ]', ), ), ))
myModel.rootAssembly.seedPartInstance(regions=(
mdb.models['Model-1'].rootAssembly.instances[instancename], ), size=tensile_test.element_size)
mdb.models['Model-1'].rootAssembly.generateMesh(regions=(
mdb.models['Model-1'].rootAssembly.instances[instancename], ))
number_elements.append(len(mdb.models['Model-1'].rootAssembly.instances[instancename].elements))
filename = 'number_elements_%09d.csv' % test_number
with open(filename,'w') as writeFile:
writer = csv.writer(writeFile,delimiter=',')
writer.writerow(number_elements)
list_nodes_ridges=[[] for i in range(len(vertices))]
for i in range(len(ridge_vertices)):
list_nodes_ridges[ridge_vertices[i][0]].append(i)
list_nodes_ridges[ridge_vertices[i][1]].append(i)
def create_connectors(network):
connector_list=[]
for k in range(len(list_nodes_ridges)):
if int(network.dimension)==2: coords = (vertices[k][0],vertices[k][1],0.0)
elif int(network.dimension)==3: coords = (vertices[k][0],vertices[k][1],vertices[k][2])
list_ridge = list_nodes_ridges[k]
if len(list_ridge) > 1: | for i in range(len(list_ridge)-1):
instancename1='Part-'+str(list_ridge[i]+1)+'-1'
instancename2='Part-'+str(list_ridge[i+1]+1)+'-1' | random_line_split |
|
new_solver_4.py | .parts[partname].WirePolyLine(mergeType=IMPRINT, meshable=
ON, points=((myModel.parts[partname].datums[1],
myModel.parts[partname].datums[2]), ))
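# At this point every ridge of the network exists as its own two-vertex wire part.
# The rest of the script gives these parts a beam section, instantiates them in the
# assembly, and applies the tensile-test style boundary conditions.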
#### MATERIAL AND SECTION DEFINITION ####
# Beam section with circular profile
def define_material(network):
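    # Builds one linear-elastic material and one circular beam profile from the network
    # parameters (beam_young, beam_poisson, beam_profile), assigns the resulting beam
    # section to the single edge of every wire part, and sets a fixed n1 orientation of
    # (0.0, 0.0, -1.0) for all of them.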
myModel.Material(name='Material-2')
myModel.materials['Material-2'].Elastic(table=((network.beam_young, network.beam_poisson), ))
myModel.CircularProfile(name='Profile-1', r=network.beam_profile)
myModel.BeamSection(consistentMassMatrix=False, integration=
DURING_ANALYSIS, material='Material-2', name='Section-2', poissonRatio=0.0,
profile='Profile-1', temperatureVar=LINEAR)
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
myModel.parts[partname].SectionAssignment(offset=0.0,
offsetField='', offsetType=MIDDLE_SURFACE, region=Region(
edges=myModel.parts[partname].edges.getSequenceFromMask(mask=('[#1 ]', ), )),
sectionName='Section-2', thicknessAssignment=
FROM_SECTION)
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
myModel.parts[partname].assignBeamSectionOrientation(method=
N1_COSINES, n1=(0.0, 0.0, -1.0), region=Region(
edges=myModel.parts[partname].edges.getSequenceFromMask(mask=('[#1 ]', ), )))
mdb.models['Model-1'].rootAssembly.DatumCsysByDefault(CARTESIAN)
#### ASSEMBLY ####
# Creation of instances
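# assembly() instantiates every wire part in the root assembly and, for each network
# node, records the index (0 or 1) of the matching vertex inside the first instance
# attached to that node; the list is written to node_label_<test_number>.csv,
# presumably so later post-processing can map Abaqus vertices back to network nodes.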
def assembly(network):
list_node_label=[]
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
instancename = 'Part-' + str(i+1) + '-1'
myModel.rootAssembly.Instance(dependent=OFF, name=instancename,
part=myModel.parts[partname])
for k in range(len(vertices)):
ridge=network.list_nodes_ridges[k]
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_node_label.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords).index)
filename = 'node_label_%09d.csv' % test_number
with open(filename,'w') as writeFile:
writer = csv.writer(writeFile,delimiter=',')
writer.writerow(list_node_label)
return list_node_label
# Step Creation
def set_steps(network):
myModel.StaticStep(name='Step-1', previous='Initial',maxNumInc=1000, minInc=1e-10, nlgeom=ON)
#myModel.FieldOutputRequest(name='F-Output-3',createStepName='Step-1', variables=('COORD', 'S','E','SE'),numIntervals=
# iterations)
myModel.fieldOutputRequests['F-Output-1'].setValues(variables=('S', 'E', 'U', 'RF', 'CF', 'COORD'))
myModel.fieldOutputRequests['F-Output-1'].setValues(
numIntervals=50)
myModel.steps['Step-1'].setValues(stabilizationMethod=DISSIPATED_ENERGY_FRACTION,
continueDampingFactors=True, adaptiveDampingRatio=0.1)
"""
def set_boundary_conditions(network): ## can be changed with the node label list
list_vertices_right = []
for k in network.boundary_nodes_right:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_vertices_right.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))
list_vertices_left = []
for k in network.boundary_nodes_left:
ridge=list_nodes_ridges[k]
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_vertices_left.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))
myModel.PinnedBC(createStepName='Initial', localCsys=None, name=
'BC-1', region=Region(vertices=VertexArray(list_vertices_left)))
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1',
distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name=
'BC-2', region=Region(vertices=VertexArray(list_vertices_right)), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
"""
def set_boundary_conditions(network): ## can be changed with the node label list
#list_vertices_right = []
for k in network.boundary_nodes_right:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
vertice = myModel.rootAssembly.instances[instancename].vertices.findAt(coords)
if vertice.index==0:
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[:1]), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
else:
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[1:]), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
"""
list_vertices_right.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))"""
#list_vertices_left = []
for k in network.boundary_nodes_left:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
vertice = myModel.rootAssembly.instances[instancename].vertices.findAt(coords)
if vertice.index==0:
myModel.PinnedBC(createStepName='Initial', localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[:1]))
else:
myModel.PinnedBC(createStepName='Initial', localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[1:]))
## to be deleted and adapted with network
def define_mesh(mask):
| number_elements = []
for i in range(len(ridge_vertices)):
instancename = 'Part-' + str(i+1) + '-1'
#myModel.rootAssembly.setElementType(elemTypes=(ElemType(
# elemCode=B21, elemLibrary=STANDARD), ), regions=(
# myModel.rootAssembly.instances[instancename].edges.getSequenceFromMask(
# mask=('[#1 ]', ), ), ))
if int(network.dimension)==2:
myModel.rootAssembly.setElementType(elemTypes=(ElemType(
elemCode=B22, elemLibrary=EXPLICIT), ), regions=(
myModel.rootAssembly.instances[instancename].edges.getSequenceFromMask(
mask=('[#1 ]', ), ), ))
elif int(network.dimension)==3:
myModel.rootAssembly.setElementType(elemTypes=(ElemType(
elemCode=B32, elemLibrary=EXPLICIT), ), regions=(
myModel.rootAssembly.instances[instancename].edges.getSequenceFromMask(
mask=('[#1 ]', ), ), ))
myModel.rootAssembly.seedPartInstance(regions=(
mdb.models['Model-1'].rootAssembly.instances[instancename], ), size=tensile_test.element_size)
mdb.models['Model-1'].rootAssembly.generateMesh(regions=( | identifier_body |
|
analysis.py | """
# Extract data
data = util.extract_data(datasets, axis)
titles = util.extract_title(datasets)
xmin, xmax = util.set_limits(data, xmin, xmax, xrange_, axis)
if bins is None:
bins = util.bin_default(axis, xmin, xmax)
fig, ax = util.fig_ax(axes)
fig._flowml_axis = (axis, )
# Plotting preferences
alpha = util.alpha(len(data))
# We do not use the Matplotlib API for histograms because we want to have transparent plots
# Following example: http://matplotlib.org/examples/api/histogram_path_demo.html
max_value = float('-inf')
for (d, t) in zip(data, titles ):
(hist, bin_edges) = np.histogram(d, bins = bins, range = (xmin, xmax))
left = np.array(bin_edges[:-1])
right = np.array(bin_edges[1:])
# FIXES a bug in MPLD3 0.3 regarding NaNs in coordinates
bottom = 1e-6*np.ones(len(left))
top = bottom + hist
XY = np.array([[left,left,right,right], [bottom, top, top, bottom]]).T
barpath = matplotlib.path.Path.make_compound_path_from_polys(XY)
# serves to get the current color
base_line, = ax.plot(hist, alpha = 0)
patch = matplotlib.patches.PathPatch(barpath, facecolor = base_line.get_color(),
edgecolor = base_line.get_color(), alpha = alpha)
# Clear the unneeded line
base_line.remove()
patch.set_label(t)
ax.add_patch(patch)
max_value = max(max_value, top.max())
ax.set_xlim(xmin, xmax)
ax.set_ylim(1, max_value )
ax.set_xlabel(axis)
ax.set_yscale(util.default_yscale(axis))
if len(data) > 1:
|
else:
ax.set_title(titles[0])
return fig
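# Illustrative sketch (not part of the original module): the compound-path trick used in
# hist() above, shown in isolation. The bars are built by hand and added as a single
# PathPatch so their alpha can be controlled per dataset. `values` is assumed to be any
# 1-D array-like of measurements.
def _example_transparent_hist(values, bins=50, alpha=0.5):
    fig, ax = plt.subplots()
    counts, edges = np.histogram(values, bins=bins)
    left, right = edges[:-1], edges[1:]
    bottom = np.zeros(len(left))
    top = bottom + counts
    XY = np.array([[left, left, right, right], [bottom, top, top, bottom]]).T
    barpath = matplotlib.path.Path.make_compound_path_from_polys(XY)
    ax.add_patch(matplotlib.patches.PathPatch(barpath, alpha=alpha))
    ax.set_xlim(edges[0], edges[-1])
    ax.set_ylim(0, max(counts.max(), 1))
    return fig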
def hist2(datasets, axis1, axis2, bins = None,
xmin = None, xmax = None, ymin = None, ymax = None, range_ = None,
axes = None, transform = None):
datax = util.extract_data(datasets, axis1)
datay = util.extract_data(datasets, axis2)
titles = util.extract_title(datasets)
try:
xrange_ = range_[0]
yrange_ = range_[1]
except:
xrange_ = None
yrange_ = None
xmin, xmax = util.set_limits(datax, xmin, xmax, xrange_, axis1)
ymin, ymax = util.set_limits(datay, ymin, ymax, yrange_, axis2)
if not isinstance(transform, (list, tuple)):
transform = [transform, transform]
scaling = [None, None]
scaling[0], transform[0] = util.default_scaling(axis1, scaling = scaling[0], transform = transform[0])
scaling[1], transform[1] = util.default_scaling(axis2, scaling = scaling[1], transform = transform[1])
for index, d in enumerate(datax):
datax[index] = transform[0](d)
for index, d in enumerate(datay):
datay[index] = transform[1](d)
xmin_transformed, xmax_transformed = util.set_limits(datax)
ymin_transformed, ymax_transformed = util.set_limits(datay)
# Determine how many bins to use
if bins is None:
bins = [None, None]
if isinstance(bins, int):
bins = [bins, bins]
bins = list(bins)
bins[0] = util.bin_default(axis1, xmin, xmax, bins = bins[0])
bins[1] = util.bin_default(axis2, ymin, ymax, bins = bins[1])
fig, ax = util.fig_ax(axes)
fig._flowml_axis = (axis1, axis2)
# We do not use the Matplotlib API for histograms because we want to have transparent plots
# Following example: http://matplotlib.org/examples/api/histogram_path_demo.html
den_ = []
range_ = ((xmin_transformed, xmax_transformed),(ymin_transformed, ymax_transformed))
for (dx, dy) in zip(datax, datay):
den, xedge, yedge = np.histogram2d(dx, dy, bins = bins, range = range_)
den_.append(den)
alpha = util.alpha(len(den_))
proxy = []
line_collections = []
levels = 10**np.arange(0,7)
for den in den_:
line, = ax.plot(0,0)
ln = ax.imshow(den.T, cmap = make_cmap(line.get_color()), origin = 'lower',
norm = matplotlib.colors.LogNorm(),
extent = [xmin, xmax, ymin, ymax],
interpolation = 'none',
aspect = 'auto')
line_collections.append(ln)
proxy.append( plt.Rectangle((0,0),1,1,fc = line.get_color(),alpha = alpha))
line.remove()
if len(datax) == 1:
ax.set_title(titles[0])
elif len(datax) > 1:
ax.legend(proxy, titles)
ax.set_xlabel(axis1)
ax.set_ylabel(axis2)
ax.set_xscale(scaling[0])
ax.set_yscale(scaling[1])
return fig
def make_cmap(target, background = None):
if background is None:
background = 'white'
cc = matplotlib.colors.ColorConverter()
target = cc.to_rgb(target)
background = cc.to_rgb(background)
# Start 10% of the way from the background towards the target colour
start = [(bg*0.9+0.1*tg) for bg, tg in zip(background, target)]
cdict = {'red': [], 'green': [], 'blue': []}
for (v, c) in zip(start, ['red', 'green', 'blue']):
cdict[c].append( (0, v, v))
for (v, c) in zip(target, ['red', 'green', 'blue']):
cdict[c].append( (1, v, v))
cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap', cdict, 256)
# makes under-range values transparent
cmap.set_under(alpha = 0)
return cmap
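# Illustrative sketch (not part of the original module): make_cmap() returns a colormap
# that fades from (nearly) the background colour to the target colour, and set_under()
# makes everything below the norm's lower bound transparent; that is what lets the
# per-dataset imshow() layers in hist2 overlap without hiding each other.
def _example_make_cmap_usage():
    cmap = make_cmap('blue')  # background defaults to white
    demo = np.arange(1, 101, dtype=float).reshape(10, 10)
    fig, ax = plt.subplots()
    ax.imshow(demo, cmap=cmap, norm=matplotlib.colors.LogNorm(), interpolation='none')
    return fig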
def _2d_backend(ax, den_, xgrid, ygrid, titles, axis1, axis2, transform = None):
alpha = util.alpha(len(den_))
proxy = []
line_collections = []
levels = 10**np.arange(0,7)
for den in den_:
line, = ax.plot(0,0)
#ln = ax.contourf(xgrid, ygrid, den.T,
# norm = matplotlib.colors.LogNorm(vmin=1.),
# cmap = make_cmap(line.get_color()), alpha = alpha,
# levels = levels)
ln = ax.imshow(den.T, cmap = make_cmap(line.get_color()), origin = 'lower',
norm = matplotlib.colors.LogNorm(),
extent = [np.min(xgrid), np.max(xgrid), np.min(ygrid), np.max(ygrid)],
interpolation = 'none',
aspect = 'auto')
line_collections.append(ln)
proxy.append( plt.Rectangle((0,0),1,1,fc = line.get_color(),alpha = alpha))
line.remove()
ax.legend(proxy, titles)
ax.set_xlabel(axis1)
ax.set_ylabel(axis2)
if transform is not None:
# set ticks
xticks = transform[0](np.concatenate([0*np.ones(1), 10**np.arange(0,6)]))
yticks = transform[1](np.concatenate([0*np.ones(1), 10**np.arange(0,6)]))
xticklabels = ["0"]
for j in range(0,6):
xticklabels.append("1e{}".format(j))
yticklabels = ["0"]
for j in range(0,6):
yticklabels.append("1e{}".format(j))
#print xticks
#print xticklabels
#print len(xticks)
#print len(xticklabels)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels)
# This feature does not yet work with this kind of density plot
#plugins.connect(ax.figure, plugins.InteractiveLegendPlugin(line_collections, titles))
def mean_matrix(datasets, axes = None, label = None):
"""Computes the mean intensity matrix for given axes and label
Args:
dataset (list): List of FlowData objects
Kwargs:
axes (list): list of column names to evaluate (e.g., 'CD45')
label (list or string): name(s) of boolean columns in datasets
"""
fn = lambda fd, axis: fd[axis].mean()
return fn_matrix(datasets, fn, axes, label)
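# Illustrative sketch (not part of the original module): mean_matrix() returns a pandas
# DataFrame with one row per channel and one column per dataset title. The channel and
# gate names below ('CD45', 'CD3', 'live') are hypothetical placeholders.
def _example_mean_matrix_usage(datasets):
    per_channel = mean_matrix(datasets, axes=['CD45', 'CD3'])
    # the same summary restricted to events inside a single boolean gate column
    gated = mean_matrix(datasets, axes=['CD45', 'CD3'], label='live')
    return per_channel, gated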
def median_matrix(datasets, axes = None, label = None):
"""Computes the | ax.legend() | conditional_block |
analysis.py | ) == 1:
ax.set_title(titles[0])
elif len(datax) > 1:
ax.legend(proxy, titles)
ax.set_xlabel(axis1)
ax.set_ylabel(axis2)
ax.set_xscale(scaling[0])
ax.set_yscale(scaling[1])
return fig
def make_cmap(target, background = None):
if background is None:
background = 'white'
cc = matplotlib.colors.ColorConverter()
target = cc.to_rgb(target)
background = cc.to_rgb(background)
# Start 10% of the way from the background towards the target colour
start = [(bg*0.9+0.1*tg) for bg, tg in zip(background, target)]
cdict = {'red': [], 'green': [], 'blue': []}
for (v, c) in zip(start, ['red', 'green', 'blue']):
cdict[c].append( (0, v, v))
for (v, c) in zip(target, ['red', 'green', 'blue']):
cdict[c].append( (1, v, v))
cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap', cdict, 256)
# makes under-range values transparent
cmap.set_under(alpha = 0)
return cmap
def _2d_backend(ax, den_, xgrid, ygrid, titles, axis1, axis2, transform = None):
alpha = util.alpha(len(den_))
proxy = []
line_collections = []
levels = 10**np.arange(0,7)
for den in den_:
line, = ax.plot(0,0)
#ln = ax.contourf(xgrid, ygrid, den.T,
# norm = matplotlib.colors.LogNorm(vmin=1.),
# cmap = make_cmap(line.get_color()), alpha = alpha,
# levels = levels)
ln = ax.imshow(den.T, cmap = make_cmap(line.get_color()), origin = 'lower',
norm = matplotlib.colors.LogNorm(),
extent = [np.min(xgrid), np.max(xgrid), np.min(ygrid), np.max(ygrid)],
interpolation = 'none',
aspect = 'auto')
line_collections.append(ln)
proxy.append( plt.Rectangle((0,0),1,1,fc = line.get_color(),alpha = alpha))
line.remove()
ax.legend(proxy, titles)
ax.set_xlabel(axis1)
ax.set_ylabel(axis2)
if transform is not None:
# set ticks
xticks = transform[0](np.concatenate([0*np.ones(1), 10**np.arange(0,6)]))
yticks = transform[1](np.concatenate([0*np.ones(1), 10**np.arange(0,6)]))
xticklabels = ["0"]
for j in range(0,6):
xticklabels.append("1e{}".format(j))
yticklabels = ["0"]
for j in range(0,6):
yticklabels.append("1e{}".format(j))
#print xticks
#print xticklabels
#print len(xticks)
#print len(xticklabels)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels)
# This feature does not yet work with this kind of density plot
#plugins.connect(ax.figure, plugins.InteractiveLegendPlugin(line_collections, titles))
def mean_matrix(datasets, axes = None, label = None):
"""Computes the mean intensity matrix for given axes and label
Args:
dataset (list): List of FlowData objects
Kwargs:
axes (list): list of column names to evaluate (e.g., 'CD45')
label (list or string): name(s) of boolean columns in datasets
"""
fn = lambda fd, axis: fd[axis].mean()
return fn_matrix(datasets, fn, axes, label)
def median_matrix(datasets, axes = None, label = None):
"""Computes the median intensity matrix for given axes and label
Args:
dataset (list): List of FlowData objects
Kwargs:
axes (list): list of column names to evaluate (e.g., 'CD45')
label (list or string): name(s) of boolean columns in datasets
"""
fn = lambda fd, axis: fd[axis].median()
return fn_matrix(datasets, fn, axes, label)
def count_matrix(datasets, labels):
"""Counts the events in the given label.
Args:
dataset (list): List of FlowData objects
label (list or string): name(s) of boolean columns in datasets
"""
fn = lambda fd, axis: fd.shape[0]
return fn_matrix(datasets, fn, axes = None, label = labels)
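# Illustrative sketch (not part of the original module): count_matrix() simply counts the
# events carrying each boolean label, per dataset. The gate names are hypothetical.
def _example_count_matrix_usage(datasets):
    return count_matrix(datasets, ['live', 'CD4+'])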
def percent_matrix(datasets, label, relative_to = None):
"""Precentage of events with a given label.
Args:
dataset (list): List of FlowData objects
label (list or string): name(s) of boolean columns in datasets
"""
if relative_to is None:
fn = lambda fd, la: fd[fd[la]].shape[0]*100./fd._original_length
else:
fn = lambda fd, la: fd[fd[la]].shape[0]*100./fd[fd[relative_to]].shape[0]
matrix = [ [ fn(fd,la) for fd in datasets] for la in label]
cols = [fd.title for fd in datasets]
mat = pd.DataFrame(matrix, index = label, columns = cols)
# https://stackoverflow.com/questions/18876022/how-to-format-ipython-html-display-of-pandas-dataframe
style = '<style>.dataframe td { text-align: right; }</style>'
from IPython.display import HTML
int_frmt = lambda x: '{:,}'.format( x )
float_frmt = lambda x: '{:,.0f}'.format( x ) if x > 1e3 else '{:,.2f}'.format( x )
frmt_map = { np.dtype( 'int64' ):int_frmt, np.dtype( 'float64' ):float_frmt }
frmt = { col:frmt_map[ mat.dtypes[ col ] ] for col in mat.columns if mat.dtypes[ col ] in frmt_map.keys( ) }
html = HTML(style + mat.to_html( formatters=frmt ) )
return mat
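# Illustrative sketch (not part of the original module): percent_matrix() reports, per
# dataset, the percentage of events carrying a boolean label; relative_to switches the
# denominator from the original event count to a parent gate. Gate names are hypothetical.
def _example_percent_matrix_usage(datasets):
    overall = percent_matrix(datasets, ['live', 'CD4+'])
    relative = percent_matrix(datasets, ['CD4+'], relative_to='live')
    return overall, relative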
def fn_matrix(datasets, fn, axes = None, label = None):
"""Apply a function to a list of datasets and either a list of axes or labels.
Applies a user provided function *fn* to produce a matrix with columns for
each dataset and rows either given by axes or label. Both axes and label
cannot both be lists.
Args:
dataset (list): List of FlowData objects
fn (function): A function taking a FlowData object and a column name
and returning a value.
Kwargs:
axes (list): list of column names to evaluate (e.g., 'CD45')
label (list or string): name(s) of boolean columns in datasets
"""
if isinstance(axes, list) and isinstance(label, list):
raise NotImplementedError('Only one of label or axes can be a list')
# By default, run over axes if no keywords given
if axes is None and label is None:
axes = datasets[0].columns
if axes is None:
if isinstance(label, str):
label = [label]
matrix = [ [fn(fd[fd[la]], axes) for fd in datasets] for la in label]
index = label
elif isinstance(axes, list):
index = axes
if label is not None:
matrix = [ [ fn(fd[fd[label]], axis) for fd in datasets] for axis in axes]
else:
matrix = [ [ fn(fd, axis) for fd in datasets] for axis in axes]
cols = [fd.title for fd in datasets]
mfn = pd.DataFrame(matrix, index = index, columns = cols)
return mfn
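# Illustrative sketch (not part of the original module): any summary that maps a FlowData
# object and a column name to a scalar can be plugged into fn_matrix(); here a hypothetical
# 90th-percentile summary, assuming fd[axis] behaves like a pandas Series.
def _example_fn_matrix_usage(datasets):
    p90 = lambda fd, axis: fd[axis].quantile(0.9)
    return fn_matrix(datasets, p90, axes=['CD45', 'CD3'])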
def tsne(fdarray, new_label = 'tsne', channels = None, transform = 'arcsinh', sample = 6000,
verbose = False, backgate = True):
"""Perform t-SNE/viSNE on the FlowData object
"""
fdarray = util.make_list(fdarray)
# If the user has not provided a list of channels to use,
# use the intersection of all isotope channels
if channels is None:
channel_set = []
for fd in fdarray:
channel_set.append(set(fd.isotopes))
channels = list(set.intersection(*channel_set))
# Make a copy of the data in files that we want
points = []
for fd in fdarray:
points.append(np.vstack([ fd[ch] for ch in channels ]).T)
# transform
if transform == 'arcsinh':
for pts in points:
# Apply the transform inplace to the data
np.arcsinh(5*pts, pts)
| # Randomly sample to reduce the number of points
sample_masks = [] | random_line_split |
|
analysis.py | interpolation = 'none',
aspect = 'auto')
line_collections.append(ln)
proxy.append( plt.Rectangle((0,0),1,1,fc = line.get_color(),alpha = alpha))
line.remove()
ax.legend(proxy, titles)
ax.set_xlabel(axis1)
ax.set_ylabel(axis2)
if transform is not None:
# set ticks
xticks = transform[0](np.concatenate([0*np.ones(1), 10**np.arange(0,6)]))
yticks = transform[1](np.concatenate([0*np.ones(1), 10**np.arange(0,6)]))
xticklabels = ["0"]
for j in range(0,6):
xticklabels.append("1e{}".format(j))
yticklabels = ["0"]
for j in range(0,6):
yticklabels.append("1e{}".format(j))
#print xticks
#print xticklabels
#print len(xticks)
#print len(xticklabels)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels)
# This feature does not yet work with this kind of density plot
#plugins.connect(ax.figure, plugins.InteractiveLegendPlugin(line_collections, titles))
def mean_matrix(datasets, axes = None, label = None):
"""Computes the mean intensity matrix for given axes and label
Args:
dataset (list): List of FlowData objects
Kwargs:
axes (list): list of column names to evaluate (e.g., 'CD45')
label (list or string): name(s) of boolean columns in datasets
"""
fn = lambda fd, axis: fd[axis].mean()
return fn_matrix(datasets, fn, axes, label)
def median_matrix(datasets, axes = None, label = None):
"""Computes the median intensity matrix for given axes and label
Args:
dataset (list): List of FlowData objects
Kwargs:
axes (list): list of column names to evaluate (e.g., 'CD45')
label (list or string): name(s) of boolean columns in datasets
"""
fn = lambda fd, axis: fd[axis].median()
return fn_matrix(datasets, fn, axes, label)
def count_matrix(datasets, labels):
"""Counts the events in the given label.
Args:
dataset (list): List of FlowData objects
label (list or string): name(s) of boolean columns in datasets
"""
fn = lambda fd, axis: fd.shape[0]
return fn_matrix(datasets, fn, axes = None, label = labels)
def percent_matrix(datasets, label, relative_to = None):
"""Precentage of events with a given label.
Args:
dataset (list): List of FlowData objects
label (list or string): name(s) of boolean columns in datasets
"""
if relative_to is None:
fn = lambda fd, la: fd[fd[la]].shape[0]*100./fd._original_length
else:
fn = lambda fd, la: fd[fd[la]].shape[0]*100./fd[fd[relative_to]].shape[0]
matrix = [ [ fn(fd,la) for fd in datasets] for la in label]
cols = [fd.title for fd in datasets]
mat = pd.DataFrame(matrix, index = label, columns = cols)
# https://stackoverflow.com/questions/18876022/how-to-format-ipython-html-display-of-pandas-dataframe
style = '<style>.dataframe td { text-align: right; }</style>'
from IPython.display import HTML
int_frmt = lambda x: '{:,}'.format( x )
float_frmt = lambda x: '{:,.0f}'.format( x ) if x > 1e3 else '{:,.2f}'.format( x )
frmt_map = { np.dtype( 'int64' ):int_frmt, np.dtype( 'float64' ):float_frmt }
frmt = { col:frmt_map[ mat.dtypes[ col ] ] for col in mat.columns if mat.dtypes[ col ] in frmt_map.keys( ) }
html = HTML(style + mat.to_html( formatters=frmt ) )
return mat
def fn_matrix(datasets, fn, axes = None, label = None):
"""Apply a function to a list of datasets and either a list of axes or labels.
Applies a user provided function *fn* to produce a matrix with columns for
each dataset and rows either given by axes or label. Both axes and label
cannot both be lists.
Args:
dataset (list): List of FlowData objects
fn (function): A function taking a FlowData object and a column name
and returning a value.
Kwargs:
axes (list): list of column names to evaluate (e.g., 'CD45')
label (list or string): name(s) of boolean columns in datasets
"""
if isinstance(axes, list) and isinstance(label, list):
raise NotImplementedError('Only one of label or axes can be a list')
# By default, run over axes if no keywords given
if axes is None and label is None:
axes = datasets[0].columns
if axes is None:
if isinstance(label, str):
label = [label]
matrix = [ [fn(fd[fd[la]], axes) for fd in datasets] for la in label]
index = label
elif isinstance(axes, list):
index = axes
if label is not None:
matrix = [ [ fn(fd[fd[label]], axis) for fd in datasets] for axis in axes]
else:
matrix = [ [ fn(fd, axis) for fd in datasets] for axis in axes]
cols = [fd.title for fd in datasets]
mfn = pd.DataFrame(matrix, index = index, columns = cols)
return mfn
def tsne(fdarray, new_label = 'tsne', channels = None, transform = 'arcsinh', sample = 6000,
verbose = False, backgate = True):
"""Perform t-SNE/viSNE on the FlowData object
"""
fdarray = util.make_list(fdarray)
# If the user has not provided a list of channels to use,
# use the intersection of all isotope channels
if channels is None:
channel_set = []
for fd in fdarray:
channel_set.append(set(fd.isotopes))
channels = list(set.intersection(*channel_set))
# Make a copy of the data in files that we want
points = []
for fd in fdarray:
points.append(np.vstack([ fd[ch] for ch in channels ]).T)
# transform
if transform == 'arcsinh':
for pts in points:
# Apply the transform inplace to the data
np.arcsinh(5*pts, pts)
# Randomly sample to reduce the number of points
sample_masks = []
for pts in points:
if sample < pts.shape[0]:
# If we have enough points to subsample
sample_masks.append(np.random.choice(pts.shape[0], sample, replace = False))
else:
# Otherwise we add all the points
sample_masks.append(np.array(range(pts.shape[0])))
# Sample the points, and construct a large matrix
sample_points = []
for mask, pts in zip(sample_masks, points):
sample_points.append(pts[mask,:])
X = np.vstack(sample_points)
# Perform t-SNE
Y = lib_tsne.tsne(X, verbose = verbose)
assert Y is not None, ('t-SNE failed to return')
# Split Y into a matrix for each dataset
splits = np.cumsum( np.array([ mask.shape[0] for mask in sample_masks], dtype = int))
Y_split = np.split(Y, splits, axis = 0)
# now expand data to reassign these points back into the dataset
tsne_coords = []
for (pts, mask, Yspt) in zip(points, sample_masks, Y_split):
npoints = pts.shape[0]
Z = np.zeros((npoints, 2))*float('NaN')
Z[mask,:] = Yspt
tsne_coords.append(Z)
# If a point didn't get sampled, place its t-SNE coordinates at its nearest
# neighbor.
if backgate:
kd = KDTree(X)
# select points not assigned values with t-SNE
for pts, mask, coords, j in zip(points, sample_masks, tsne_coords, range(len(points))):
nan_points = np.argwhere(np.isnan(coords[:,0]))
d,near = kd.query(pts[nan_points],1)
# convert back to coordinates on the whole dataset
coords[nan_points, :] = Y[near,:]
tsne_coords[j] = coords
# add to data to FlowData structure
for fd, coords in zip(fdarray, tsne_coords):
fd[new_label+'1'] = coords[:,0]
fd[new_label+'2'] = coords[:,1]
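# Illustrative sketch (not part of the original module): tsne() above writes two new
# columns (new_label + '1' and new_label + '2') into each FlowData object, so the
# embedding can then be plotted like any other pair of channels, e.g. with hist2().
def _example_tsne_usage(fdarray):
    tsne(fdarray, new_label='tsne', sample=2000)
    return hist2(fdarray, 'tsne1', 'tsne2')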
def | heatmap | identifier_name |
|
analysis.py | , bins = bins, range = (xmin, xmax))
left = np.array(bin_edges[:-1])
right = np.array(bin_edges[1:])
# FIXES a bug in MPLD3 0.3 regarding NaNs in coordinates
bottom = 1e-6*np.ones(len(left))
top = bottom + hist
XY = np.array([[left,left,right,right], [bottom, top, top, bottom]]).T
barpath = matplotlib.path.Path.make_compound_path_from_polys(XY)
# serves to get the current color
base_line, = ax.plot(hist, alpha = 0)
patch = matplotlib.patches.PathPatch(barpath, facecolor = base_line.get_color(),
edgecolor = base_line.get_color(), alpha = alpha)
# Clear the unneeded line
base_line.remove()
patch.set_label(t)
ax.add_patch(patch)
max_value = max(max_value, top.max())
ax.set_xlim(xmin, xmax)
ax.set_ylim(1, max_value )
ax.set_xlabel(axis)
ax.set_yscale(util.default_yscale(axis))
if len(data) > 1:
ax.legend()
else:
ax.set_title(titles[0])
return fig
def hist2(datasets, axis1, axis2, bins = None,
xmin = None, xmax = None, ymin = None, ymax = None, range_ = None,
axes = None, transform = None):
datax = util.extract_data(datasets, axis1)
datay = util.extract_data(datasets, axis2)
titles = util.extract_title(datasets)
try:
xrange_ = range_[0]
yrange_ = range_[1]
except:
xrange_ = None
yrange_ = None
xmin, xmax = util.set_limits(datax, xmin, xmax, xrange_, axis1)
ymin, ymax = util.set_limits(datay, ymin, ymax, yrange_, axis2)
if not isinstance(transform, (list, tuple)):
transform = [transform, transform]
scaling = [None, None]
scaling[0], transform[0] = util.default_scaling(axis1, scaling = scaling[0], transform = transform[0])
scaling[1], transform[1] = util.default_scaling(axis2, scaling = scaling[1], transform = transform[1])
for index, d in enumerate(datax):
datax[index] = transform[0](d)
for index, d in enumerate(datay):
datay[index] = transform[1](d)
xmin_transformed, xmax_transformed = util.set_limits(datax)
ymin_transformed, ymax_transformed = util.set_limits(datay)
# Determine how many bins to use
if bins is None:
bins = [None, None]
if isinstance(bins, int):
bins = [bins, bins]
bins = list(bins)
bins[0] = util.bin_default(axis1, xmin, xmax, bins = bins[0])
bins[1] = util.bin_default(axis2, ymin, ymax, bins = bins[1])
fig, ax = util.fig_ax(axes)
fig._flowml_axis = (axis1, axis2)
# We do not use the Matplotlib API for histograms because we want to have transparent plots
# Following example: http://matplotlib.org/examples/api/histogram_path_demo.html
den_ = []
range_ = ((xmin_transformed, xmax_transformed),(ymin_transformed, ymax_transformed))
for (dx, dy) in zip(datax, datay):
den, xedge, yedge = np.histogram2d(dx, dy, bins = bins, range = range_)
den_.append(den)
alpha = util.alpha(len(den_))
proxy = []
line_collections = []
levels = 10**np.arange(0,7)
for den in den_:
line, = ax.plot(0,0)
ln = ax.imshow(den.T, cmap = make_cmap(line.get_color()), origin = 'lower',
norm = matplotlib.colors.LogNorm(),
extent = [xmin, xmax, ymin, ymax],
interpolation = 'none',
aspect = 'auto')
line_collections.append(ln)
proxy.append( plt.Rectangle((0,0),1,1,fc = line.get_color(),alpha = alpha))
line.remove()
if len(datax) == 1:
ax.set_title(titles[0])
elif len(datax) > 1:
ax.legend(proxy, titles)
ax.set_xlabel(axis1)
ax.set_ylabel(axis2)
ax.set_xscale(scaling[0])
ax.set_yscale(scaling[1])
return fig
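# Illustrative sketch (not part of the original module): a typical hist2() call with
# explicit bins and limits; 'CD45' and 'CD3' are hypothetical channel names, and the
# default transform/scaling chosen by util.default_scaling() is kept.
def _example_hist2_usage(datasets):
    return hist2(datasets, 'CD45', 'CD3', bins=128, xmin=0, xmax=1e4, ymin=0, ymax=1e4)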
def make_cmap(target, background = None):
if background is None:
background = 'white'
cc = matplotlib.colors.ColorConverter()
target = cc.to_rgb(target)
background = cc.to_rgb(background)
# Start 10% of the way from the background towards the target colour
start = [(bg*0.9+0.1*tg) for bg, tg in zip(background, target)]
cdict = {'red': [], 'green': [], 'blue': []}
for (v, c) in zip(start, ['red', 'green', 'blue']):
cdict[c].append( (0, v, v))
for (v, c) in zip(target, ['red', 'green', 'blue']):
cdict[c].append( (1, v, v))
cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap', cdict, 256)
# makes under-range values transparent
cmap.set_under(alpha = 0)
return cmap
def _2d_backend(ax, den_, xgrid, ygrid, titles, axis1, axis2, transform = None):
alpha = util.alpha(len(den_))
proxy = []
line_collections = []
levels = 10**np.arange(0,7)
for den in den_:
line, = ax.plot(0,0)
#ln = ax.contourf(xgrid, ygrid, den.T,
# norm = matplotlib.colors.LogNorm(vmin=1.),
# cmap = make_cmap(line.get_color()), alpha = alpha,
# levels = levels)
ln = ax.imshow(den.T, cmap = make_cmap(line.get_color()), origin = 'lower',
norm = matplotlib.colors.LogNorm(),
extent = [np.min(xgrid), np.max(xgrid), np.min(ygrid), np.max(ygrid)],
interpolation = 'none',
aspect = 'auto')
line_collections.append(ln)
proxy.append( plt.Rectangle((0,0),1,1,fc = line.get_color(),alpha = alpha))
line.remove()
ax.legend(proxy, titles)
ax.set_xlabel(axis1)
ax.set_ylabel(axis2)
if transform is not None:
# set ticks
xticks = transform[0](np.concatenate([0*np.ones(1), 10**np.arange(0,6)]))
yticks = transform[1](np.concatenate([0*np.ones(1), 10**np.arange(0,6)]))
xticklabels = ["0"]
for j in range(0,6):
xticklabels.append("1e{}".format(j))
yticklabels = ["0"]
for j in range(0,6):
yticklabels.append("1e{}".format(j))
#print xticks
#print xticklabels
#print len(xticks)
#print len(xticklabels)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels)
# This feature does not yet work with this kind of density plot
#plugins.connect(ax.figure, plugins.InteractiveLegendPlugin(line_collections, titles))
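# Illustrative sketch (not part of the original module): the tick trick used in
# _2d_backend() above, on its own. When the data are plotted in transformed coordinates
# (arcsinh here), ticks are placed at the transformed positions of round values but
# labelled with the original values.
def _example_transformed_ticks(ax, transform=np.arcsinh):
    values = np.concatenate([np.zeros(1), 10.0 ** np.arange(0, 6)])
    ax.set_xticks(transform(values))
    ax.set_xticklabels(['0'] + ['1e{}'.format(j) for j in range(0, 6)])
    return ax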
def mean_matrix(datasets, axes = None, label = None):
"""Computes the mean intensity matrix for given axes and label
Args:
dataset (list): List of FlowData objects
Kwargs:
axes (list): list of column names to evaluate (e.g., 'CD45')
label (list or string): name(s) of boolean columns in datasets
"""
fn = lambda fd, axis: fd[axis].mean()
return fn_matrix(datasets, fn, axes, label)
def median_matrix(datasets, axes = None, label = None):
"""Computes the median intensity matrix for given axes and label
Args:
dataset (list): List of FlowData objects
Kwargs:
axes (list): list of column names to evaluate (e.g., 'CD45')
label (list or string): name(s) of boolean columns in datasets
"""
fn = lambda fd, axis: fd[axis].median()
return fn_matrix(datasets, fn, axes, label)
def count_matrix(datasets, labels):
| """Counts the events in the given label.
Args:
dataset (list): List of FlowData objects
label (list or string): name(s) of boolean columns in datasets
"""
fn = lambda fd, axis: fd.shape[0]
return fn_matrix(datasets, fn, axes = None, label = labels) | identifier_body |
|
main.rs | data to disk
let mut cert_file = File::create(certs_path.join(format!(
"{}/{}",
domain,
certificates::CERT_FILE_NAME
)))?;
cert_file.write_all(&cert.serialize_der()?)?;
// write key data to disk
let key_file_path =
certs_path.join(format!("{}/{}", domain, certificates::KEY_FILE_NAME));
let mut key_file = File::create(&key_file_path)?;
#[cfg(unix)]
{
// set permissions so only owner can read
match key_file.set_permissions(std::fs::Permissions::from_mode(0o400)) {
Ok(_) => (),
Err(_) => log::warn!(
"could not set permissions for new key file {}",
key_file_path.display()
),
}
}
key_file.write_all(&cert.serialize_private_key_der())?;
reload_certs = true;
}
}
hostnames.push(hostname);
}
// if new certificates were generated, reload the certificate store
let certs = if reload_certs {
certificates::CertStore::load_from(&certs_path)?
} else {
// there must already have been certificates loaded
certs.unwrap()
};
// parse listening addresses
let mut addrs = vec![];
for i in matches.opt_strs("addr") {
addrs.push(i.parse()?);
}
#[cfg_attr(not(unix), allow(unused_mut))]
let mut empty = addrs.is_empty();
#[cfg(unix)]
let mut sockets = vec![];
#[cfg(unix)]
{
for i in matches.opt_strs("socket") {
sockets.push(i.parse()?);
}
empty &= sockets.is_empty();
}
if empty {
addrs = vec![
SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), DEFAULT_PORT),
SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), DEFAULT_PORT),
];
}
Ok(Args {
addrs,
#[cfg(unix)]
sockets,
content_dir: check_path(matches.opt_get_default("content", "content".into())?)?,
certs: Arc::new(certs),
hostnames,
language: matches.opt_str("lang"),
serve_secret: matches.opt_present("serve-secret"),
log_ips: matches.opt_present("log-ip"),
only_tls13: matches.opt_present("only-tls13"),
central_config: matches.opt_present("central-conf"),
skip_port_check: matches.opt_present("skip-port-check"),
})
}
fn check_path(s: String) -> Result<PathBuf, String> {
let p = PathBuf::from(s);
if p.as_path().exists() {
Ok(p)
} else {
Err(format!("No such file: {p:?}"))
}
}
/// TLS configuration.
static TLS: Lazy<TlsAcceptor> = Lazy::new(acceptor);
fn acceptor() -> TlsAcceptor {
let config = if ARGS.only_tls13 {
ServerConfig::builder()
.with_safe_default_cipher_suites()
.with_safe_default_kx_groups()
.with_protocol_versions(&[&rustls::version::TLS13])
.expect("could not build server config")
} else {
ServerConfig::builder().with_safe_defaults()
}
.with_no_client_auth()
.with_cert_resolver(ARGS.certs.clone());
TlsAcceptor::from(Arc::new(config))
}
struct RequestHandle<T> {
stream: TlsStream<T>,
local_port_check: Option<u16>,
log_line: String,
metadata: Arc<Mutex<FileOptions>>,
}
impl RequestHandle<TcpStream> {
/// Creates a new request handle for the given stream. If establishing the TLS
/// session fails, returns a corresponding log line.
async fn new(stream: TcpStream, metadata: Arc<Mutex<FileOptions>>) -> Result<Self, String> {
let local_addr = stream.local_addr().unwrap().to_string();
// try to get the remote IP address if desired
let peer_addr = if ARGS.log_ips {
stream
.peer_addr()
.map_err(|_| {
format!(
// use nonexistent status code 01 if peer IP is unknown
"{local_addr} - \"\" 01 \"IP error\" error:could not get peer address",
)
})?
.ip()
.to_string()
} else {
// Do not log IP address, but something else so columns still line up.
"-".into()
};
let log_line = format!("{local_addr} {peer_addr}",);
let local_port_check = if ARGS.skip_port_check {
None
} else {
Some(stream.local_addr().unwrap().port())
};
match TLS.accept(stream).await {
Ok(stream) => Ok(Self {
stream,
local_port_check,
log_line,
metadata,
}),
// use nonexistent status code 00 if connection was not established
Err(e) => Err(format!("{log_line} \"\" 00 \"TLS error\" error:{e}")),
}
}
}
#[cfg(unix)]
impl RequestHandle<UnixStream> {
async fn new_unix(
stream: UnixStream,
metadata: Arc<Mutex<FileOptions>>,
) -> Result<Self, String> {
let log_line = format!(
"unix:{} -",
stream
.local_addr()
.ok()
.and_then(|addr| Some(addr.as_pathname()?.to_string_lossy().into_owned()))
.unwrap_or_default()
);
match TLS.accept(stream).await {
Ok(stream) => Ok(Self {
stream,
// TODO add port check for unix sockets, requires extra arg for port
local_port_check: None,
log_line,
metadata,
}),
// use nonexistent status code 00 if connection was not established
Err(e) => Err(format!("{} \"\" 00 \"TLS error\" error:{}", log_line, e)),
}
}
}
impl<T> RequestHandle<T>
where
T: AsyncWriteExt + AsyncReadExt + Unpin,
{
/// Do the necessary actions to handle this request. Returns a corresponding
/// log line as Err or Ok, depending on if the request finished with or
/// without errors.
async fn handle(mut self) -> Result<String, String> {
// not already in error condition
let result = match self.parse_request().await {
Ok(url) => self.send_response(url).await,
Err((status, msg)) => self.send_header(status, msg).await,
};
let close_result = self.stream.shutdown().await;
match (result, close_result) {
(Err(e), _) => Err(format!("{} error:{}", self.log_line, e)),
(Ok(_), Err(e)) => Err(format!("{} error:{}", self.log_line, e)),
(Ok(_), Ok(_)) => Ok(self.log_line),
}
}
/// Return the URL requested by the client.
async fn parse_request(&mut self) -> std::result::Result<Url, (u8, &'static str)> {
// Because requests are limited to 1024 bytes (plus 2 bytes for CRLF), we
// can use a fixed-sized buffer on the stack, avoiding allocations and
// copying, and stopping bad clients from making us use too much memory.
let mut request = [0; 1026];
let mut buf = &mut request[..];
let mut len = 0;
// Read until CRLF, end-of-stream, or there's no buffer space left.
//
// Since neither CR nor LF can be part of a URI according to
// ISOC-RFC 3986, we could use BufRead::read_line here, but that does
// not allow us to cap the number of read bytes at 1024+2.
let result = loop {
let bytes_read = if let Ok(read) = self.stream.read(buf).await {
read
} else {
break Err((BAD_REQUEST, "Request ended unexpectedly"));
};
len += bytes_read;
if request[..len].ends_with(b"\r\n") {
break Ok(());
} else if bytes_read == 0 {
break Err((BAD_REQUEST, "Request ended unexpectedly"));
}
buf = &mut request[len..];
}
.and_then(|()| {
std::str::from_utf8(&request[..len - 2]).or(Err((BAD_REQUEST, "Non-UTF-8 request")))
});
let request = result.map_err(|e| {
// write empty request to log line for uniformity
write!(self.log_line, " \"\"").unwrap();
e
})?;
// log literal request (might be different from or not an actual URL)
write!(self.log_line, " \"{request}\"").unwrap();
let mut url = Url::parse(request).or(Err((BAD_REQUEST, "Invalid URL")))?;
// Validate the URL:
// correct scheme
if url.scheme() != "gemini" {
return Err((PROXY_REQUEST_REFUSED, "Unsupported URL scheme"));
}
| // no userinfo and no fragment
if url.password().is_some() || !url.username().is_empty() || url.fragment().is_some() {
return Err((BAD_REQUEST, "URL contains fragment or userinfo"));
} | random_line_split |
|
main.rs | else {
// already listening on the other unspecified address
log::warn!("Could not start listener on {}, but already listening on another unspecified address. Probably your system automatically listens in dual stack?", addr);
continue;
}
}
Ok(listener) => listener,
};
listening_unspecified |= addr.ip().is_unspecified();
handles.push(tokio::spawn(async move {
log::info!("Started listener on {}", addr);
loop {
let (stream, _) = listener.accept().await.unwrap_or_else(|e| {
panic!("could not accept new connection on {addr}: {e}")
});
let arc = arc.clone();
tokio::spawn(async {
match RequestHandle::new(stream, arc).await {
Ok(handle) => match handle.handle().await {
Ok(info) => log::info!("{}", info),
Err(err) => log::warn!("{}", err),
},
Err(log_line) => {
log::warn!("{}", log_line);
}
}
});
}
}))
};
#[cfg(unix)]
for socketpath in &ARGS.sockets {
let arc = mimetypes.clone();
if socketpath.exists() && socketpath.metadata()
.expect("Failed to get existing socket metadata")
.file_type()
.is_socket() {
log::warn!("Socket already exists, attempting to remove {}", socketpath.display());
let _ = std::fs::remove_file(socketpath);
}
let listener = match UnixListener::bind(socketpath) {
Err(e) => {
panic!("Failed to listen on {}: {}", socketpath.display(), e)
}
Ok(listener) => listener,
};
handles.push(tokio::spawn(async move {
log::info!("Started listener on {}", socketpath.display());
loop {
let (stream, _) = listener.accept().await.unwrap_or_else(|e| {
panic!("could not accept new connection on {}: {}", socketpath.display(), e)
});
let arc = arc.clone();
tokio::spawn(async {
match RequestHandle::new_unix(stream, arc).await {
Ok(handle) => match handle.handle().await {
Ok(info) => log::info!("{}", info),
Err(err) => log::warn!("{}", err),
},
Err(log_line) => {
log::warn!("{}", log_line);
}
}
});
}
}))
};
futures_util::future::join_all(handles).await;
});
}
type Result<T = (), E = Box<dyn Error + Send + Sync>> = std::result::Result<T, E>;
static ARGS: Lazy<Args> = Lazy::new(|| {
args().unwrap_or_else(|s| {
eprintln!("{s}");
std::process::exit(1);
})
});
struct Args {
addrs: Vec<SocketAddr>,
#[cfg(unix)]
sockets: Vec<PathBuf>,
content_dir: PathBuf,
certs: Arc<certificates::CertStore>,
hostnames: Vec<Host>,
language: Option<String>,
serve_secret: bool,
log_ips: bool,
only_tls13: bool,
central_config: bool,
skip_port_check: bool,
}
fn args() -> Result<Args> {
let args: Vec<String> = std::env::args().collect();
let mut opts = getopts::Options::new();
opts.optopt(
"",
"content",
"Root of the content directory (default ./content/)",
"DIR",
);
opts.optopt(
"",
"certs",
"Root of the certificate directory (default ./.certificates/)",
"DIR",
);
opts.optmulti(
"",
"addr",
&format!("Address to listen on (default 0.0.0.0:{DEFAULT_PORT} and [::]:{DEFAULT_PORT}; multiple occurences means listening on multiple interfaces)"),
"IP:PORT",
);
#[cfg(unix)]
opts.optmulti(
"",
"socket",
"Unix socket to listen on (multiple occurences means listening on multiple sockets)",
"PATH",
);
opts.optmulti(
"",
"hostname",
"Domain name of this Gemini server, enables checking hostname and port in requests. (multiple occurences means basic vhosts)",
"NAME",
);
opts.optopt(
"",
"lang",
"RFC 4646 Language code for text/gemini documents",
"LANG",
);
opts.optflag("h", "help", "Print this help text and exit.");
opts.optflag("V", "version", "Print version information and exit.");
opts.optflag(
"3",
"only-tls13",
"Only use TLSv1.3 (default also allows TLSv1.2)",
);
opts.optflag(
"",
"serve-secret",
"Enable serving secret files (files/directories starting with a dot)",
);
opts.optflag("", "log-ip", "Output the remote IP address when logging.");
opts.optflag(
"C",
"central-conf",
"Use a central .meta file in the content root directory. Decentral config files will be ignored.",
);
opts.optflag(
"e",
"ed25519",
"Generate keys using the Ed25519 signature algorithm instead of the default ECDSA.",
);
opts.optflag(
"",
"skip-port-check",
"Skip URL port check even when a hostname is specified.",
);
let matches = opts.parse(&args[1..]).map_err(|f| f.to_string())?;
if matches.opt_present("h") {
eprintln!("{}", opts.usage(&format!("Usage: {} [options]", &args[0])));
std::process::exit(0);
}
if matches.opt_present("V") {
eprintln!("agate {}", env!("CARGO_PKG_VERSION"));
std::process::exit(0);
}
// try to open the certificate directory
let certs_path = matches.opt_get_default("certs", ".certificates".to_string())?;
let (certs, certs_path) = match check_path(certs_path.clone()) {
// the directory exists, try to load certificates
Ok(certs_path) => match certificates::CertStore::load_from(&certs_path) {
// all is good
Ok(certs) => (Some(certs), certs_path),
// the certificate directory did not contain certificates, but we can generate some
// because the hostname option was given
Err(certificates::CertLoadError::Empty) if matches.opt_present("hostname") => {
(None, certs_path)
}
// failed loading certificates or missing hostname to generate them
Err(e) => return Err(e.into()),
},
// the directory does not exist
Err(_) => {
// since certificate management should be automated, we are going to create the directory too
log::info!(
"The certificate directory {:?} does not exist, creating it.",
certs_path
);
std::fs::create_dir(&certs_path).expect("could not create certificate directory");
// we just created the directory, skip loading from it
(None, PathBuf::from(certs_path))
}
};
// If we have not loaded any certificates yet, we have to try to reload them later.
// This ensures we get the right error message.
let mut reload_certs = certs.is_none();
let mut hostnames = vec![];
for s in matches.opt_strs("hostname") {
// normalize hostname, add punycoding if necessary
let hostname = Host::parse(&s)?;
// check if we have a certificate for that domain
if let Host::Domain(ref domain) = hostname {
if !matches!(certs, Some(ref certs) if certs.has_domain(domain)) {
log::info!("No certificate or key found for {:?}, generating them.", s);
let mut cert_params = CertificateParams::new(vec![domain.clone()]);
cert_params
.distinguished_name
.push(DnType::CommonName, domain);
// <CertificateParams as Default>::default() already implements a
// date in the far future from the time of writing: 4096-01-01
if matches.opt_present("e") {
cert_params.alg = &rcgen::PKCS_ED25519;
}
// generate the certificate with the configuration
let cert = Certificate::from_params(cert_params)?;
// make sure the certificate directory exists
fs::create_dir(certs_path.join(domain))?;
// write certificate data to disk
let mut cert_file = File::create(certs_path.join(format!(
"{}/{}",
domain,
certificates::CERT_FILE_NAME
)))?;
cert_file.write_all(&cert.serialize_der()?)?;
// write key data to disk
let key_file_path =
certs_path.join(format!("{}/{}", domain, certificates::KEY_FILE_NAME));
let mut key_file = File::create(&key_file_path)?;
#[cfg(unix)]
{
// set permissions so only owner can read
match key_file.set_permissions(std::fs::Permissions::from_mode(0o400)) {
Ok(_) => (),
Err(_) => log::warn!(
"could not set permissions for new key file {}",
key_file_path.display()
),
}
}
| {
panic!("Failed to listen on {addr}: {e}")
} | conditional_block |
|
main.rs | .optopt(
"",
"certs",
"Root of the certificate directory (default ./.certificates/)",
"DIR",
);
opts.optmulti(
"",
"addr",
&format!("Address to listen on (default 0.0.0.0:{DEFAULT_PORT} and [::]:{DEFAULT_PORT}; multiple occurences means listening on multiple interfaces)"),
"IP:PORT",
);
#[cfg(unix)]
opts.optmulti(
"",
"socket",
"Unix socket to listen on (multiple occurences means listening on multiple sockets)",
"PATH",
);
opts.optmulti(
"",
"hostname",
"Domain name of this Gemini server, enables checking hostname and port in requests. (multiple occurences means basic vhosts)",
"NAME",
);
opts.optopt(
"",
"lang",
"RFC 4646 Language code for text/gemini documents",
"LANG",
);
opts.optflag("h", "help", "Print this help text and exit.");
opts.optflag("V", "version", "Print version information and exit.");
opts.optflag(
"3",
"only-tls13",
"Only use TLSv1.3 (default also allows TLSv1.2)",
);
opts.optflag(
"",
"serve-secret",
"Enable serving secret files (files/directories starting with a dot)",
);
opts.optflag("", "log-ip", "Output the remote IP address when logging.");
opts.optflag(
"C",
"central-conf",
"Use a central .meta file in the content root directory. Decentral config files will be ignored.",
);
opts.optflag(
"e",
"ed25519",
"Generate keys using the Ed25519 signature algorithm instead of the default ECDSA.",
);
opts.optflag(
"",
"skip-port-check",
"Skip URL port check even when a hostname is specified.",
);
let matches = opts.parse(&args[1..]).map_err(|f| f.to_string())?;
if matches.opt_present("h") {
eprintln!("{}", opts.usage(&format!("Usage: {} [options]", &args[0])));
std::process::exit(0);
}
if matches.opt_present("V") {
eprintln!("agate {}", env!("CARGO_PKG_VERSION"));
std::process::exit(0);
}
// try to open the certificate directory
let certs_path = matches.opt_get_default("certs", ".certificates".to_string())?;
let (certs, certs_path) = match check_path(certs_path.clone()) {
// the directory exists, try to load certificates
Ok(certs_path) => match certificates::CertStore::load_from(&certs_path) {
// all is good
Ok(certs) => (Some(certs), certs_path),
// the certificate directory did not contain certificates, but we can generate some
// because the hostname option was given
Err(certificates::CertLoadError::Empty) if matches.opt_present("hostname") => {
(None, certs_path)
}
// failed loading certificates or missing hostname to generate them
Err(e) => return Err(e.into()),
},
// the directory does not exist
Err(_) => {
// since certificate management should be automated, we are going to create the directory too
log::info!(
"The certificate directory {:?} does not exist, creating it.",
certs_path
);
std::fs::create_dir(&certs_path).expect("could not create certificate directory");
// we just created the directory, skip loading from it
(None, PathBuf::from(certs_path))
}
};
// If we have not loaded any certificates yet, we have to try to reload them later.
// This ensures we get the right error message.
let mut reload_certs = certs.is_none();
let mut hostnames = vec![];
for s in matches.opt_strs("hostname") {
// normalize hostname, add punycoding if necessary
let hostname = Host::parse(&s)?;
// check if we have a certificate for that domain
if let Host::Domain(ref domain) = hostname {
if !matches!(certs, Some(ref certs) if certs.has_domain(domain)) {
log::info!("No certificate or key found for {:?}, generating them.", s);
let mut cert_params = CertificateParams::new(vec![domain.clone()]);
cert_params
.distinguished_name
.push(DnType::CommonName, domain);
// <CertificateParams as Default>::default() already implements a
// date in the far future from the time of writing: 4096-01-01
if matches.opt_present("e") {
cert_params.alg = &rcgen::PKCS_ED25519;
}
// generate the certificate with the configuration
let cert = Certificate::from_params(cert_params)?;
// make sure the certificate directory exists
fs::create_dir(certs_path.join(domain))?;
// write certificate data to disk
let mut cert_file = File::create(certs_path.join(format!(
"{}/{}",
domain,
certificates::CERT_FILE_NAME
)))?;
cert_file.write_all(&cert.serialize_der()?)?;
// write key data to disk
let key_file_path =
certs_path.join(format!("{}/{}", domain, certificates::KEY_FILE_NAME));
let mut key_file = File::create(&key_file_path)?;
#[cfg(unix)]
{
// set permissions so only owner can read
match key_file.set_permissions(std::fs::Permissions::from_mode(0o400)) {
Ok(_) => (),
Err(_) => log::warn!(
"could not set permissions for new key file {}",
key_file_path.display()
),
}
}
key_file.write_all(&cert.serialize_private_key_der())?;
reload_certs = true;
}
}
hostnames.push(hostname);
}
// if new certificates were generated, reload the certificate store
let certs = if reload_certs {
certificates::CertStore::load_from(&certs_path)?
} else {
// there must already have been certificates loaded
certs.unwrap()
};
// parse listening addresses
let mut addrs = vec![];
for i in matches.opt_strs("addr") {
addrs.push(i.parse()?);
}
#[cfg_attr(not(unix), allow(unused_mut))]
let mut empty = addrs.is_empty();
#[cfg(unix)]
let mut sockets = vec![];
#[cfg(unix)]
{
for i in matches.opt_strs("socket") {
sockets.push(i.parse()?);
}
empty &= sockets.is_empty();
}
if empty {
addrs = vec![
SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), DEFAULT_PORT),
SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), DEFAULT_PORT),
];
}
Ok(Args {
addrs,
#[cfg(unix)]
sockets,
content_dir: check_path(matches.opt_get_default("content", "content".into())?)?,
certs: Arc::new(certs),
hostnames,
language: matches.opt_str("lang"),
serve_secret: matches.opt_present("serve-secret"),
log_ips: matches.opt_present("log-ip"),
only_tls13: matches.opt_present("only-tls13"),
central_config: matches.opt_present("central-conf"),
skip_port_check: matches.opt_present("skip-port-check"),
})
}
fn check_path(s: String) -> Result<PathBuf, String> {
let p = PathBuf::from(s);
if p.as_path().exists() {
Ok(p)
} else {
Err(format!("No such file: {p:?}"))
}
}
/// TLS configuration.
static TLS: Lazy<TlsAcceptor> = Lazy::new(acceptor);
fn acceptor() -> TlsAcceptor {
let config = if ARGS.only_tls13 {
ServerConfig::builder()
.with_safe_default_cipher_suites()
.with_safe_default_kx_groups()
.with_protocol_versions(&[&rustls::version::TLS13])
.expect("could not build server config")
} else {
ServerConfig::builder().with_safe_defaults()
}
.with_no_client_auth()
.with_cert_resolver(ARGS.certs.clone());
TlsAcceptor::from(Arc::new(config))
}
struct | <T> {
stream: TlsStream<T>,
local_port_check: Option<u16>,
log_line: String,
metadata: Arc<Mutex<FileOptions>>,
}
impl RequestHandle<TcpStream> {
/// Creates a new request handle for the given stream. If establishing the TLS
/// session fails, returns a corresponding log line.
async fn new(stream: TcpStream, metadata: Arc<Mutex<FileOptions>>) -> Result<Self, String> {
let local_addr = stream.local_addr().unwrap().to_string();
// try to get the remote IP address if desired
let peer_addr = if ARGS.log_ips {
stream
.peer_addr()
.map_err(|_| {
format!(
// use nonexistent status code 01 if peer IP is unknown
"{local_addr} - \" | RequestHandle | identifier_name |
certificate_manager.rs | use edgelet_core::CertificateProperties;
use failure::ResultExt;
pub use crate::error::{Error, ErrorKind};
pub struct CertificateManager<C: CreateCertificate + Clone> {
certificate: Arc<RwLock<Option<Certificate>>>,
crypto: C,
props: CertificateProperties,
creation_time: Instant,
}
#[derive(Clone)]
struct Certificate {
cert: String,
private_key: String,
}
impl<C: CreateCertificate + Clone> CertificateManager<C> {
pub fn new(crypto: C, props: CertificateProperties) -> Result<Self, Error> {
let cert_manager = Self {
certificate: Arc::new(RwLock::new(None)),
crypto,
props,
creation_time: Instant::now(),
};
{
let mut cert = cert_manager
.certificate
.write()
.expect("Locking the certificate for write failed.");
let created_certificate = cert_manager.create_cert()?;
*cert = Some(created_certificate);
}
Ok(cert_manager)
}
// Convenience function since native-tls does not yet support PEM
// and since everything else uses PEM certificates, we want to keep
// the actual storage of the certificate in the PEM format.
#[cfg(unix)]
pub fn get_pkcs12_certificate(&self) -> Result<Vec<u8>, Error> {
let stored_cert_bundle = self.get_certificate()?;
let cert = stored_cert_bundle.cert.as_bytes();
let mut certs =
X509::stack_from_pem(cert).with_context(|_| ErrorKind::CertificateConversionError)?;
let mut ca_certs = Stack::new().with_context(|_| ErrorKind::CertificateConversionError)?;
for cert in certs.split_off(1) {
ca_certs
.push(cert)
.with_context(|_| ErrorKind::CertificateConversionError)?;
}
let key = PKey::private_key_from_pem(stored_cert_bundle.private_key.as_bytes())
.expect("Error processing private key from pem");
let server_cert = &certs[0];
let mut builder = Pkcs12::builder();
builder.ca(ca_certs);
let pkcs_certs = builder
.build("", "", &key, &server_cert)
.with_context(|_| ErrorKind::CertificateConversionError)?;
Ok(pkcs_certs
.to_der()
.with_context(|_| ErrorKind::CertificateConversionError)?)
}
pub fn get_stored_cert_bytes(&self) -> Result<String, Error> {
let stored_cert = self.get_certificate()?;
Ok(stored_cert.cert)
}
pub fn schedule_expiration_timer<F>(
&self,
expiration_callback: F,
) -> impl Future<Item = (), Error = Error>
where
F: FnOnce() -> Result<(), ()> + Sync + Send + 'static,
{
// Now, let's set a timer to renew this certificate before it expires:
// the alarm fires once roughly 95% of its lifetime has elapsed (see compute_certificate_alarm_time below).
let when = self.compute_certificate_alarm_time();
// Fail if the renewal alarm time has already passed (or is less than
// a second away) by the time the timer is created.
if when < (Instant::now() + Duration::from_secs(1)) {
Either::A(future::err(Error::from(
ErrorKind::CertificateTimerCreationError,
)))
} else {
Either::B(
Delay::new(when)
.map_err(|_| Error::from(ErrorKind::CertificateTimerCreationError))
.and_then(move |_| match expiration_callback() {
Ok(_) => Ok(()),
Err(_) => Err(Error::from(ErrorKind::CertificateTimerRuntimeError)),
}),
)
}
}
fn get_certificate(&self) -> Result<Certificate, Error> {
// Try to directly read
let stored_cert = self
.certificate
.read()
.expect("Locking the certificate for read failed.");
match stored_cert.as_ref() {
Some(stored_cert) => Ok(stored_cert.clone()),
None => Err(Error::from(ErrorKind::CertificateNotFound)),
}
}
fn create_cert(&self) -> Result<Certificate, Error> {
// In some use cases, the CA cert might change - to protect against that,
// we will retry once (after attempting to delete) if the cert creation fails.
let cert = if let Ok(val) = self.crypto.create_certificate(&self.props) {
val
} else {
self.crypto
.destroy_certificate(self.props.alias().to_string())
.with_context(|_| ErrorKind::CertificateDeletionError)?;
self.crypto
.create_certificate(&self.props)
.with_context(|_| ErrorKind::CertificateCreationError)?
};
let cert_pem = cert
.pem()
.with_context(|_| ErrorKind::CertificateCreationError)?;
let cert_private_key = cert
.get_private_key()
.with_context(|_| ErrorKind::CertificateCreationError)?;
let pk = match cert_private_key {
Some(pk) => pk,
None => panic!("Unable to acquire a private key."),
};
// Our implementations do not return a ref, and even if they did it would be unusable by Tokio:
// a ref is simply a label/alias for a private key, not the actual key bits.
let pk_bytes = match pk {
PrivateKey::Ref(_) => panic!(
"A reference private key does not contain the bits needed for the TLS certificate."
),
PrivateKey::Key(KeyBytes::Pem(k)) => k,
};
let cert_str = String::from_utf8(cert_pem.as_ref().to_vec())
.with_context(|_| ErrorKind::CertificateCreationError)?;
let key_str = String::from_utf8(pk_bytes.as_bytes().to_vec())
.with_context(|_| ErrorKind::CertificateCreationError)?;
Ok(Certificate {
cert: cert_str,
private_key: key_str,
})
}
// Determine when to sound the alarm and renew the certificate.
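// For example, with validity_in_secs == 7200 the alarm fires at
// creation_time + 6840s, i.e. once 95% of the lifetime has elapsed.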
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
#[allow(clippy::cast_precision_loss)]
fn compute_certificate_alarm_time(&self) -> Instant {
self.creation_time
+ Duration::from_secs((*self.props.validity_in_secs() as f64 * 0.95) as u64)
}
#[cfg(test)]
fn has_certificate(&self) -> bool {
!self
.certificate
.read()
.expect("Locking the certificate for read failed.")
.is_none()
}
}
#[cfg(test)]
mod tests {
use super::{CertificateManager, ErrorKind, Future};
use edgelet_core::crypto::{KeyBytes, PrivateKey};
use edgelet_core::{CertificateProperties, CertificateType};
use chrono::{DateTime, Utc};
use edgelet_core::{
Certificate as CoreCertificate, CertificateProperties as CoreCertificateProperties,
CreateCertificate as CoreCreateCertificate, Error as CoreError,
PrivateKey as CorePrivateKey,
};
#[test]
pub fn test_cert_manager_pem_has_cert() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
123_456,
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let cert = manager.get_certificate().unwrap();
assert_eq!(cert.cert, "test".to_string());
assert_eq!(manager.has_certificate(), true);
}
#[test]
pub fn test_cert_manager_expired_timer_creation() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
1, // 1 second validity
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let _timer = manager.schedule_expiration_timer(|| Ok(()));
}
#[test]
pub fn test_cert_manager_expired_timer_creation_fails() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
50, // 50 second validity
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let timer = manager.schedule_expiration_timer(|| Ok(())).wait();
match timer {
Ok(_) => panic!("Should not be okay to create this timer..."),
Err(err) => {
if let ErrorKind::CertificateTimerCreationError = err.kind() {
assert_eq!(true, true);
} else {
panic!(
"Expected a CertificateTimerCreationError type, but got {:?}",
err
);
}
}
}
}
#[derive(Clone)]
struct TestCrypto {
created: bool,
}
impl TestCrypto {
pub fn new() -> Result<Self, CoreError> {
Ok(Self { created: true })
}
}
impl CoreCreateCertificate for TestCrypto {
type Certificate = TestCertificate;
fn |
use edgelet_core::crypto::{
Certificate as CryptoCertificate, CreateCertificate, KeyBytes, PrivateKey, Signature,
}; | random_line_split |
|
certificate_manager.rs | Certificate, CreateCertificate, KeyBytes, PrivateKey, Signature,
};
use edgelet_core::CertificateProperties;
use failure::ResultExt;
pub use crate::error::{Error, ErrorKind};
pub struct CertificateManager<C: CreateCertificate + Clone> {
certificate: Arc<RwLock<Option<Certificate>>>,
crypto: C,
props: CertificateProperties,
creation_time: Instant,
}
#[derive(Clone)]
struct Certificate {
cert: String,
private_key: String,
}
impl<C: CreateCertificate + Clone> CertificateManager<C> {
pub fn new(crypto: C, props: CertificateProperties) -> Result<Self, Error> {
let cert_manager = Self {
certificate: Arc::new(RwLock::new(None)),
crypto,
props,
creation_time: Instant::now(),
};
{
let mut cert = cert_manager
.certificate
.write()
.expect("Locking the certificate for write failed.");
let created_certificate = cert_manager.create_cert()?;
*cert = Some(created_certificate);
}
Ok(cert_manager)
}
// Convenience function since native-tls does not yet support PEM
// and since everything else uses PEM certificates, we want to keep
// the actual storage of the certificate in the PEM format.
#[cfg(unix)]
pub fn get_pkcs12_certificate(&self) -> Result<Vec<u8>, Error> {
let stored_cert_bundle = self.get_certificate()?;
let cert = stored_cert_bundle.cert.as_bytes();
let mut certs =
X509::stack_from_pem(cert).with_context(|_| ErrorKind::CertificateConversionError)?;
let mut ca_certs = Stack::new().with_context(|_| ErrorKind::CertificateConversionError)?;
for cert in certs.split_off(1) {
ca_certs
.push(cert)
.with_context(|_| ErrorKind::CertificateConversionError)?;
}
let key = PKey::private_key_from_pem(stored_cert_bundle.private_key.as_bytes())
.expect("Error processing private key from pem");
let server_cert = &certs[0];
let mut builder = Pkcs12::builder();
builder.ca(ca_certs);
let pkcs_certs = builder
.build("", "", &key, &server_cert)
.with_context(|_| ErrorKind::CertificateConversionError)?;
Ok(pkcs_certs
.to_der()
.with_context(|_| ErrorKind::CertificateConversionError)?)
}
pub fn get_stored_cert_bytes(&self) -> Result<String, Error> {
let stored_cert = self.get_certificate()?;
Ok(stored_cert.cert)
}
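// Note: the returned future resolves only after the renewal alarm fires and the
// callback has run (or immediately with an error if the alarm time has already
// passed), so callers are expected to drive it on the runtime. Illustrative
// sketch only (`manager` and `cb` are placeholders, not part of this crate):
//     tokio::spawn(manager.schedule_expiration_timer(cb).map_err(|_| ()));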
pub fn schedule_expiration_timer<F>(
&self,
expiration_callback: F,
) -> impl Future<Item = (), Error = Error>
where
F: FnOnce() -> Result<(), ()> + Sync + Send + 'static,
{
// Now, let's set a timer to renew this certificate before it expires;
// the alarm fires once roughly 95% of its lifetime has elapsed.
let when = self.compute_certificate_alarm_time();
// Fail if the alarm time has already passed (or is less than a second away)
// by the time the call to create a timer happens.
if when < (Instant::now() + Duration::from_secs(1)) {
Either::A(future::err(Error::from(
ErrorKind::CertificateTimerCreationError,
)))
} else {
Either::B(
Delay::new(when)
.map_err(|_| Error::from(ErrorKind::CertificateTimerCreationError))
.and_then(move |_| match expiration_callback() {
Ok(_) => Ok(()),
Err(_) => Err(Error::from(ErrorKind::CertificateTimerRuntimeError)),
}),
)
}
}
fn get_certificate(&self) -> Result<Certificate, Error> {
// Try to directly read
let stored_cert = self
.certificate
.read()
.expect("Locking the certificate for read failed.");
match stored_cert.as_ref() {
Some(stored_cert) => Ok(stored_cert.clone()),
None => Err(Error::from(ErrorKind::CertificateNotFound)),
}
}
fn create_cert(&self) -> Result<Certificate, Error> {
// In some use cases, the CA cert might change - to protect against that,
// we will retry once (after attempting to delete) if the cert creation fails.
let cert = if let Ok(val) = self.crypto.create_certificate(&self.props) {
val
} else {
self.crypto
.destroy_certificate(self.props.alias().to_string())
.with_context(|_| ErrorKind::CertificateDeletionError)?;
self.crypto
.create_certificate(&self.props)
.with_context(|_| ErrorKind::CertificateCreationError)?
};
let cert_pem = cert
.pem()
.with_context(|_| ErrorKind::CertificateCreationError)?;
let cert_private_key = cert
.get_private_key()
.with_context(|_| ErrorKind::CertificateCreationError)?;
let pk = match cert_private_key {
Some(pk) => pk,
None => panic!("Unable to acquire a private key."),
};
// Our implementations do not return a ref, and even if they did, it would be unusable by Tokio:
// a ref is simply a label/alias for a private key, not the actual bits.
let pk_bytes = match pk {
PrivateKey::Ref(_) => panic!(
"A reference private key does not contain the bits needed for the TLS certificate."
),
PrivateKey::Key(KeyBytes::Pem(k)) => k,
};
let cert_str = String::from_utf8(cert_pem.as_ref().to_vec())
.with_context(|_| ErrorKind::CertificateCreationError)?;
let key_str = String::from_utf8(pk_bytes.as_bytes().to_vec())
.with_context(|_| ErrorKind::CertificateCreationError)?;
Ok(Certificate {
cert: cert_str,
private_key: key_str,
})
}
// Determine when to sound the alarm and renew the certificate.
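// For example, with validity_in_secs == 7200 the alarm fires at
// creation_time + 6840s, i.e. once 95% of the lifetime has elapsed.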
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
#[allow(clippy::cast_precision_loss)]
fn compute_certificate_alarm_time(&self) -> Instant {
self.creation_time
+ Duration::from_secs((*self.props.validity_in_secs() as f64 * 0.95) as u64)
}
#[cfg(test)]
fn has_certificate(&self) -> bool {
!self
.certificate
.read()
.expect("Locking the certificate for read failed.")
.is_none()
}
}
#[cfg(test)]
mod tests {
use super::{CertificateManager, ErrorKind, Future};
use edgelet_core::crypto::{KeyBytes, PrivateKey};
use edgelet_core::{CertificateProperties, CertificateType};
use chrono::{DateTime, Utc};
use edgelet_core::{
Certificate as CoreCertificate, CertificateProperties as CoreCertificateProperties,
CreateCertificate as CoreCreateCertificate, Error as CoreError,
PrivateKey as CorePrivateKey,
};
#[test]
pub fn test_cert_manager_pem_has_cert() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
123_456,
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let cert = manager.get_certificate().unwrap();
assert_eq!(cert.cert, "test".to_string());
assert_eq!(manager.has_certificate(), true);
}
#[test]
pub fn test_cert_manager_expired_timer_creation() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
1, // 1 second validity
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let _timer = manager.schedule_expiration_timer(|| Ok(()));
}
#[test]
pub fn test_cert_manager_expired_timer_creation_fails() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
50, // 50 second validity
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let timer = manager.schedule_expiration_timer(|| Ok(())).wait();
match timer {
Ok(_) => panic!("Should not be okay to create this timer..."),
Err(err) => |
}
}
#[derive(Clone)]
struct TestCrypto {
created: bool,
}
impl TestCrypto {
pub fn new() -> Result<Self, CoreError> {
Ok(Self { created: true })
}
}
impl CoreCreateCertificate for TestCrypto {
type Certificate = TestCertificate;
fn create_certificate(
&self,
_properties | {
if let ErrorKind::CertificateTimerCreationError = err.kind() {
assert_eq!(true, true);
} else {
panic!(
"Expected a CertificateTimerCreationError type, but got {:?}",
err
);
}
} | conditional_block |
certificate_manager.rs | (Clone)]
struct Certificate {
cert: String,
private_key: String,
}
impl<C: CreateCertificate + Clone> CertificateManager<C> {
pub fn new(crypto: C, props: CertificateProperties) -> Result<Self, Error> {
let cert_manager = Self {
certificate: Arc::new(RwLock::new(None)),
crypto,
props,
creation_time: Instant::now(),
};
{
let mut cert = cert_manager
.certificate
.write()
.expect("Locking the certificate for write failed.");
let created_certificate = cert_manager.create_cert()?;
*cert = Some(created_certificate);
}
Ok(cert_manager)
}
// Convenience function since native-tls does not yet support PEM
// and since everything else uses PEM certificates, we want to keep
// the actual storage of the certificate in the PEM format.
#[cfg(unix)]
pub fn get_pkcs12_certificate(&self) -> Result<Vec<u8>, Error> {
let stored_cert_bundle = self.get_certificate()?;
let cert = stored_cert_bundle.cert.as_bytes();
let mut certs =
X509::stack_from_pem(cert).with_context(|_| ErrorKind::CertificateConversionError)?;
let mut ca_certs = Stack::new().with_context(|_| ErrorKind::CertificateConversionError)?;
for cert in certs.split_off(1) {
ca_certs
.push(cert)
.with_context(|_| ErrorKind::CertificateConversionError)?;
}
let key = PKey::private_key_from_pem(stored_cert_bundle.private_key.as_bytes())
.expect("Error processing private key from pem");
let server_cert = &certs[0];
let mut builder = Pkcs12::builder();
builder.ca(ca_certs);
let pkcs_certs = builder
.build("", "", &key, &server_cert)
.with_context(|_| ErrorKind::CertificateConversionError)?;
Ok(pkcs_certs
.to_der()
.with_context(|_| ErrorKind::CertificateConversionError)?)
}
pub fn get_stored_cert_bytes(&self) -> Result<String, Error> {
let stored_cert = self.get_certificate()?;
Ok(stored_cert.cert)
}
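// Note: the returned future resolves only after the renewal alarm fires and the
// callback has run (or immediately with an error if the alarm time has already
// passed), so callers are expected to drive it on the runtime. Illustrative
// sketch only (`manager` and `cb` are placeholders, not part of this crate):
//     tokio::spawn(manager.schedule_expiration_timer(cb).map_err(|_| ()));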
pub fn schedule_expiration_timer<F>(
&self,
expiration_callback: F,
) -> impl Future<Item = (), Error = Error>
where
F: FnOnce() -> Result<(), ()> + Sync + Send + 'static,
{
// Now, let's set a timer to renew this certificate before it expires;
// the alarm fires once roughly 95% of its lifetime has elapsed.
let when = self.compute_certificate_alarm_time();
// Fail if the alarm time has already passed (or is less than a second away)
// by the time the call to create a timer happens.
if when < (Instant::now() + Duration::from_secs(1)) {
Either::A(future::err(Error::from(
ErrorKind::CertificateTimerCreationError,
)))
} else {
Either::B(
Delay::new(when)
.map_err(|_| Error::from(ErrorKind::CertificateTimerCreationError))
.and_then(move |_| match expiration_callback() {
Ok(_) => Ok(()),
Err(_) => Err(Error::from(ErrorKind::CertificateTimerRuntimeError)),
}),
)
}
}
fn get_certificate(&self) -> Result<Certificate, Error> {
// Try to directly read
let stored_cert = self
.certificate
.read()
.expect("Locking the certificate for read failed.");
match stored_cert.as_ref() {
Some(stored_cert) => Ok(stored_cert.clone()),
None => Err(Error::from(ErrorKind::CertificateNotFound)),
}
}
fn create_cert(&self) -> Result<Certificate, Error> {
// In some use cases, the CA cert might change - to protect against that,
// we will retry once (after attempting to delete) if the cert creation fails.
let cert = if let Ok(val) = self.crypto.create_certificate(&self.props) {
val
} else {
self.crypto
.destroy_certificate(self.props.alias().to_string())
.with_context(|_| ErrorKind::CertificateDeletionError)?;
self.crypto
.create_certificate(&self.props)
.with_context(|_| ErrorKind::CertificateCreationError)?
};
let cert_pem = cert
.pem()
.with_context(|_| ErrorKind::CertificateCreationError)?;
let cert_private_key = cert
.get_private_key()
.with_context(|_| ErrorKind::CertificateCreationError)?;
let pk = match cert_private_key {
Some(pk) => pk,
None => panic!("Unable to acquire a private key."),
};
// Our implementations do not return a ref, and even if they did, it would be unusable by Tokio:
// a ref is simply a label/alias for a private key, not the actual bits.
let pk_bytes = match pk {
PrivateKey::Ref(_) => panic!(
"A reference private key does not contain the bits needed for the TLS certificate."
),
PrivateKey::Key(KeyBytes::Pem(k)) => k,
};
let cert_str = String::from_utf8(cert_pem.as_ref().to_vec())
.with_context(|_| ErrorKind::CertificateCreationError)?;
let key_str = String::from_utf8(pk_bytes.as_bytes().to_vec())
.with_context(|_| ErrorKind::CertificateCreationError)?;
Ok(Certificate {
cert: cert_str,
private_key: key_str,
})
}
// Determine when to sound the alarm and renew the certificate.
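// For example, with validity_in_secs == 7200 the alarm fires at
// creation_time + 6840s, i.e. once 95% of the lifetime has elapsed.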
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
#[allow(clippy::cast_precision_loss)]
fn compute_certificate_alarm_time(&self) -> Instant {
self.creation_time
+ Duration::from_secs((*self.props.validity_in_secs() as f64 * 0.95) as u64)
}
#[cfg(test)]
fn has_certificate(&self) -> bool {
!self
.certificate
.read()
.expect("Locking the certificate for read failed.")
.is_none()
}
}
#[cfg(test)]
mod tests {
use super::{CertificateManager, ErrorKind, Future};
use edgelet_core::crypto::{KeyBytes, PrivateKey};
use edgelet_core::{CertificateProperties, CertificateType};
use chrono::{DateTime, Utc};
use edgelet_core::{
Certificate as CoreCertificate, CertificateProperties as CoreCertificateProperties,
CreateCertificate as CoreCreateCertificate, Error as CoreError,
PrivateKey as CorePrivateKey,
};
#[test]
pub fn test_cert_manager_pem_has_cert() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
123_456,
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let cert = manager.get_certificate().unwrap();
assert_eq!(cert.cert, "test".to_string());
assert_eq!(manager.has_certificate(), true);
}
#[test]
pub fn test_cert_manager_expired_timer_creation() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
1, // 1 second validity
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let _timer = manager.schedule_expiration_timer(|| Ok(()));
}
#[test]
pub fn test_cert_manager_expired_timer_creation_fails() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
50, // 50 second validity
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let timer = manager.schedule_expiration_timer(|| Ok(())).wait();
match timer {
Ok(_) => panic!("Should not be okay to create this timer..."),
Err(err) => {
if let ErrorKind::CertificateTimerCreationError = err.kind() {
assert_eq!(true, true);
} else {
panic!(
"Expected a CertificateTimerCreationError type, but got {:?}",
err
);
}
}
}
}
#[derive(Clone)]
struct TestCrypto {
created: bool,
}
impl TestCrypto {
pub fn new() -> Result<Self, CoreError> {
Ok(Self { created: true })
}
}
impl CoreCreateCertificate for TestCrypto {
type Certificate = TestCertificate;
fn create_certificate(
&self,
_properties: &CoreCertificateProperties,
) -> Result<Self::Certificate, CoreError> {
Ok(TestCertificate {})
}
fn destroy_certificate(&self, _alias: String) -> Result<(), CoreError> {
Ok(())
}
fn get_certificate(&self, _alias: String) -> Result<Self::Certificate, CoreError> {
Ok(TestCertificate {})
}
}
struct | TestCertificate | identifier_name |
|
certificate_manager.rs | {
let cert_manager = Self {
certificate: Arc::new(RwLock::new(None)),
crypto,
props,
creation_time: Instant::now(),
};
{
let mut cert = cert_manager
.certificate
.write()
.expect("Locking the certificate for write failed.");
let created_certificate = cert_manager.create_cert()?;
*cert = Some(created_certificate);
}
Ok(cert_manager)
}
// Convenience function since native-tls does not yet support PEM
// and since everything else uses PEM certificates, we want to keep
// the actual storage of the certificate in the PEM format.
#[cfg(unix)]
pub fn get_pkcs12_certificate(&self) -> Result<Vec<u8>, Error> {
let stored_cert_bundle = self.get_certificate()?;
let cert = stored_cert_bundle.cert.as_bytes();
let mut certs =
X509::stack_from_pem(cert).with_context(|_| ErrorKind::CertificateConversionError)?;
let mut ca_certs = Stack::new().with_context(|_| ErrorKind::CertificateConversionError)?;
for cert in certs.split_off(1) {
ca_certs
.push(cert)
.with_context(|_| ErrorKind::CertificateConversionError)?;
}
let key = PKey::private_key_from_pem(stored_cert_bundle.private_key.as_bytes())
.expect("Error processing private key from pem");
let server_cert = &certs[0];
let mut builder = Pkcs12::builder();
builder.ca(ca_certs);
let pkcs_certs = builder
.build("", "", &key, &server_cert)
.with_context(|_| ErrorKind::CertificateConversionError)?;
Ok(pkcs_certs
.to_der()
.with_context(|_| ErrorKind::CertificateConversionError)?)
}
pub fn get_stored_cert_bytes(&self) -> Result<String, Error> {
let stored_cert = self.get_certificate()?;
Ok(stored_cert.cert)
}
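// Note: the returned future resolves only after the renewal alarm fires and the
// callback has run (or immediately with an error if the alarm time has already
// passed), so callers are expected to drive it on the runtime. Illustrative
// sketch only (`manager` and `cb` are placeholders, not part of this crate):
//     tokio::spawn(manager.schedule_expiration_timer(cb).map_err(|_| ()));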
pub fn schedule_expiration_timer<F>(
&self,
expiration_callback: F,
) -> impl Future<Item = (), Error = Error>
where
F: FnOnce() -> Result<(), ()> + Sync + Send + 'static,
{
// Now, let's set a timer to renew this certificate before it expires;
// the alarm fires once roughly 95% of its lifetime has elapsed.
let when = self.compute_certificate_alarm_time();
// Fail if the alarm time has already passed (or is less than a second away)
// by the time the call to create a timer happens.
if when < (Instant::now() + Duration::from_secs(1)) {
Either::A(future::err(Error::from(
ErrorKind::CertificateTimerCreationError,
)))
} else {
Either::B(
Delay::new(when)
.map_err(|_| Error::from(ErrorKind::CertificateTimerCreationError))
.and_then(move |_| match expiration_callback() {
Ok(_) => Ok(()),
Err(_) => Err(Error::from(ErrorKind::CertificateTimerRuntimeError)),
}),
)
}
}
fn get_certificate(&self) -> Result<Certificate, Error> {
// Try to directly read
let stored_cert = self
.certificate
.read()
.expect("Locking the certificate for read failed.");
match stored_cert.as_ref() {
Some(stored_cert) => Ok(stored_cert.clone()),
None => Err(Error::from(ErrorKind::CertificateNotFound)),
}
}
fn create_cert(&self) -> Result<Certificate, Error> {
// In some use cases, the CA cert might change - to protect against that,
// we will retry once (after attempting to delete) if the cert creation fails.
let cert = if let Ok(val) = self.crypto.create_certificate(&self.props) {
val
} else {
self.crypto
.destroy_certificate(self.props.alias().to_string())
.with_context(|_| ErrorKind::CertificateDeletionError)?;
self.crypto
.create_certificate(&self.props)
.with_context(|_| ErrorKind::CertificateCreationError)?
};
let cert_pem = cert
.pem()
.with_context(|_| ErrorKind::CertificateCreationError)?;
let cert_private_key = cert
.get_private_key()
.with_context(|_| ErrorKind::CertificateCreationError)?;
let pk = match cert_private_key {
Some(pk) => pk,
None => panic!("Unable to acquire a private key."),
};
// Our implementations do not return a ref, and even if they did, it would be unusable by Tokio:
// a ref is simply a label/alias for a private key, not the actual bits.
let pk_bytes = match pk {
PrivateKey::Ref(_) => panic!(
"A reference private key does not contain the bits needed for the TLS certificate."
),
PrivateKey::Key(KeyBytes::Pem(k)) => k,
};
let cert_str = String::from_utf8(cert_pem.as_ref().to_vec())
.with_context(|_| ErrorKind::CertificateCreationError)?;
let key_str = String::from_utf8(pk_bytes.as_bytes().to_vec())
.with_context(|_| ErrorKind::CertificateCreationError)?;
Ok(Certificate {
cert: cert_str,
private_key: key_str,
})
}
// Determine when to sound the alarm and renew the certificate.
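// For example, with validity_in_secs == 7200 the alarm fires at
// creation_time + 6840s, i.e. once 95% of the lifetime has elapsed.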
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
#[allow(clippy::cast_precision_loss)]
fn compute_certificate_alarm_time(&self) -> Instant {
self.creation_time
+ Duration::from_secs((*self.props.validity_in_secs() as f64 * 0.95) as u64)
}
#[cfg(test)]
fn has_certificate(&self) -> bool {
!self
.certificate
.read()
.expect("Locking the certificate for read failed.")
.is_none()
}
}
#[cfg(test)]
mod tests {
use super::{CertificateManager, ErrorKind, Future};
use edgelet_core::crypto::{KeyBytes, PrivateKey};
use edgelet_core::{CertificateProperties, CertificateType};
use chrono::{DateTime, Utc};
use edgelet_core::{
Certificate as CoreCertificate, CertificateProperties as CoreCertificateProperties,
CreateCertificate as CoreCreateCertificate, Error as CoreError,
PrivateKey as CorePrivateKey,
};
#[test]
pub fn test_cert_manager_pem_has_cert() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
123_456,
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let cert = manager.get_certificate().unwrap();
assert_eq!(cert.cert, "test".to_string());
assert_eq!(manager.has_certificate(), true);
}
#[test]
pub fn test_cert_manager_expired_timer_creation() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
1, // 1 second validity
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let _timer = manager.schedule_expiration_timer(|| Ok(()));
}
#[test]
pub fn test_cert_manager_expired_timer_creation_fails() {
let crypto = TestCrypto::new().unwrap();
let edgelet_cert_props = CertificateProperties::new(
50, // 50 second validity
"IOTEDGED_TLS_COMMONNAME".to_string(),
CertificateType::Server,
"iotedge-tls".to_string(),
);
let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap();
let timer = manager.schedule_expiration_timer(|| Ok(())).wait();
match timer {
Ok(_) => panic!("Should not be okay to create this timer..."),
Err(err) => {
if let ErrorKind::CertificateTimerCreationError = err.kind() {
assert_eq!(true, true);
} else {
panic!(
"Expected a CertificateTimerCreationError type, but got {:?}",
err
);
}
}
}
}
#[derive(Clone)]
struct TestCrypto {
created: bool,
}
impl TestCrypto {
pub fn new() -> Result<Self, CoreError> {
Ok(Self { created: true })
}
}
impl CoreCreateCertificate for TestCrypto {
type Certificate = TestCertificate;
fn create_certificate(
&self,
_properties: &CoreCertificateProperties,
) -> Result<Self::Certificate, CoreError> {
Ok(TestCertificate {})
}
fn destroy_certificate(&self, _alias: String) -> Result<(), CoreError> {
Ok(())
}
fn get_certificate(&self, _alias: String) -> Result<Self::Certificate, CoreError> {
Ok(TestCertificate {})
}
}
struct TestCertificate {}
impl CoreCertificate for TestCertificate {
type Buffer = String;
type KeyBuffer = Vec<u8>;
fn pem(&self) -> Result<Self::Buffer, CoreError> | {
Ok("test".to_string())
} | identifier_body |
|
huifushishichang.js | HttpRequest, textStatus, errorThrown) {
alert(errorThrown);
}
});
function insertHFSSCTable(){
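// Rebuilds the whole table: clears the header and body, renders the two-row
// header, a totals row, then one row per record with a pin-to-top (🔝) control
// that copies the row into #HFSSC_table_top.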
var table = doc.getElementById("HFSSC_table");
var thead = doc.getElementById("HFSSC_table_head");
table.innerHTML = '';
thead.innerHTML = '';
//add table head
var top = doc.getElementById('HFSSC_table_top');
if(HFSSCloadpage != 1){
top.style.display = 'none';
}else{
top.style.display = 'block';
}
for(var rows=0; rows<2; rows++) {
var trHead = doc.createElement("tr");
for (var t = 0; t < HFSSCdataTitle.length-1; t++) {
var th = doc.createElement("th");
var thData;
if(rows == 0){
if(t == 0){
var pp = doc.createElement('div');
//span.innerHTML = '🔝';
th.appendChild(pp);
pp.style.width = '10%';
pp.style.float = 'left';
pp.style.padding = '8px';
//trHead.appendChild(th);
thData = doc.createTextNode(HFSSCdataTitle[t]);
th.appendChild(thData);
th.rowSpan = '2';
th.style.width = '22%';
th.style.verticalAlign = "middle";
th.style.borderRight = '1px #D6D6D6 solid';
th.id = "tdd";
}else if(t == 1){
thData = doc.createTextNode("超过一小时");
th.appendChild(thData);
th.colSpan = '2';
th.style.width = '30%';
}
}else if(rows == 1){
thData = doc.createTextNode(HFSSCdataTitle[t+1]);
th.appendChild(thData);
th.style.width = '15%';
}
th.style.textAlign = "center";
trHead.appendChild(th);
}
thead.appendChild(trHead);
}
// add a row containing the totals of the operation columns
var tr = doc.createElement("tr");
// allOpe: the total number of operations
// allEmergentOpe: the total number of emergent operations
// allChangeOpe: the total number of data-change operations
var allOpe = 0,
allEmergentOpe = 0,
allChangeOpe = 0;
console.log(HFSSCdataSource.length);
for(var i=0;i<HFSSCdataSource.length;i++){
allOpe += HFSSCdataSource[i][1];
allEmergentOpe += HFSSCdataSource[i][2];
allChangeOpe += HFSSCdataSource[i][3];
}
var data = new Array(4);
data[0] = doc.createTextNode(''),
data[1] = doc.createTextNode("合计"),
data[2] = doc.createTextNode(allOpe),
data[3] = doc.createTextNode(allEmergentOpe);
for(var t=0; t<data.length; t++){
var td = doc.createElement("td");
td.title = data[t];
td.appendChild(data[t]);
if(t==0){
td.style.padding = '8px';
}
if(t>0){
td.style.textAlign = "center";
}
tr.appendChild(td);
}
if(HFSSCdataSource.length != 0){
table.append | ******before detail a, now with td
// add data rows
for(var i=0;i<HFSSCdataSource.length;i++){
var tr = doc.createElement("tr");
var td = doc.createElement('td'),
span = doc.createElement('span');
span.innerHTML = '🔝';
td.appendChild(span);
td.style.width = '2%';
tr.appendChild(td);
tr.onclick = function(){
$(this).find('span').css('visibility', 'visible');
};
var tdIndexTemp = (HFSSCloadpage-1) * HFSSCnumPer + i + 1;
if(HFSSCTopList.indexOf(tdIndexTemp) != -1){
$(td).find('span').css('background-color', 'yellow');
$(td).find('span').css('visibility', 'visible');
}
//var param = { i: i, page: SSHDpage, numPer: SSHDnumPer };
var param = { tdIndexTemp: tdIndexTemp };
$(span).click(param, function(event){
//var ii = event.data.i,
// pp = event.data.page,
// np = event.data.numPer;
//var tdIndex = (pp-1) * np + ii + 1;
var tdIndex = event.data.tdIndexTemp;
//console.log('tdIndex', tdIndex, SSHDTopList.indexOf(tdIndex));
if(HFSSCTopList.indexOf(tdIndex) == -1){
$('#HFSSC_table_top').prepend($(this).parent().parent().clone(true));
$(this).css('background-color', 'yellow');
//$(this).css('visibility', 'hidden');
alert('成功置顶');
HFSSCTopList.push(tdIndex);
}else{
alert('该项已置顶');
}
});
for(var j=0;j<HFSSCdataSource[i].length;j++){
var data = doc.createTextNode(HFSSCdataSource[i][j]);
var td = doc.createElement("td");
if(j>0){
var a = doc.createElement("a");
td.title = HFSSCdataSource[i][j];
a.appendChild(data);
td.appendChild(data);
td.style.textAlign = "center";
}else{
td.title = HFSSCdataSource[i][j];
td.appendChild(data);
}
if(j==0){
td.style.width = '20%';
}else{
td.style.width = '15%';
}
tr.appendChild(td);
}
table.appendChild(tr);
}
HFSSCTotal.innerHTML = HFSSCTotalPage;
}
// Pagination
var HFSSCbeforePage = doc.getElementById("HFSSCPageBefore"),
HFSSCnextPage = doc.getElementById("HFSSCPageNext"),
HFSSCPageNum = doc.getElementById("HFSSCPageNum");
HFSSCbeforePage.onclick = function(){
if(HFSSCloadpage==1){alert("已经是第一页");}
else{
HFSSCloadpage --;
//console.log(HFSSCloadpage);
var url2 = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
$.ajax({
type: "get",
url: url2,
dataType: "json",
jsonp:"callback",
success: function (data) {
HFSSCdataSource = data.data;
HFSSCdataTitle = data.header;
HFSSCTotalPage = data.pageCount;
HFSSCPageNum.placeholder = HFSSCloadpage;
insertHFSSCTable();
},
error: function (XMLHttpRequest, textStatus, errorThrown) {
alert(errorThrown);
}
});
}
}
HFSSCnextPage.onclick = function(){
HFSSCloadpage ++;
var url2 = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
if(HFSSCloadpage > HFSSCTotalPage){
HFSSCloadpage --;
alert('已经是最后一页');
}else {
$.ajax({
type: "get",
url: url2,
dataType: "json",
jsonp: "callback",
success: function (data) {
HFSSCdataSource = data.data;
HFSSCdataTitle = data.header;
HFSSCTotalPage = data.pageCount;
HFSSCPageNum.placeholder = HFSSCloadpage;
insertHFSSCTable();
},
error: function (XMLHttpRequest, textStatus, errorThrown) {
alert(errorThrown);
}
});
}
}
// Search by date range
HFSSCsubmitDate.onclick = function () {
getDate(HFSSCstartDate,HFSSCendDate);
HFSSCurlStartTime = getDate(HFSSCstartDate,HFSSCendDate)[0],
HFSSCurlEndTime = getDate(HFSSCstartDate,HFSSCendDate)[1];
HFSSCloadpage = 1;
var urlTime = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
$.ajax({
type: "get",
url: urlTime,
dataType: "json",
jsonp:"callback",
| Child(tr);
}
// ********* | conditional_block |
huifushishichang.js | , textStatus, errorThrown) {
alert(errorThrown);
}
});
function insertHFSSCTable() | var thData;
if(rows == 0){
if(t == 0){
var pp = doc.createElement('div');
//span.innerHTML = '🔝';
th.appendChild(pp);
pp.style.width = '10%';
pp.style.float = 'left';
pp.style.padding = '8px';
//trHead.appendChild(th);
thData = doc.createTextNode(HFSSCdataTitle[t]);
th.appendChild(thData);
th.rowSpan = '2';
th.style.width = '22%';
th.style.verticalAlign = "middle";
th.style.borderRight = '1px #D6D6D6 solid';
th.id = "tdd";
}else if(t == 1){
thData = doc.createTextNode("超过一小时");
th.appendChild(thData);
th.colSpan = '2';
th.style.width = '30%';
}
}else if(rows == 1){
thData = doc.createTextNode(HFSSCdataTitle[t+1]);
th.appendChild(thData);
th.style.width = '15%';
}
th.style.textAlign = "center";
trHead.appendChild(th);
}
thead.appendChild(trHead);
}
// add a row containing the totals of the operation columns
var tr = doc.createElement("tr");
// allOpe: the total number of operations
// allEmergentOpe: the total number of emergent operations
// allChangeOpe: the total number of data-change operations
var allOpe = 0,
allEmergentOpe = 0,
allChangeOpe = 0;
console.log(HFSSCdataSource.length);
for(var i=0;i<HFSSCdataSource.length;i++){
allOpe += HFSSCdataSource[i][1];
allEmergentOpe += HFSSCdataSource[i][2];
allChangeOpe += HFSSCdataSource[i][3];
}
var data = new Array(4);
data[0] = doc.createTextNode(''),
data[1] = doc.createTextNode("合计"),
data[2] = doc.createTextNode(allOpe),
data[3] = doc.createTextNode(allEmergentOpe);
for(var t=0; t<data.length; t++){
var td = doc.createElement("td");
td.title = data[t];
td.appendChild(data[t]);
if(t==0){
td.style.padding = '8px';
}
if(t>0){
td.style.textAlign = "center";
}
tr.appendChild(td);
}
if(HFSSCdataSource.length != 0){
table.appendChild(tr);
}
// note: the detail cell previously used an <a> element, now a plain td
// add data rows
for(var i=0;i<HFSSCdataSource.length;i++){
var tr = doc.createElement("tr");
var td = doc.createElement('td'),
span = doc.createElement('span');
span.innerHTML = '🔝';
td.appendChild(span);
td.style.width = '2%';
tr.appendChild(td);
tr.onclick = function(){
$(this).find('span').css('visibility', 'visible');
};
var tdIndexTemp = (HFSSCloadpage-1) * HFSSCnumPer + i + 1;
if(HFSSCTopList.indexOf(tdIndexTemp) != -1){
$(td).find('span').css('background-color', 'yellow');
$(td).find('span').css('visibility', 'visible');
}
//var param = { i: i, page: SSHDpage, numPer: SSHDnumPer };
var param = { tdIndexTemp: tdIndexTemp };
$(span).click(param, function(event){
//var ii = event.data.i,
// pp = event.data.page,
// np = event.data.numPer;
//var tdIndex = (pp-1) * np + ii + 1;
var tdIndex = event.data.tdIndexTemp;
//console.log('tdIndex', tdIndex, SSHDTopList.indexOf(tdIndex));
if(HFSSCTopList.indexOf(tdIndex) == -1){
$('#HFSSC_table_top').prepend($(this).parent().parent().clone(true));
$(this).css('background-color', 'yellow');
//$(this).css('visibility', 'hidden');
alert('成功置顶');
HFSSCTopList.push(tdIndex);
}else{
alert('该项已置顶');
}
});
for(var j=0;j<HFSSCdataSource[i].length;j++){
var data = doc.createTextNode(HFSSCdataSource[i][j]);
var td = doc.createElement("td");
if(j>0){
var a = doc.createElement("a");
td.title = HFSSCdataSource[i][j];
a.appendChild(data);
td.appendChild(data);
td.style.textAlign = "center";
}else{
td.title = HFSSCdataSource[i][j];
td.appendChild(data);
}
if(j==0){
td.style.width = '20%';
}else{
td.style.width = '15%';
}
tr.appendChild(td);
}
table.appendChild(tr);
}
HFSSCTotal.innerHTML = HFSSCTotalPage;
}
// Pagination
var HFSSCbeforePage = doc.getElementById("HFSSCPageBefore"),
HFSSCnextPage = doc.getElementById("HFSSCPageNext"),
HFSSCPageNum = doc.getElementById("HFSSCPageNum");
HFSSCbeforePage.onclick = function(){
if(HFSSCloadpage==1){alert("已经是第一页");}
else{
HFSSCloadpage --;
//console.log(HFSSCloadpage);
var url2 = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
$.ajax({
type: "get",
url: url2,
dataType: "json",
jsonp:"callback",
success: function (data) {
HFSSCdataSource = data.data;
HFSSCdataTitle = data.header;
HFSSCTotalPage = data.pageCount;
HFSSCPageNum.placeholder = HFSSCloadpage;
insertHFSSCTable();
},
error: function (XMLHttpRequest, textStatus, errorThrown) {
alert(errorThrown);
}
});
}
}
HFSSCnextPage.onclick = function(){
HFSSCloadpage ++;
var url2 = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
if(HFSSCloadpage > HFSSCTotalPage){
HFSSCloadpage --;
alert('已经是最后一页');
}else {
$.ajax({
type: "get",
url: url2,
dataType: "json",
jsonp: "callback",
success: function (data) {
HFSSCdataSource = data.data;
HFSSCdataTitle = data.header;
HFSSCTotalPage = data.pageCount;
HFSSCPageNum.placeholder = HFSSCloadpage;
insertHFSSCTable();
},
error: function (XMLHttpRequest, textStatus, errorThrown) {
alert(errorThrown);
}
});
}
}
// Search by date range
HFSSCsubmitDate.onclick = function () {
getDate(HFSSCstartDate,HFSSCendDate);
HFSSCurlStartTime = getDate(HFSSCstartDate,HFSSCendDate)[0],
HFSSCurlEndTime = getDate(HFSSCstartDate,HFSSCendDate)[1];
HFSSCloadpage = 1;
var urlTime = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
$.ajax({
type: "get",
url: urlTime,
dataType: "json",
jsonp:" | {
var table = doc.getElementById("HFSSC_table");
var thead = doc.getElementById("HFSSC_table_head");
table.innerHTML = '';
thead.innerHTML = '';
//add table head
var top = doc.getElementById('HFSSC_table_top');
if(HFSSCloadpage != 1){
top.style.display = 'none';
}else{
top.style.display = 'block';
}
for(var rows=0; rows<2; rows++) {
var trHead = doc.createElement("tr");
for (var t = 0; t < HFSSCdataTitle.length-1; t++) {
var th = doc.createElement("th");
| identifier_body |
huifushishichang.js | '30%';
}
}else if(rows == 1){
thData = doc.createTextNode(HFSSCdataTitle[t+1]);
th.appendChild(thData);
th.style.width = '15%';
}
th.style.textAlign = "center";
trHead.appendChild(th);
}
thead.appendChild(trHead);
}
// add a row containing the totals of the operation columns
var tr = doc.createElement("tr");
// allOpe: the total number of operations
// allEmergentOpe: the total number of emergent operations
// allChangeOpe: the total number of data-change operations
var allOpe = 0,
allEmergentOpe = 0,
allChangeOpe = 0;
console.log(HFSSCdataSource.length);
for(var i=0;i<HFSSCdataSource.length;i++){
allOpe += HFSSCdataSource[i][1];
allEmergentOpe += HFSSCdataSource[i][2];
allChangeOpe += HFSSCdataSource[i][3];
}
var data = new Array(4);
data[0] = doc.createTextNode(''),
data[1] = doc.createTextNode("合计"),
data[2] = doc.createTextNode(allOpe),
data[3] = doc.createTextNode(allEmergentOpe);
for(var t=0; t<data.length; t++){
var td = doc.createElement("td");
td.title = data[t];
td.appendChild(data[t]);
if(t==0){
td.style.padding = '8px';
}
if(t>0){
td.style.textAlign = "center";
}
tr.appendChild(td);
}
if(HFSSCdataSource.length != 0){
table.appendChild(tr);
}
// note: the detail cell previously used an <a> element, now a plain td
// add data rows
for(var i=0;i<HFSSCdataSource.length;i++){
var tr = doc.createElement("tr");
var td = doc.createElement('td'),
span = doc.createElement('span');
span.innerHTML = '🔝';
td.appendChild(span);
td.style.width = '2%';
tr.appendChild(td);
tr.onclick = function(){
$(this).find('span').css('visibility', 'visible');
};
var tdIndexTemp = (HFSSCloadpage-1) * HFSSCnumPer + i + 1;
if(HFSSCTopList.indexOf(tdIndexTemp) != -1){
$(td).find('span').css('background-color', 'yellow');
$(td).find('span').css('visibility', 'visible');
}
//var param = { i: i, page: SSHDpage, numPer: SSHDnumPer };
var param = { tdIndexTemp: tdIndexTemp };
$(span).click(param, function(event){
//var ii = event.data.i,
// pp = event.data.page,
// np = event.data.numPer;
//var tdIndex = (pp-1) * np + ii + 1;
var tdIndex = event.data.tdIndexTemp;
//console.log('tdIndex', tdIndex, SSHDTopList.indexOf(tdIndex));
if(HFSSCTopList.indexOf(tdIndex) == -1){
$('#HFSSC_table_top').prepend($(this).parent().parent().clone(true));
$(this).css('background-color', 'yellow');
//$(this).css('visibility', 'hidden');
alert('成功置顶');
HFSSCTopList.push(tdIndex);
}else{
alert('该项已置顶');
}
});
for(var j=0;j<HFSSCdataSource[i].length;j++){
var data = doc.createTextNode(HFSSCdataSource[i][j]);
var td = doc.createElement("td");
if(j>0){
var a = doc.createElement("a");
td.title = HFSSCdataSource[i][j];
a.appendChild(data);
td.appendChild(data);
td.style.textAlign = "center";
}else{
td.title = HFSSCdataSource[i][j];
td.appendChild(data);
}
if(j==0){
td.style.width = '20%';
}else{
td.style.width = '15%';
}
tr.appendChild(td);
}
table.appendChild(tr);
}
HFSSCTotal.innerHTML = HFSSCTotalPage;
}
// Pagination
var HFSSCbeforePage = doc.getElementById("HFSSCPageBefore"),
HFSSCnextPage = doc.getElementById("HFSSCPageNext"),
HFSSCPageNum = doc.getElementById("HFSSCPageNum");
HFSSCbeforePage.onclick = function(){
if(HFSSCloadpage==1){alert("已经是第一页");}
else{
HFSSCloadpage --;
//console.log(HFSSCloadpage);
var url2 = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
$.ajax({
type: "get",
url: url2,
dataType: "json",
jsonp:"callback",
success: function (data) {
HFSSCdataSource = data.data;
HFSSCdataTitle = data.header;
HFSSCTotalPage = data.pageCount;
HFSSCPageNum.placeholder = HFSSCloadpage;
insertHFSSCTable();
},
error: function (XMLHttpRequest, textStatus, errorThrown) {
alert(errorThrown);
}
});
}
}
HFSSCnextPage.onclick = function(){
HFSSCloadpage ++;
var url2 = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
if(HFSSCloadpage > HFSSCTotalPage){
HFSSCloadpage --;
alert('已经是最后一页');
}else {
$.ajax({
type: "get",
url: url2,
dataType: "json",
jsonp: "callback",
success: function (data) {
HFSSCdataSource = data.data;
HFSSCdataTitle = data.header;
HFSSCTotalPage = data.pageCount;
HFSSCPageNum.placeholder = HFSSCloadpage;
insertHFSSCTable();
},
error: function (XMLHttpRequest, textStatus, errorThrown) {
alert(errorThrown);
}
});
}
}
// Search by date range
HFSSCsubmitDate.onclick = function () {
getDate(HFSSCstartDate,HFSSCendDate);
HFSSCurlStartTime = getDate(HFSSCstartDate,HFSSCendDate)[0],
HFSSCurlEndTime = getDate(HFSSCstartDate,HFSSCendDate)[1];
HFSSCloadpage = 1;
var urlTime = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
$.ajax({
type: "get",
url: urlTime,
dataType: "json",
jsonp:"callback",
success: function (data) {
HFSSCdataSource = data.data;
HFSSCdataTitle = data.header;
HFSSCTotalPage = data.pageCount;
//console.log(HFSSCdataSource);
doc.getElementById('HFSSC_table_top').innerHTML = '';
HFSSCPageNum.placeholder = 1;
HFSSCTopList.length = 0;
insertHFSSCTable();
},
error: function (XMLHttpRequest, textStatus, errorThrown) {
alert(errorThrown);
}
});
}
function isInteger(obj) {
return typeof obj === 'number' && obj%1 === 0 && obj > 0
}
HFSSCconfirm.onclick = function(){
tempPage = HFSSCloadpage;
HFSSCloadpage = parseFloat(HFSSCassignPage.value);
if(isInteger(HFSSCloadpage)){
console.log(HFSSCloadpage);
if(HFSSCloadpage <= HFSSCTotalPage){
var url2 = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
console.log(url2);
$.ajax({
type: "get",
url: url2,
dataType: "json",
jsonp:"callback",
| success: function (data) {
HFSSCdataSource = data.data;
HFSSCdataTitle = data.header;
| random_line_split |
|
huifushishichang.js | ++) {
var th = doc.createElement("th");
var thData;
if(rows == 0){
if(t == 0){
var pp = doc.createElement('div');
//span.innerHTML = '🔝';
th.appendChild(pp);
pp.style.width = '10%';
pp.style.float = 'left';
pp.style.padding = '8px';
//trHead.appendChild(th);
thData = doc.createTextNode(HFSSCdataTitle[t]);
th.appendChild(thData);
th.rowSpan = '2';
th.style.width = '22%';
th.style.verticalAlign = "middle";
th.style.borderRight = '1px #D6D6D6 solid';
th.id = "tdd";
}else if(t == 1){
thData = doc.createTextNode("超过一小时");
th.appendChild(thData);
th.colSpan = '2';
th.style.width = '30%';
}
}else if(rows == 1){
thData = doc.createTextNode(HFSSCdataTitle[t+1]);
th.appendChild(thData);
th.style.width = '15%';
}
th.style.textAlign = "center";
trHead.appendChild(th);
}
thead.appendChild(trHead);
}
// add a row containing the totals of the operation columns
var tr = doc.createElement("tr");
// allOpe: the total number of operations
// allEmergentOpe: the total number of emergent operations
// allChangeOpe: the total number of data-change operations
var allOpe = 0,
allEmergentOpe = 0,
allChangeOpe = 0;
console.log(HFSSCdataSource.length);
for(var i=0;i<HFSSCdataSource.length;i++){
allOpe += HFSSCdataSource[i][1];
allEmergentOpe += HFSSCdataSource[i][2];
allChangeOpe += HFSSCdataSource[i][3];
}
var data = new Array(4);
data[0] = doc.createTextNode(''),
data[1] = doc.createTextNode("合计"),
data[2] = doc.createTextNode(allOpe),
data[3] = doc.createTextNode(allEmergentOpe);
for(var t=0; t<data.length; t++){
var td = doc.createElement("td");
td.title = data[t];
td.appendChild(data[t]);
if(t==0){
td.style.padding = '8px';
}
if(t>0){
td.style.textAlign = "center";
}
tr.appendChild(td);
}
if(HFSSCdataSource.length != 0){
table.appendChild(tr);
}
// note: the detail cell previously used an <a> element, now a plain td
// add data rows
for(var i=0;i<HFSSCdataSource.length;i++){
var tr = doc.createElement("tr");
var td = doc.createElement('td'),
span = doc.createElement('span');
span.innerHTML = '🔝';
td.appendChild(span);
td.style.width = '2%';
tr.appendChild(td);
tr.onclick = function(){
$(this).find('span').css('visibility', 'visible');
};
var tdIndexTemp = (HFSSCloadpage-1) * HFSSCnumPer + i + 1;
if(HFSSCTopList.indexOf(tdIndexTemp) != -1){
$(td).find('span').css('background-color', 'yellow');
$(td).find('span').css('visibility', 'visible');
}
//var param = { i: i, page: SSHDpage, numPer: SSHDnumPer };
var param = { tdIndexTemp: tdIndexTemp };
$(span).click(param, function(event){
//var ii = event.data.i,
// pp = event.data.page,
// np = event.data.numPer;
//var tdIndex = (pp-1) * np + ii + 1;
var tdIndex = event.data.tdIndexTemp;
//console.log('tdIndex', tdIndex, SSHDTopList.indexOf(tdIndex));
if(HFSSCTopList.indexOf(tdIndex) == -1){
$('#HFSSC_table_top').prepend($(this).parent().parent().clone(true));
$(this).css('background-color', 'yellow');
//$(this).css('visibility', 'hidden');
alert('成功置顶');
HFSSCTopList.push(tdIndex);
}else{
alert('该项已置顶');
}
});
for(var j=0;j<HFSSCdataSource[i].length;j++){
var data = doc.createTextNode(HFSSCdataSource[i][j]);
var td = doc.createElement("td");
if(j>0){
var a = doc.createElement("a");
td.title = HFSSCdataSource[i][j];
a.appendChild(data);
td.appendChild(data);
td.style.textAlign = "center";
}else{
td.title = HFSSCdataSource[i][j];
td.appendChild(data);
}
if(j==0){
td.style.width = '20%';
}else{
td.style.width = '15%';
}
tr.appendChild(td);
}
table.appendChild(tr);
}
HFSSCTotal.innerHTML = HFSSCTotalPage;
}
// Pagination
var HFSSCbeforePage = doc.getElementById("HFSSCPageBefore"),
HFSSCnextPage = doc.getElementById("HFSSCPageNext"),
HFSSCPageNum = doc.getElementById("HFSSCPageNum");
HFSSCbeforePage.onclick = function(){
if(HFSSCloadpage==1){alert("已经是第一页");}
else{
HFSSCloadpage --;
//console.log(HFSSCloadpage);
var url2 = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
$.ajax({
type: "get",
url: url2,
dataType: "json",
jsonp:"callback",
success: function (data) {
HFSSCdataSource = data.data;
HFSSCdataTitle = data.header;
HFSSCTotalPage = data.pageCount;
HFSSCPageNum.placeholder = HFSSCloadpage;
insertHFSSCTable();
},
error: function (XMLHttpRequest, textStatus, errorThrown) {
alert(errorThrown);
}
});
}
}
HFSSCnextPage.onclick = function(){
HFSSCloadpage ++;
var url2 = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
if(HFSSCloadpage > HFSSCTotalPage){
HFSSCloadpage --;
alert('已经是最后一页');
}else {
$.ajax({
type: "get",
url: url2,
dataType: "json",
jsonp: "callback",
success: function (data) {
HFSSCdataSource = data.data;
HFSSCdataTitle = data.header;
HFSSCTotalPage = data.pageCount;
HFSSCPageNum.placeholder = HFSSCloadpage;
insertHFSSCTable();
},
error: function (XMLHttpRequest, textStatus, errorThrown) {
alert(errorThrown);
}
});
}
}
// Search by date range
HFSSCsubmitDate.onclick = function () {
getDate(HFSSCstartDate,HFSSCendDate);
HFSSCurlStartTime = getDate(HFSSCstartDate,HFSSCendDate)[0],
HFSSCurlEndTime = getDate(HFSSCstartDate,HFSSCendDate)[1];
HFSSCloadpage = 1;
var urlTime = "http://123.206.134.34:8080/Medicals_war/recovery/morethan1hour?rowCount="+ HFSSCnumPer +"&page="+HFSSCloadpage+"&startTime="+HFSSCurlStartTime+"&endTime="+HFSSCurlEndTime;
$.ajax({
type: "get",
url: urlTime,
dataType: "json",
jsonp:"callback",
success: function (data) {
HFSSCdataSource = data.data;
HFSSCdataTitle = data.header;
HFSSCTotalPage = data.pageCount;
//console.log(HFSSCdataSource);
doc.getElementById('HFSSC_table_top').innerHTML = '';
HFSSCPageNum.placeholder = 1;
HFSSCTopList.length = 0;
insertHFSSCTable();
},
error: function (XMLHttpRequest, textStatus, errorThrown) {
alert(errorThrown);
}
});
}
function isInteger(obj) {
return typeof obj === 'number' && obj%1 === 0 && obj > 0
}
|
HFSSCc | identifier_name |
|
device-stats.js | = 17;
var magnetic_zId = 18;
var gyroscope_xId = 19;
var gyroscope_yId = 20;
var gyroscope_zId = 21;
var lightId = 22;
var pressureId = 23;
var proximityId = 24;
var gravity_xId = 25;
var gravity_yId = 26;
var gravity_zId = 27;
var rotation_xId = 28;
var rotation_yId = 29;
var rotation_zId = 30;
var batteryData = [];
var lightData = [];
var pressureData = [];
var proximityData = [];
var accelerometer_xData = [];
var accelerometer_yData = [];
var accelerometer_zData = [];
var magnetic_xData = [];
var magnetic_yData = [];
var magnetic_zData = [];
var gyroscope_xData = [];
var gyroscope_yData = [];
var gyroscope_zData = [];
var gravity_xData = [];
var gravity_yData = [];
var gravity_zData = [];
var rotation_xData = [];
var rotation_yData = [];
var rotation_zData = [];
var graphMap = {};
var graphSettingsMap = {};
var palette = new Rickshaw.Color.Palette({scheme: "munin"});
var elemTop;
$(window).load(function () {
graphMap["battery"]=lineGraph("battery", batteryData);
graphMap["light"]=lineGraph("light", lightData);
graphMap["pressure"]=lineGraph("pressure", pressureData);
graphMap["proximity"]=lineGraph("proximity", proximityData);
graphMap["accelerometer"]=threeDlineGraph("accelerometer", accelerometer_xData, accelerometer_yData, accelerometer_zData);
graphMap["magnetic"]=threeDlineGraph("magnetic", magnetic_xData, magnetic_yData, magnetic_zData);
graphMap["gyroscope"]=threeDlineGraph("gyroscope", gyroscope_xData, gyroscope_yData, gyroscope_zData);
graphMap["gravity"]=threeDlineGraph("gravity", gravity_xData, gravity_yData, gravity_zData);
graphMap["rotation"]=threeDlineGraph("rotation", rotation_xData, rotation_yData, rotation_zData);
var websocketUrl = $("#stat-section").data("websocketurl");
connect(websocketUrl)
});
window.onbeforeunload = function() {
disconnect();
};
function threeDlineGraph(type, xChartData, yChartData, zChartData) {
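// Seeds the x/y/z series with 30 zero-valued points spaced 15 seconds apart so
// the Rickshaw chart shows a full time window before live readings arrive.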
var tNow = new Date().getTime() / 1000;
for (var i = 0; i < 30; i++) {
xChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
yChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
zChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
}
var $elem = $("#chart-" + type);
var graph = new Rickshaw.Graph({
element: $elem[0],
width: $elem.width() - 100,
height: 300,
renderer: "line",
interpolation: "linear",
padding: {top: 0.2, left: 0.0, right: 0.0, bottom: 0.2},
xScale: d3.time.scale(),
series: [
{'color': palette.color(), 'data': xChartData, 'name': "x - " + type},
{'color': palette.color(), 'data': yChartData, 'name': "y - " + type},
{'color': palette.color(), 'data': zChartData, 'name': "z - " + type}
]
});
var xAxis = new Rickshaw.Graph.Axis.Time({
graph: graph
});
xAxis.render();
new Rickshaw.Graph.Axis.Y({
graph: graph,
orientation: 'left',
height: 300,
tickFormat: Rickshaw.Fixtures.Number.formatKMBT,
element: document.getElementById("y-axis-"+type)
});
new Rickshaw.Graph.Legend({
graph: graph,
element: document.getElementById('legend-' + type)
});
var detail = new Rickshaw.Graph.HoverDetail({
graph: graph
});
new Rickshaw.Graph.HoverDetail({
graph: graph,
formatter: function (series, x, y) {
var date = '<span class="date">' + moment(x * 1000).format('Do MMM YYYY h:mm:ss a') + '</span>';
var swatch = '<span class="detail_swatch" style="background-color: ' + series.color + '"></span>';
return swatch + series.name + ": " + parseInt(y) + '<br>' + date;
}
});
graph.render();
return graph;
}
function lineGraph(type, chartData) {
var tNow = new Date().getTime() / 1000;
for (var i = 0; i < 30; i++) {
chartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
}
var $elem = $("#chart-" + type);
var graph = new Rickshaw.Graph({
element: $elem[0],
width: $elem.width() - 100,
height: 300,
renderer: "line",
interpolation: "linear",
padding: {top: 0.2, left: 0.0, right: 0.0, bottom: 0.2},
xScale: d3.time.scale(),
series: [{
'color': palette.color(),
'data': chartData,
'name': type
}]
});
var xAxis = new Rickshaw.Graph.Axis.Time({
graph: graph
});
xAxis.render();
new Rickshaw.Graph.Axis.Y({
graph: graph,
orientation: 'left',
height: 300,
tickFormat: Rickshaw.Fixtures.Number.formatKMBT,
element: document.getElementById('y-axis-'+type)
});
new Rickshaw.Graph.Legend({
graph: graph,
element: document.getElementById('legend-' + type)
});
new Rickshaw.Graph.HoverDetail({
graph: graph,
formatter: function (series, x, y) {
var date = '<span class="date">' + moment(x * 1000).format('Do MMM YYYY h:mm:ss a') + '</span>';
var swatch = '<span class="detail_swatch" style="background-color: ' + series.color + '"></span>';
return swatch + series.name + ": " + parseInt(y) + '<br>' + date;
}
});
graph.render();
return graph;
}
//websocket connection
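// Falls back to MozWebSocket where needed; each incoming message is parsed and
// routed to the matching chart series by its sensor type field, after which the
// corresponding graph is re-rendered via update().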
function connect(target) {
if ('WebSocket' in window) {
ws = new WebSocket(target);
} else if ('MozWebSocket' in window) {
ws = new MozWebSocket(target);
} else |
if (ws) {
ws.onmessage = function (event) {
var dataPoint = JSON.parse(event.data);
if (dataPoint) {
var time = parseInt(dataPoint[4]) / 1000;
switch (dataPoint[typeId]) {
case "battery":
graphUpdate(batteryData, time, dataPoint[batteryId]);
graphMap["battery"].update();
break;
case "light":
graphUpdate(lightData, time, dataPoint[lightId]);
graphMap["light"].update();
break;
case "pressure":
graphUpdate(pressureData, time, dataPoint[pressureId]);
graphMap["pressure"].update();
break;
case "proximity":
graphUpdate(proximityData, time, dataPoint[proximityId]);
graphMap["proximity"].update();
break;
case "accelerometer":
graphUpdate(accelerometer_xData, time, dataPoint[accelerometer_xId]);
graphUpdate(accelerometer_yData, time, dataPoint[accelerometer_yId]);
graphUpdate(accelerometer_zData, time, dataPoint[accelerometer_zId]);
graphMap["accelerometer"].update();
break;
case "magnetic":
graphUpdate(magnetic_xData, time, dataPoint[magnetic_xId]);
graphUpdate(magnetic_yData, time, dataPoint[magnetic_yId]);
graphUpdate(magnetic_zData, time, dataPoint[magnetic_zId]);
graphMap["magnetic"].update();
break;
case "gyroscope":
graphUpdate(gyroscope_xData, time, dataPoint[gyroscope_xId]);
graphUpdate(gyroscope_yData, time, dataPoint[gyroscope_yId]);
graphUpdate(gyroscope_zData, time, dataPoint[gyroscope_zId]);
graphMap["gyroscope"].update();
break;
case "rotation":
graphUpdate(rotation_xData, time, dataPoint[rotation_xId]);
graphUpdate(rotation_yData, time, dataPoint[rotation_yId]);
graphUpdate(rotation_zData, time, dataPoint[rotation_zId]);
graphMap["rotation"].update();
break;
case "gravity":
graphUpdate(gr | {
console.log('WebSocket is not supported by this browser.');
} | conditional_block |
device-stats.js | Id = 17;
var magnetic_zId = 18;
var gyroscope_xId = 19;
var gyroscope_yId = 20;
var gyroscope_zId = 21;
var lightId = 22;
var pressureId = 23;
var proximityId = 24;
var gravity_xId = 25;
var gravity_yId = 26;
var gravity_zId = 27;
var rotation_xId = 28;
var rotation_yId = 29;
var rotation_zId = 30;
var batteryData = [];
var lightData = [];
var pressureData = [];
var proximityData = [];
var accelerometer_xData = [];
var accelerometer_yData = [];
var accelerometer_zData = [];
var magnetic_xData = [];
var magnetic_yData = [];
var magnetic_zData = [];
var gyroscope_xData = [];
var gyroscope_yData = [];
var gyroscope_zData = [];
var gravity_xData = [];
var gravity_yData = [];
var gravity_zData = [];
|
var graphMap = {};
var graphSettingsMap = {};
var palette = new Rickshaw.Color.Palette({scheme: "munin"});
var elemTop;
$(window).load(function () {
graphMap["battery"]=lineGraph("battery", batteryData);
graphMap["light"]=lineGraph("light", lightData);
graphMap["pressure"]=lineGraph("pressure", pressureData);
graphMap["proximity"]=lineGraph("proximity", proximityData);
graphMap["accelerometer"]=threeDlineGraph("accelerometer", accelerometer_xData, accelerometer_yData, accelerometer_zData);
graphMap["magnetic"]=threeDlineGraph("magnetic", magnetic_xData, magnetic_yData, magnetic_zData);
graphMap["gyroscope"]=threeDlineGraph("gyroscope", gyroscope_xData, gyroscope_yData, gyroscope_zData);
graphMap["gravity"]=threeDlineGraph("gravity", gravity_xData, gravity_yData, gravity_zData);
graphMap["rotation"]=threeDlineGraph("rotation", rotation_xData, rotation_yData, rotation_zData);
var websocketUrl = $("#stat-section").data("websocketurl");
connect(websocketUrl)
});
window.onbeforeunload = function() {
disconnect();
};
function threeDlineGraph(type, xChartData, yChartData, zChartData) {
var tNow = new Date().getTime() / 1000;
for (var i = 0; i < 30; i++) {
xChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
yChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
zChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
}
var $elem = $("#chart-" + type);
var graph = new Rickshaw.Graph({
element: $elem[0],
width: $elem.width() - 100,
height: 300,
renderer: "line",
interpolation: "linear",
padding: {top: 0.2, left: 0.0, right: 0.0, bottom: 0.2},
xScale: d3.time.scale(),
series: [
{'color': palette.color(), 'data': xChartData, 'name': "x - " + type},
{'color': palette.color(), 'data': yChartData, 'name': "y - " + type},
{'color': palette.color(), 'data': zChartData, 'name': "z - " + type}
]
});
var xAxis = new Rickshaw.Graph.Axis.Time({
graph: graph
});
xAxis.render();
new Rickshaw.Graph.Axis.Y({
graph: graph,
orientation: 'left',
height: 300,
tickFormat: Rickshaw.Fixtures.Number.formatKMBT,
element: document.getElementById("y-axis-"+type)
});
new Rickshaw.Graph.Legend({
graph: graph,
element: document.getElementById('legend-' + type)
});
var detail = new Rickshaw.Graph.HoverDetail({
graph: graph
});
new Rickshaw.Graph.HoverDetail({
graph: graph,
formatter: function (series, x, y) {
var date = '<span class="date">' + moment(x * 1000).format('Do MMM YYYY h:mm:ss a') + '</span>';
var swatch = '<span class="detail_swatch" style="background-color: ' + series.color + '"></span>';
return swatch + series.name + ": " + parseInt(y) + '<br>' + date;
}
});
graph.render();
return graph;
}
function lineGraph(type, chartData) {
var tNow = new Date().getTime() / 1000;
for (var i = 0; i < 30; i++) {
chartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
}
var $elem = $("#chart-" + type);
var graph = new Rickshaw.Graph({
element: $elem[0],
width: $elem.width() - 100,
height: 300,
renderer: "line",
interpolation: "linear",
padding: {top: 0.2, left: 0.0, right: 0.0, bottom: 0.2},
xScale: d3.time.scale(),
series: [{
'color': palette.color(),
'data': chartData,
'name': type
}]
});
var xAxis = new Rickshaw.Graph.Axis.Time({
graph: graph
});
xAxis.render();
new Rickshaw.Graph.Axis.Y({
graph: graph,
orientation: 'left',
height: 300,
tickFormat: Rickshaw.Fixtures.Number.formatKMBT,
element: document.getElementById('y-axis-'+type)
});
new Rickshaw.Graph.Legend({
graph: graph,
element: document.getElementById('legend-' + type)
});
new Rickshaw.Graph.HoverDetail({
graph: graph,
formatter: function (series, x, y) {
var date = '<span class="date">' + moment(x * 1000).format('Do MMM YYYY h:mm:ss a') + '</span>';
var swatch = '<span class="detail_swatch" style="background-color: ' + series.color + '"></span>';
return swatch + series.name + ": " + parseInt(y) + '<br>' + date;
}
});
graph.render();
return graph;
}
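// Illustrative sketch only: graphUpdate itself is defined elsewhere in this file; a
// typical implementation appends the newest sample and drops the oldest so each
// series keeps a fixed-size rolling window, e.g.:
//   function graphUpdate(chartData, time, value) {
//       chartData.push({x: time, y: parseFloat(value)});
//       chartData.shift();
//   }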
//websocket connection
function connect(target) {
if ('WebSocket' in window) {
ws = new WebSocket(target);
} else if ('MozWebSocket' in window) {
ws = new MozWebSocket(target);
} else {
console.log('WebSocket is not supported by this browser.');
}
if (ws) {
ws.onmessage = function (event) {
var dataPoint = JSON.parse(event.data);
if (dataPoint) {
var time = parseInt(dataPoint[4]) / 1000;
switch (dataPoint[typeId]) {
case "battery":
graphUpdate(batteryData, time, dataPoint[batteryId]);
graphMap["battery"].update();
break;
case "light":
graphUpdate(lightData, time, dataPoint[lightId]);
graphMap["light"].update();
break;
case "pressure":
graphUpdate(pressureData, time, dataPoint[pressureId]);
graphMap["pressure"].update();
break;
case "proximity":
graphUpdate(proximityData, time, dataPoint[proximityId]);
graphMap["proximity"].update();
break;
case "accelerometer":
graphUpdate(accelerometer_xData, time, dataPoint[accelerometer_xId]);
graphUpdate(accelerometer_yData, time, dataPoint[accelerometer_yId]);
graphUpdate(accelerometer_zData, time, dataPoint[accelerometer_zId]);
graphMap["accelerometer"].update();
break;
case "magnetic":
graphUpdate(magnetic_xData, time, dataPoint[magnetic_xId]);
graphUpdate(magnetic_yData, time, dataPoint[magnetic_yId]);
graphUpdate(magnetic_zData, time, dataPoint[magnetic_zId]);
graphMap["magnetic"].update();
break;
case "gyroscope":
graphUpdate(gyroscope_xData, time, dataPoint[gyroscope_xId]);
graphUpdate(gyroscope_yData, time, dataPoint[gyroscope_yId]);
graphUpdate(gyroscope_zData, time, dataPoint[gyroscope_zId]);
graphMap["gyroscope"].update();
break;
case "rotation":
graphUpdate(rotation_xData, time, dataPoint[rotation_xId]);
graphUpdate(rotation_yData, time, dataPoint[rotation_yId]);
graphUpdate(rotation_zData, time, dataPoint[rotation_zId]);
graphMap["rotation"].update();
break;
case "gravity":
graphUpdate(gr | var rotation_xData = [];
var rotation_yData = [];
var rotation_zData = []; | random_line_split |
device-stats.js | Id = 17;
var magnetic_zId = 18;
var gyroscope_xId = 19;
var gyroscope_yId = 20;
var gyroscope_zId = 21;
var lightId = 22;
var pressureId = 23;
var proximityId = 24;
var gravity_xId = 25;
var gravity_yId = 26;
var gravity_zId = 27;
var rotation_xId = 28;
var rotation_yId = 29;
var rotation_zId = 30;
var batteryData = [];
var lightData = [];
var pressureData = [];
var proximityData = [];
var accelerometer_xData = [];
var accelerometer_yData = [];
var accelerometer_zData = [];
var magnetic_xData = [];
var magnetic_yData = [];
var magnetic_zData = [];
var gyroscope_xData = [];
var gyroscope_yData = [];
var gyroscope_zData = [];
var gravity_xData = [];
var gravity_yData = [];
var gravity_zData = [];
var rotation_xData = [];
var rotation_yData = [];
var rotation_zData = [];
var graphMap = {};
var graphSettingsMap = {};
var palette = new Rickshaw.Color.Palette({scheme: "munin"});
var elemTop;
$(window).load(function () {
graphMap["battery"]=lineGraph("battery", batteryData);
graphMap["light"]=lineGraph("light", lightData);
graphMap["pressure"]=lineGraph("pressure", pressureData);
graphMap["proximity"]=lineGraph("proximity", proximityData);
graphMap["accelerometer"]=threeDlineGraph("accelerometer", accelerometer_xData, accelerometer_yData, accelerometer_zData);
graphMap["magnetic"]=threeDlineGraph("magnetic", magnetic_xData, magnetic_yData, magnetic_zData);
graphMap["gyroscope"]=threeDlineGraph("gyroscope", gyroscope_xData, gyroscope_yData, gyroscope_zData);
graphMap["gravity"]=threeDlineGraph("gravity", gravity_xData, gravity_yData, gravity_zData);
graphMap["rotation"]=threeDlineGraph("rotation", rotation_xData, rotation_yData, rotation_zData);
var websocketUrl = $("#stat-section").data("websocketurl");
connect(websocketUrl)
});
window.onbeforeunload = function() {
disconnect();
};
function threeDlineGraph(type, xChartData, yChartData, zChartData) | element: $elem[0],
width: $elem.width() - 100,
height: 300,
renderer: "line",
interpolation: "linear",
padding: {top: 0.2, left: 0.0, right: 0.0, bottom: 0.2},
xScale: d3.time.scale(),
series: [
{'color': palette.color(), 'data': xChartData, 'name': "x - " + type},
{'color': palette.color(), 'data': yChartData, 'name': "y - " + type},
{'color': palette.color(), 'data': zChartData, 'name': "z - " + type}
]
});
var xAxis = new Rickshaw.Graph.Axis.Time({
graph: graph
});
xAxis.render();
new Rickshaw.Graph.Axis.Y({
graph: graph,
orientation: 'left',
height: 300,
tickFormat: Rickshaw.Fixtures.Number.formatKMBT,
element: document.getElementById("y-axis-"+type)
});
new Rickshaw.Graph.Legend({
graph: graph,
element: document.getElementById('legend-' + type)
});
var detail = new Rickshaw.Graph.HoverDetail({
graph: graph
});
new Rickshaw.Graph.HoverDetail({
graph: graph,
formatter: function (series, x, y) {
var date = '<span class="date">' + moment(x * 1000).format('Do MMM YYYY h:mm:ss a') + '</span>';
var swatch = '<span class="detail_swatch" style="background-color: ' + series.color + '"></span>';
return swatch + series.name + ": " + parseInt(y) + '<br>' + date;
}
});
graph.render();
return graph;
}
function lineGraph(type, chartData) {
var tNow = new Date().getTime() / 1000;
for (var i = 0; i < 30; i++) {
chartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
}
var $elem = $("#chart-" + type);
var graph = new Rickshaw.Graph({
element: $elem[0],
width: $elem.width() - 100,
height: 300,
renderer: "line",
interpolation: "linear",
padding: {top: 0.2, left: 0.0, right: 0.0, bottom: 0.2},
xScale: d3.time.scale(),
series: [{
'color': palette.color(),
'data': chartData,
'name': type
}]
});
var xAxis = new Rickshaw.Graph.Axis.Time({
graph: graph
});
xAxis.render();
new Rickshaw.Graph.Axis.Y({
graph: graph,
orientation: 'left',
height: 300,
tickFormat: Rickshaw.Fixtures.Number.formatKMBT,
element: document.getElementById('y-axis-'+type)
});
new Rickshaw.Graph.Legend({
graph: graph,
element: document.getElementById('legend-' + type)
});
new Rickshaw.Graph.HoverDetail({
graph: graph,
formatter: function (series, x, y) {
var date = '<span class="date">' + moment(x * 1000).format('Do MMM YYYY h:mm:ss a') + '</span>';
var swatch = '<span class="detail_swatch" style="background-color: ' + series.color + '"></span>';
return swatch + series.name + ": " + parseInt(y) + '<br>' + date;
}
});
graph.render();
return graph;
}
//websocket connection
function connect(target) {
if ('WebSocket' in window) {
ws = new WebSocket(target);
} else if ('MozWebSocket' in window) {
ws = new MozWebSocket(target);
} else {
console.log('WebSocket is not supported by this browser.');
}
if (ws) {
ws.onmessage = function (event) {
var dataPoint = JSON.parse(event.data);
if (dataPoint) {
var time = parseInt(dataPoint[4]) / 1000;
switch (dataPoint[typeId]) {
case "battery":
graphUpdate(batteryData, time, dataPoint[batteryId]);
graphMap["battery"].update();
break;
case "light":
graphUpdate(lightData, time, dataPoint[lightId]);
graphMap["light"].update();
break;
case "pressure":
graphUpdate(pressureData, time, dataPoint[pressureId]);
graphMap["pressure"].update();
break;
case "proximity":
graphUpdate(proximityData, time, dataPoint[proximityId]);
graphMap["proximity"].update();
break;
case "accelerometer":
graphUpdate(accelerometer_xData, time, dataPoint[accelerometer_xId]);
graphUpdate(accelerometer_yData, time, dataPoint[accelerometer_yId]);
graphUpdate(accelerometer_zData, time, dataPoint[accelerometer_zId]);
graphMap["accelerometer"].update();
break;
case "magnetic":
graphUpdate(magnetic_xData, time, dataPoint[magnetic_xId]);
graphUpdate(magnetic_yData, time, dataPoint[magnetic_yId]);
graphUpdate(magnetic_zData, time, dataPoint[magnetic_zId]);
graphMap["magnetic"].update();
break;
case "gyroscope":
graphUpdate(gyroscope_xData, time, dataPoint[gyroscope_xId]);
graphUpdate(gyroscope_yData, time, dataPoint[gyroscope_yId]);
graphUpdate(gyroscope_zData, time, dataPoint[gyroscope_zId]);
graphMap["gyroscope"].update();
break;
case "rotation":
graphUpdate(rotation_xData, time, dataPoint[rotation_xId]);
graphUpdate(rotation_yData, time, dataPoint[rotation_yId]);
graphUpdate(rotation_zData, time, dataPoint[rotation_zId]);
graphMap["rotation"].update();
break;
case "gravity":
graphUpdate(gr | {
var tNow = new Date().getTime() / 1000;
for (var i = 0; i < 30; i++) {
xChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
yChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
zChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
}
var $elem = $("#chart-" + type);
var graph = new Rickshaw.Graph({ | identifier_body |
device-stats.js | = 25;
var gravity_yId = 26;
var gravity_zId = 27;
var rotation_xId = 28;
var rotation_yId = 29;
var rotation_zId = 30;
var batteryData = [];
var lightData = [];
var pressureData = [];
var proximityData = [];
var accelerometer_xData = [];
var accelerometer_yData = [];
var accelerometer_zData = [];
var magnetic_xData = [];
var magnetic_yData = [];
var magnetic_zData = [];
var gyroscope_xData = [];
var gyroscope_yData = [];
var gyroscope_zData = [];
var gravity_xData = [];
var gravity_yData = [];
var gravity_zData = [];
var rotation_xData = [];
var rotation_yData = [];
var rotation_zData = [];
var graphMap = {};
var graphSettingsMap = {};
var palette = new Rickshaw.Color.Palette({scheme: "munin"});
var elemTop;
$(window).load(function () {
graphMap["battery"]=lineGraph("battery", batteryData);
graphMap["light"]=lineGraph("light", lightData);
graphMap["pressure"]=lineGraph("pressure", pressureData);
graphMap["proximity"]=lineGraph("proximity", proximityData);
graphMap["accelerometer"]=threeDlineGraph("accelerometer", accelerometer_xData, accelerometer_yData, accelerometer_zData);
graphMap["magnetic"]=threeDlineGraph("magnetic", magnetic_xData, magnetic_yData, magnetic_zData);
graphMap["gyroscope"]=threeDlineGraph("gyroscope", gyroscope_xData, gyroscope_yData, gyroscope_zData);
graphMap["gravity"]=threeDlineGraph("gravity", gravity_xData, gravity_yData, gravity_zData);
graphMap["rotation"]=threeDlineGraph("rotation", rotation_xData, rotation_yData, rotation_zData);
var websocketUrl = $("#stat-section").data("websocketurl");
connect(websocketUrl)
});
window.onbeforeunload = function() {
disconnect();
};
function threeDlineGraph(type, xChartData, yChartData, zChartData) {
var tNow = new Date().getTime() / 1000;
for (var i = 0; i < 30; i++) {
xChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
yChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
zChartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
}
var $elem = $("#chart-" + type);
var graph = new Rickshaw.Graph({
element: $elem[0],
width: $elem.width() - 100,
height: 300,
renderer: "line",
interpolation: "linear",
padding: {top: 0.2, left: 0.0, right: 0.0, bottom: 0.2},
xScale: d3.time.scale(),
series: [
{'color': palette.color(), 'data': xChartData, 'name': "x - " + type},
{'color': palette.color(), 'data': yChartData, 'name': "y - " + type},
{'color': palette.color(), 'data': zChartData, 'name': "z - " + type}
]
});
var xAxis = new Rickshaw.Graph.Axis.Time({
graph: graph
});
xAxis.render();
new Rickshaw.Graph.Axis.Y({
graph: graph,
orientation: 'left',
height: 300,
tickFormat: Rickshaw.Fixtures.Number.formatKMBT,
element: document.getElementById("y-axis-"+type)
});
new Rickshaw.Graph.Legend({
graph: graph,
element: document.getElementById('legend-' + type)
});
var detail = new Rickshaw.Graph.HoverDetail({
graph: graph
});
new Rickshaw.Graph.HoverDetail({
graph: graph,
formatter: function (series, x, y) {
var date = '<span class="date">' + moment(x * 1000).format('Do MMM YYYY h:mm:ss a') + '</span>';
var swatch = '<span class="detail_swatch" style="background-color: ' + series.color + '"></span>';
return swatch + series.name + ": " + parseInt(y) + '<br>' + date;
}
});
graph.render();
return graph;
}
function lineGraph(type, chartData) {
var tNow = new Date().getTime() / 1000;
for (var i = 0; i < 30; i++) {
chartData.push({
x: tNow - (30 - i) * 15,
y: parseFloat(0)
});
}
var $elem = $("#chart-" + type);
var graph = new Rickshaw.Graph({
element: $elem[0],
width: $elem.width() - 100,
height: 300,
renderer: "line",
interpolation: "linear",
padding: {top: 0.2, left: 0.0, right: 0.0, bottom: 0.2},
xScale: d3.time.scale(),
series: [{
'color': palette.color(),
'data': chartData,
'name': type
}]
});
var xAxis = new Rickshaw.Graph.Axis.Time({
graph: graph
});
xAxis.render();
new Rickshaw.Graph.Axis.Y({
graph: graph,
orientation: 'left',
height: 300,
tickFormat: Rickshaw.Fixtures.Number.formatKMBT,
element: document.getElementById('y-axis-'+type)
});
new Rickshaw.Graph.Legend({
graph: graph,
element: document.getElementById('legend-' + type)
});
new Rickshaw.Graph.HoverDetail({
graph: graph,
formatter: function (series, x, y) {
var date = '<span class="date">' + moment(x * 1000).format('Do MMM YYYY h:mm:ss a') + '</span>';
var swatch = '<span class="detail_swatch" style="background-color: ' + series.color + '"></span>';
return swatch + series.name + ": " + parseInt(y) + '<br>' + date;
}
});
graph.render();
return graph;
}
//websocket connection
function connect(target) {
if ('WebSocket' in window) {
ws = new WebSocket(target);
} else if ('MozWebSocket' in window) {
ws = new MozWebSocket(target);
} else {
console.log('WebSocket is not supported by this browser.');
}
if (ws) {
ws.onmessage = function (event) {
var dataPoint = JSON.parse(event.data);
if (dataPoint) {
var time = parseInt(dataPoint[4]) / 1000;
switch (dataPoint[typeId]) {
case "battery":
graphUpdate(batteryData, time, dataPoint[batteryId]);
graphMap["battery"].update();
break;
case "light":
graphUpdate(lightData, time, dataPoint[lightId]);
graphMap["light"].update();
break;
case "pressure":
graphUpdate(pressureData, time, dataPoint[pressureId]);
graphMap["pressure"].update();
break;
case "proximity":
graphUpdate(proximityData, time, dataPoint[proximityId]);
graphMap["proximity"].update();
break;
case "accelerometer":
graphUpdate(accelerometer_xData, time, dataPoint[accelerometer_xId]);
graphUpdate(accelerometer_yData, time, dataPoint[accelerometer_yId]);
graphUpdate(accelerometer_zData, time, dataPoint[accelerometer_zId]);
graphMap["accelerometer"].update();
break;
case "magnetic":
graphUpdate(magnetic_xData, time, dataPoint[magnetic_xId]);
graphUpdate(magnetic_yData, time, dataPoint[magnetic_yId]);
graphUpdate(magnetic_zData, time, dataPoint[magnetic_zId]);
graphMap["magnetic"].update();
break;
case "gyroscope":
graphUpdate(gyroscope_xData, time, dataPoint[gyroscope_xId]);
graphUpdate(gyroscope_yData, time, dataPoint[gyroscope_yId]);
graphUpdate(gyroscope_zData, time, dataPoint[gyroscope_zId]);
graphMap["gyroscope"].update();
break;
case "rotation":
graphUpdate(rotation_xData, time, dataPoint[rotation_xId]);
graphUpdate(rotation_yData, time, dataPoint[rotation_yId]);
graphUpdate(rotation_zData, time, dataPoint[rotation_zId]);
graphMap["rotation"].update();
break;
case "gravity":
graphUpdate(gravity_xData, time, dataPoint[gravity_xId]);
graphUpdate(gravity_yData, time, dataPoint[gravity_yId]);
graphUpdate(gravity_zData, time, dataPoint[gravity_zId]);
graphMap["gravity"].update();
break;
}
}
};
}
}
function | graphUpdate | identifier_name |
|
db_helpher.go | (1).FindAll()
return data
}
log.Error("mysql not connect\r\n")
return empty
}
// Insert data
func (m *Model) Insert(param map[string]interface{}) (num int, err error) {
if m.db == nil {
log.Error("mysql not connect\r\n")
return 0, errors.New("IN Insert, mysql not connect")
} | }
for key, value := range param {
keys = append(keys, key)
switch value.(type) {
case int, int64, int32, uint64, uint32:
// fmt.Sprintf("%d", ...) handles any integer width; the original value.(int)/value.(uint64)
// assertions panic when the map holds one of the other listed integer types.
values = append(values, fmt.Sprintf("%d", value))
case string:
values = append(values, "'" + value.(string) + "'")
//case float32, float64:
// values = append(values, strconv.FormatFloat(value.(float64), 'f', -1, 64))
}
}
fileValue := strings.Join(values, ",")
fileds := "`" + strings.Join(keys, "`,`") + "`"
sql := fmt.Sprintf("INSERT INTO %v (%v) VALUES (%v);", m.tablename, fileds, fileValue)
var query = strings.TrimSpace(sql)
fmt.Printf("insert sql :%s\n", query)
//result, err := m.db.Exec(sql)
result, err := m.db.Exec(query)
if err != nil {
defer func() {
if err := recover(); err != nil {
fmt.Printf("SQL syntax errors ")
}
}()
err = errors.New("insert sql failure")
log.Error("insert sql failure, error: %s", err)
return 0, err
}
//i, err := result.LastInsertId()
i, err := result.RowsAffected()
s, _ := strconv.Atoi(strconv.FormatInt(i, 10))
if err != nil {
err = errors.New("insert failure")
}
return s, err
}
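// Illustrative usage sketch (not part of the original file; the table and column
// names below are placeholders):
//   n, err := Connect.SetTable("UserInfo").Insert(map[string]interface{}{
//       "Username": "demo",
//       "Nounce":   0,
//   })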
// Specify the fields to select
func (m *Model) Fileds(param ...string) *Model {
m.param = param
return m
}
// Update table data
func (m *Model) Update(param map[string]interface{}) (num int, err error) {
if m.db == nil {
return 0, errors.New("mysql not connect")
}
var setValue []string
for key, value := range param {
switch value.(type) {
case int, int64, int32:
// %v formats any of the listed integer types; the original value.(int) assertion panics on int32/int64
set := fmt.Sprintf("%v = %v", key, value)
setValue = append(setValue, set)
case string:
set := fmt.Sprintf("%v = '%v'", key, value.(string))
setValue = append(setValue, set)
//case float32, float64:
// set := fmt.Sprintf("%v = '%v'", key, strconv.FormatFloat(value.(float64), 'f', -1, 64))
// setValue = append(setValue, set)
}
}
setData := strings.Join(setValue, ",")
sql := fmt.Sprintf("UPDATE %v SET %v %v", m.tablename, setData, m.where)
fmt.Printf("update_sql :%s\n", sql)
result, err := m.db.Exec(sql)
if err != nil {
defer func() {
if err := recover(); err != nil {
fmt.Printf("SQL syntax errors ")
}
}()
err = errors.New("update sql failure")
return 0, err
}
i, err := result.RowsAffected()
if err != nil {
err = errors.New("update failure")
log.Error("update tabledata error:%s", err)
return 0, err
}
s, _ := strconv.Atoi(strconv.FormatInt(i, 10))
return s, err
}
// Delete data
func (m *Model) Delete(param string) (num int, err error) {
if m.db == nil {
return 0, errors.New("mysql not connect")
}
h := m.Where(param).FindOne()
if len(h) == 0 {
return 0, errors.New("no Value")
}
sql := fmt.Sprintf("DELETE FROM %v WHERE %v", m.tablename, param)
result, err := m.db.Exec(sql)
if err != nil {
defer func() {
if err := recover(); err != nil {
fmt.Printf("SQL syntax errors: %+v", err)
log.Error("SQL syntax errors:%+v", err)
}
}()
err = errors.New("delete sql failure")
return 0, err
}
i, err := result.RowsAffected()
s, _ := strconv.Atoi(strconv.FormatInt(i, 10))
if i == 0 {
err = errors.New("delete failure")
}
return s, err
}
// Execute a custom SQL statement
func (m *Model) Query(sql string) interface{} {
if m.db == nil {
return errors.New("mysql not connect")
}
var query = strings.TrimSpace(sql)
s, err := regexp.MatchString(`(?i)^(select|call)`, query)
if nil == err && s {
result, _ := m.db.Query(sql)
defer result.Close()
c := QueryResult(result)
return c
}
exec, err := regexp.MatchString(`(?i)^(update|delete)`, query)
if nil == err && exec {
m_exec, err := m.db.Exec(query)
if err != nil {
return err
}
num, _ := m_exec.RowsAffected()
id := strconv.FormatInt(num, 10)
return id
}
insert, err := regexp.MatchString(`(?i)^insert`, query)
if nil == err && insert {
m_exec, err := m.db.Exec(query)
if err != nil {
return err
}
num, _ := m_exec.LastInsertId()
id := strconv.FormatInt(num, 10)
return id
}
result, _ := m.db.Exec(query)
return result
}
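// Illustrative usage sketch (not part of the original file): the return type depends
// on which statement class the regexes above matched, so callers type-assert on the
// result. Table and column names are placeholders.
//   rows := Connect.Query("SELECT * FROM UserInfo LIMIT 1")                    // map[int]map[string]string
//   affected := Connect.Query("UPDATE UserInfo SET Nounce = 1 WHERE Id = 1")   // string: rows affected
//   lastId := Connect.Query("INSERT INTO UserInfo (Username) VALUES ('demo')") // string: last insert id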
// Return the result set of an executed SQL statement
func QueryResult(rows *sql.Rows) map[int]map[string]string {
var result = make(map[int]map[string]string)
columns, _ := rows.Columns()
values := make([]sql.RawBytes, len(columns))
scanargs := make([]interface{}, len(values))
for i := range values {
scanargs[i] = &values[i]
}
var n = 1
for rows.Next() {
result[n] = make(map[string]string)
err := rows.Scan(scanargs...)
if err != nil {
fmt.Println(err)
}
for i, v := range values {
result[n][columns[i]] = string(v)
}
n++
}
return result
}
// Specify the table to query
func (m *Model) SetTable(tablename string) *Model {
m.tablename = tablename
return m
}
// Set the WHERE condition
func (m *Model) Where(param string) *Model {
m.where = fmt.Sprintf(" where %v", param)
return m
}
/*
// Set the auto-increment primary key field
func (m *Model) SetPk(pk string) *Model {
m.pk = pk
return m
}*/
// Set the ordering (ORDER BY)
func (m *Model) OrderBy(param string) *Model {
m.orderby = fmt.Sprintf("ORDER BY %v", param)
return m
}
// Set the number of rows to return (LIMIT)
func (m *Model) Limit(size ...int) *Model {
var end int
start := size[0]
//fmt.Printf("=========len(size): %d=========\n", len(size))
if len(size) > 1 {
end = size[1]
m.limit = fmt.Sprintf("Limit %d,%d", start, end)
return m
}
m.limit = fmt.Sprintf("Limit %d", start)
return m
}
/*
// Left join
func (m *Model) LeftJoin(table, condition string) *Model {
m.join = fmt.Sprintf("LEFT JOIN %v ON %v", table, condition)
return m
}
// Right join
func (m *Model) RightJoin(table, condition string) *Model {
m.join = fmt.Sprintf("RIGHT JOIN %v ON %v", table, condition)
return m
}
// Inner join
func (m *Model) Join(table, condition string) *Model {
m.join = fmt.Sprintf("INNER JOIN %v ON %v", table, condition)
return m
}
// Full outer join
func (m *Model) FullJoin(table, condition string) *Model {
m.join = fmt.Sprintf("FULL JOIN %v ON %v", table, condition)
return m
}
*/
// Print the result set to the screen
func Print(slice map[int]map[string]string) {
for _, v := range slice {
for key, value := range v {
fmt.Println(key, value)
}
fmt.Println("---------------")
}
}
// Close the database
//func (m *Model) DbClose() {
// m.db.Close()
//}
// Compute the time difference in seconds
func getSecondDiffer(start_time string, end_time string) int64 {
var second int64
t1, err := time.ParseInLocation(FormatNormalTime, start_time, time.Local)
| var keys []string
var values []string
if len(m.pk) != 0 {
delete(param, m.pk) | random_line_split |
db_helpher.go | ([]string{"AppID = '" + appid + StatusIsZero}, "")).FindOne()
info := data[1]
if nil == info {
fmt.Println("Invalid Authentication input information")
return MSG_INVALID_INPUTINFO
}
// Do not read this from the cache, otherwise it creates a circular dependency between the eth and db packages
// Read the user info from the database and hash it; if the hash of the parsed public parameters equals the hash built from the stored record, authentication succeeds
plaintext := strings.Join([]string{info[AppID] + ":" + info[PassWord]+ ":" + info[TimeStamp]}, "")
hash := sha256.New()
hash.Write([]byte(plaintext))
md := hash.Sum(nil)
mdStr := hex.EncodeToString(md)
if sign != mdStr {
fmt.Println("Verification failed!")
return MSG_VERIFY_FAILED
}
fmt.Println("Verification success!")
return MSG_SUCCESS
}
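// Illustrative sketch only (not part of the original file): how a client is expected
// to derive the "sign" query parameter that Authentication verifies above. The
// timestamp must be the TimeStamp value stored for the account, and the function
// name is hypothetical.
func exampleComputeSign(appid, password, storedTimestamp string) string {
	plaintext := appid + ":" + password + ":" + storedTimestamp
	h := sha256.New()
	h.Write([]byte(plaintext))
	return hex.EncodeToString(h.Sum(nil))
}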
// Create a new user
func NewUserDBInsert (username string, password string, account string, keypath string) {
tm := time.Now().Format(FormatNormalTime)
var newuser = make(map[string]interface{})
newuser[UserName] = username
newuser[PassWord] = password
newuser[AccountStatus] = MSG_ACTIVE
newuser[WalletAddress] = account
newuser[TimeStamp] = tm
newuser[Keypath] = keypath
newuser[Nounce] = 0
t := Connect.SetTable(UserInfo)
data := t.Fileds(UserName, PassWord, AccountStatus, WalletAddress, TimeStamp, Keypath, Nounce).Where(strings.Join([]string{UsernameEqual + username + "'"}, "")).FindOne()
if len(data) == 0{
_,err := t.Insert(newuser)
if err != nil {
log.Error("openaccount insert value to UserInfo failed, error :%s", err)
}
}
}
// Update account balances and the transaction status
func UpdateBalanceTXstatus(serial_num string, transferCurrency string, transferor string, receiptor string, TXstatus int, amount int64) (bool) {
var value = make(map[string]interface{})
if MSG_CONFIRMED == TXstatus {
value[TXStatus] = MSG_CONFIRMED
n, err := Connect.SetTable(TransactionDetail).Where(strings.Join([]string{"serial_number = '" + serial_num + "'"}, "")).Update(value)
fmt.Printf("n :%d\n", n)
if err != nil {
fmt.Printf("UpdateBalance TXstatus(1) MSG_CONFIRMED to transaction_detail failed: %s\n", err)
return false
}
UpdateBalance(transferCurrency, transferor, -1*amount)
UpdateBalance(transferCurrency, receiptor, amount)
} else if MSG_AWAIT_CONFIRM == TXstatus{
value[TXStatus] = MSG_OVERTIME
_, err := Connect.SetTable(TransactionDetail).Where(strings.Join([]string{"serial_num = '" + serial_num + "'"}, "")).Update(value)
if err != nil {
fmt.Println("UpdateBalance TXstatus(0) MSG_AWAIT_CONFIRM to transaction_detail failed:")
}
fmt.Println("The transaction has been automatically cancelled for timeout.")
return false
}else {
fmt.Println("An unknown error occurred and the transaction has been cancelled.")
return false
}
return true
}
// Look up the account balance before the operation
func QueryBalance(tablename string, username string) (balance_before int){
var data map[int]map[string]string
var result string
for key := range Currencies {
switch key {
case ethernet: // look up the Ether account balance
data = Connect.SetTable(tablename).Fileds("Ethernet_Current_Balance").Where(strings.Join([]string{UsernameEqual + username + StatusIsZero}, "")).FindOne()
if len(data) != 0 {
result = data[1]["Ethernet_Current_Balance"]
ethBalance, err := strconv.Atoi(result)
fmt.Printf("ethBalance :%d\n", ethBalance)
if (nil == err) && (0 != ethBalance){
balance_before = ethBalance
fmt.Println("Ethernet_Current_Balance :", ethBalance)
return balance_before
}
}
case bit: // look up the Bitcoin account balance
data = Connect.SetTable(tablename).Fileds("Bitcoin_Current_Balance").Where(strings.Join([]string{UsernameEqual + username + StatusIsZero}, "")).FindOne()
if len(data) != 0 {
result = data[1]["Bitcoin_Current_Balance"]
bitBalance, err := strconv.Atoi(result)
fmt.Printf("bitBalance :%d\n", bitBalance)
if (err == nil) && (0 != bitBalance){
balance_before = bitBalance
fmt.Println("Bitcoin_Current_Balance :", bitBalance)
return balance_before
}
}
default:
return 0
}
}
return balance_before
}
// Update the account balance after a transaction: a positive amount adds, a negative amount deducts
func UpdateBalance(transferCurrency string, username string, amount int64){
var sqlstr = ""
switch transferCurrency {
case ethernet: // Ether balance
if amount >= 0 {
sqlstr = fmt.Sprintf("UPDATE UserInfo SET Ethernet_Current_Balance = CAST( (CAST(Ethernet_Current_Balance AS UNSIGNED) + %d) AS CHAR ) WHERE Username = '%s'; ", amount, username)
fmt.Printf("sqlstr :%s\n", sqlstr)
} else {
amount = amount * -1
sqlstr = fmt.Sprintf("UPDATE UserInfo SET Ethernet_Current_Balance = CAST( (CAST(Ethernet_Current_Balance AS UNSIGNED) - %d) AS CHAR ) WHERE Username = '%s' AND CAST(Ethernet_Current_Balance AS UNSIGNED) > %d;",
amount, username, amount)
fmt.Printf("sqlstr :%s\n", sqlstr)
}
case bit: // Bitcoin balance
if amount >= 0 {
sqlstr = fmt.Sprintf("UPDATE UserInfo SET Bitcoin_Current_Balance = CAST( (CAST(Bitcoin_Current_Balance AS UNSIGNED) + %d) AS CHAR ) WHERE Username = '%s'; ", amount, username)
fmt.Printf("sqlstr :%s\n", sqlstr)
} else {
amount = amount * -1
sqlstr = fmt.Sprintf("UPDATE UserInfo SET Bitcoin_Current_Balance = CAST( (CAST(Bitcoin_Current_Balance AS UNSIGNED) - %d) AS CHAR ) WHERE Username = '%s' AND CAST(Bitcoin_Current_Balance AS UNSIGNED) > %d;",
amount, username, amount)
fmt.Printf("sqlstr :%s\n", sqlstr)
}
}
result := Connect.Query(sqlstr)
switch result.(type) {
case string :
afrowstr := result.(string)
lid, err := strconv.Atoi(afrowstr)
if err != nil {
log.Error("convert update line id err:%s", err.Error())
}
if lid < 0 {
log.Error("update balance[%d] fail", lid)
}
default:
log.Error("UpdateBalance:unexpected return %+v", result)
}
}
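// Example of a statement generated above (illustrative; the user name and amount are placeholders):
//   UPDATE UserInfo SET Ethernet_Current_Balance = CAST( (CAST(Ethernet_Current_Balance AS UNSIGNED) + 10) AS CHAR ) WHERE Username = 'demo';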
// Determine this transaction's sequence position within the current second
func TransaOrder(tm string) (int){
if trans_order == tm{
counter = counter + 1
}else{
trans_order = tm
counter = 1
}
return counter
}
// Generate a transaction serial number
func GetSerialNum()(string) {
// Get the current time
current := time.Now().Format(FormatNormalTime)
tm := strings.Replace(current, " ", "", -1)
tm = strings.Replace(tm, "-", "", -1)
tm = strings.Replace(tm, ":", "", -1)
// Get the transaction sequence number within the current second
num := TransaOrder(tm)
subNum := strconv.Itoa(num)
// Build the serial number (if transactions in one second reach 100,000 or more, concatenate directly without zero padding)
length := maxByte - len([]rune(subNum))
if length > 0 {
for i := 0; i < length; i++ {
subNum = strings.Join([]string{ "0" + subNum}, "")
}
}
serial_number := tm + subNum
fmt.Printf("serial_number: %s\n", serial_number)
return serial_number
}
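// Worked example (illustrative): the first transaction at 2018-07-04 09:15:30 yields
// tm = "20180704091530" and subNum = "1"; with maxByte == 5 (defined elsewhere) the
// counter is zero-padded to "00001", giving the serial number "2018070409153000001".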
// Insert the transaction record into the transaction detail table
// add 2018-7-4 shangwj: added the Ethereum transaction hash (txhash) to the transaction detail table
func InsertDetail(Currency string, SourceUser string, DestUser string, transferAmount uint64, serialNumber string, txhash string) (err error) {
switch Currency {
case ethernet: // Ether
Currency = ETHERNETCOIN
fmt.Printf("Currency: %s\n", Currency)
case bit: // Bitcoin
Currency = BITCOIN
fmt.Printf("Currency: %s\n", Currency)
// The currency was already validated with the other transfer parameters, no need to check it again here
}
var value= make(map[string]interface{})
value["serial_number"] = serialNumber
value["currency"] = Currency
value["transferor"] = SourceUser
value["receiptor"] = DestUser
value["transfer_amount"] = transferAmo | unt
value[TXStatus] = MSG_AWAIT_CONFIRM
value["Txhash"] = txhash //add 2018-7-4 sh | identifier_body |
|
db_helpher.go | //case float32, float64:
// set := fmt.Sprintf("%v = '%v'", key, strconv.FormatFloat(value.(float64), 'f', -1, 64))
// setValue = append(setValue, set)
}
}
setData := strings.Join(setValue, ",")
sql := fmt.Sprintf("UPDATE %v SET %v %v", m.tablename, setData, m.where)
fmt.Printf("update_sql :%s\n", sql)
result, err := m.db.Exec(sql)
if err != nil {
defer func() {
if err := recover(); err != nil {
fmt.Printf("SQL syntax errors ")
}
}()
err = errors.New("update sql failure")
return 0, err
}
i, err := result.RowsAffected()
if err != nil {
err = errors.New("update failure")
log.Error("update tabledata error:%s", err)
return 0, err
}
s, _ := strconv.Atoi(strconv.FormatInt(i, 10))
return s, err
}
// Delete data
func (m *Model) Delete(param string) (num int, err error) {
if m.db == nil {
return 0, errors.New("mysql not connect")
}
h := m.Where(param).FindOne()
if len(h) == 0 {
return 0, errors.New("no Value")
}
sql := fmt.Sprintf("DELETE FROM %v WHERE %v", m.tablename, param)
result, err := m.db.Exec(sql)
if err != nil {
defer func() {
if err := recover(); err != nil {
fmt.Printf("SQL syntax errors: %+v", err)
log.Error("SQL syntax errors:%+v", err)
}
}()
err = errors.New("delete sql failure")
return 0, err
}
i, err := result.RowsAffected()
s, _ := strconv.Atoi(strconv.FormatInt(i, 10))
if i == 0 {
err = errors.New("delete failure")
}
return s, err
}
// Execute a custom SQL statement
func (m *Model) Query(sql string) interface{} {
if m.db == nil {
return errors.New("mysql not connect")
}
var query = strings.TrimSpace(sql)
s, err := regexp.MatchString(`(?i)^(select|call)`, query)
if nil == err && s {
result, _ := m.db.Query(sql)
defer result.Close()
c := QueryResult(result)
return c
}
exec, err := regexp.MatchString(`(?i)^(update|delete)`, query)
if nil == err && exec {
m_exec, err := m.db.Exec(query)
if err != nil {
return err
}
num, _ := m_exec.RowsAffected()
id := strconv.FormatInt(num, 10)
return id
}
insert, err := regexp.MatchString(`(?i)^insert`, query)
if nil == err && insert {
m_exec, err := m.db.Exec(query)
if err != nil {
return err
}
num, _ := m_exec.LastInsertId()
id := strconv.FormatInt(num, 10)
return id
}
result, _ := m.db.Exec(query)
return result
}
// Return the result set of an executed SQL statement
func QueryResult(rows *sql.Rows) map[int]map[string]string {
var result = make(map[int]map[string]string)
columns, _ := rows.Columns()
values := make([]sql.RawBytes, len(columns))
scanargs := make([]interface{}, len(values))
for i := range values {
scanargs[i] = &values[i]
}
var n = 1
for rows.Next() {
result[n] = make(map[string]string)
err := rows.Scan(scanargs...)
if err != nil {
fmt.Println(err)
}
for i, v := range values {
result[n][columns[i]] = string(v)
}
n++
}
return result
}
// Specify the table to query
func (m *Model) SetTable(tablename string) *Model {
m.tablename = tablename
return m
}
// Set the WHERE condition
func (m *Model) Where(param string) *Model {
m.where = fmt.Sprintf(" where %v", param)
return m
}
/*
// Set the auto-increment primary key field
func (m *Model) SetPk(pk string) *Model {
m.pk = pk
return m
}*/
// Set the ordering (ORDER BY)
func (m *Model) OrderBy(param string) *Model {
m.orderby = fmt.Sprintf("ORDER BY %v", param)
return m
}
// Set the number of rows to return (LIMIT)
func (m *Model) Limit(size ...int) *Model {
var end int
start := size[0]
//fmt.Printf("=========len(size): %d=========\n", len(size))
if len(size) > 1 {
end = size[1]
m.limit = fmt.Sprintf("Limit %d,%d", start, end)
return m
}
m.limit = fmt.Sprintf("Limit %d", start)
return m
}
/*
// Left join
func (m *Model) LeftJoin(table, condition string) *Model {
m.join = fmt.Sprintf("LEFT JOIN %v ON %v", table, condition)
return m
}
// Right join
func (m *Model) RightJoin(table, condition string) *Model {
m.join = fmt.Sprintf("RIGHT JOIN %v ON %v", table, condition)
return m
}
// Inner join
func (m *Model) Join(table, condition string) *Model {
m.join = fmt.Sprintf("INNER JOIN %v ON %v", table, condition)
return m
}
// Full outer join
func (m *Model) FullJoin(table, condition string) *Model {
m.join = fmt.Sprintf("FULL JOIN %v ON %v", table, condition)
return m
}
*/
// Print the result set to the screen
func Print(slice map[int]map[string]string) {
for _, v := range slice {
for key, value := range v {
fmt.Println(key, value)
}
fmt.Println("---------------")
}
}
// Close the database
//func (m *Model) DbClose() {
// m.db.Close()
//}
// Compute the time difference in seconds
func getSecondDiffer(start_time string, end_time string) int64 {
var second int64
t1, err := time.ParseInLocation(FormatNormalTime, start_time, time.Local)
t2, err := time.ParseInLocation(FormatNormalTime, end_time, time.Local)
if err == nil && t1.Before(t2) {
second = t2.Unix() - t1.Unix()
return second
} else {
return second
}
}
// Compute the time difference in minutes
func getMinDiffer(start_time string, end_time string) int64 {
var minute int64
t1, err := time.ParseInLocation(FormatNormalTime, start_time, time.Local)
t2, err := time.ParseInLocation(FormatNormalTime, end_time, time.Local)
if err == nil && t1.Before(t2) {
diff := t2.Unix() - t1.Unix()
minute = diff / 60
return minute
} else {
return minute
}
}
// Compute the time difference in hours
func getHourDiffer(start_time string, end_time string) int64 {
var hour int64
t1, err := time.ParseInLocation(FormatNormalTime, start_time, time.Local)
t2, err := time.ParseInLocation(FormatNormalTime, end_time, time.Local)
if err == nil && t1.Before(t2) {
diff := t2.Unix() - t1.Unix()
hour = diff / 3600
return hour
} else {
return hour
}
}
// Compute the time difference in days
func getDayDiffer(start_time string, end_time string) int64 {
var day int64
t1, err := time.ParseInLocation(FormatNormalTime, start_time, time.Local)
t2, err := time.ParseInLocation(FormatNormalTime, end_time, time.Local)
if err == nil && t1.Before(t2) {
diff := t2.Unix() - t1.Unix()
day = diff / 86400
return day
} else {
return day
}
}
// Authenticate a user
func Authentication (c *gin.Context) (msg string){
appid := c.Query("app_id")
sign := c.Query("sign")
timestamp := c.Query("timestamp")
current := time.Now().Format(FormatNormalTime)
day := getDayDiffer(timestamp, current)
if day >= MSG_LICENSE_TIME {
return MSG_EXPIRED_USER
}
data := Connect.SetTable(UserInfo).Fileds(AppID, PassWord, TimeStamp).Where(strings.Join([]string{"AppID = '" + appid + StatusIsZero}, "")).FindOne()
info := data[1]
if nil == info {
fmt.Println("Invalid Authentication input information")
return MSG_INVALID_INPUTINFO
}
// Do not read this from the cache, otherwise it creates a circular dependency between the eth and db packages
// Read the user info from the database and hash it; if the hash of the parsed public parameters equals the hash built from the stored record, authentication succeeds
plainte | xt := string | identifier_name |
|
db_helpher.go | append(setValue, set)
case string:
set := fmt.Sprintf("%v = '%v'", key, value.(string))
setValue = append(setValue, set)
//case float32, float64:
// set := fmt.Sprintf("%v = '%v'", key, strconv.FormatFloat(value.(float64), 'f', -1, 64))
// setValue = append(setValue, set)
}
}
setData := strings.Join(setValue, ",")
sql := fmt.Sprintf("UPDATE %v SET %v %v", m.tablename, setData, m.where)
fmt.Printf("update_sql :%s\n", sql)
result, err := m.db.Exec(sql)
if err != nil {
defer func() {
if err := recover(); err != nil {
fmt.Printf("SQL syntax errors ")
}
}()
err = errors.New("update sql failure")
return 0, err
}
i, err := result.RowsAffected()
if err != nil {
err = errors.New("update failure")
log.Error("update tabledata error:%s", err)
return 0, err
}
s, _ := strconv.Atoi(strconv.FormatInt(i, 10))
return s, err
}
// Delete data
func (m *Model) Delete(param string) (num int, err error) {
if m.db == nil {
return 0, errors.New("mysql not connect")
}
h := m.Where(param).FindOne()
if len(h) == 0 {
return 0, errors.New("no Value")
}
sql := fmt.Sprintf("DELETE FROM %v WHERE %v", m.tablename, param)
result, err := m.db.Exec(sql)
if err != nil {
defer func() {
if err := recover(); err != nil {
fmt.Printf("SQL syntax errors: %+v", err)
log.Error("SQL syntax errors:%+v", err)
}
}()
err = errors.New("delete sql failure")
return 0, err
}
i, err := result.RowsAffected()
s, _ := strconv.Atoi(strconv.FormatInt(i, 10))
if i == 0 {
err = errors.New("delete failure")
}
return s, err
}
// Execute a custom SQL statement
func (m *Model) Query(sql string) interface{} {
if m.db == nil {
return errors.New("mysql not connect")
}
var query = strings.TrimSpace(sql)
s, err := regexp.MatchString(`(?i)^(select|call)`, query)
if nil == err && s {
result, _ := m.db.Query(sql)
defer result.Close()
c := QueryResult(result)
return c
}
exec, err := regexp.MatchString(`(?i)^(update|delete)`, query)
if nil == err && exec {
m_exec, err := m.db.Exec(query)
if err != nil {
return err
}
num, _ := m_exec.RowsAffected()
id := strconv.FormatInt(num, 10)
return id
}
insert, err := regexp.MatchString(`(?i)^insert`, query)
if nil == err && insert {
m_exec, err := m.db.Exec(query)
if err != nil {
return err
}
num, _ := m_exec.LastInsertId()
id := strconv.FormatInt(num, 10)
return id
}
result, _ := m.db.Exec(query)
return result
}
// Return the result set of an executed SQL statement
func QueryResult(rows *sql.Rows) map[int]map[string]string {
var result = make(map[int]map[string]string)
columns, _ := rows.Columns()
values := make([]sql.RawBytes, len(columns))
scanargs := make([]interface{}, len(values))
for i := range values {
scanargs[i] = &values[i]
}
var n = 1
for rows.Next() {
result[n] = make(map[string]string)
err := rows.Scan(scanargs...)
if err != nil {
fmt.Println(err)
}
for i, v := range values {
result[n][columns[i]] = string(v)
}
n++
}
return result
}
// Specify the table to query
func (m *Model) SetTable(tablename string) *Model {
m.tablename = tablename
return m
}
// Set the WHERE condition
func (m *Model) Where(param string) *Model {
m.where = fmt.Sprintf(" where %v", param)
return m
}
/*
// Set the auto-increment primary key field
func (m *Model) SetPk(pk string) *Model {
m.pk = pk
return m
}*/
// Set the ordering (ORDER BY)
func (m *Model) OrderBy(param string) *Model {
m.orderby = fmt.Sprintf("ORDER BY %v", param)
return m
}
// Set the number of rows to return (LIMIT)
func (m *Model) Limit(size ...int) *Model {
var end int
start := size[0]
//fmt.Printf("=========len(size): %d=========\n", len(size))
if len(size) > 1 {
end = size[1]
m.limit = fmt.Sprintf("Limit %d,%d", start, end)
return m
}
m.limit = fmt.Sprintf("Limit %d", start)
return m
}
/*
// Left join
func (m *Model) LeftJoin(table, condition string) *Model {
m.join = fmt.Sprintf("LEFT JOIN %v ON %v", table, condition)
return m
}
// Right join
func (m *Model) RightJoin(table, condition string) *Model {
m.join = fmt.Sprintf("RIGHT JOIN %v ON %v", table, condition)
return m
}
// Inner join
func (m *Model) Join(table, condition string) *Model {
m.join = fmt.Sprintf("INNER JOIN %v ON %v", table, condition)
return m
}
// Full outer join
func (m *Model) FullJoin(table, condition string) *Model {
m.join = fmt.Sprintf("FULL JOIN %v ON %v", table, condition)
return m
}
*/
// Print the result set to the screen
func Print(slice map[int]map[string]string) {
for _, v := range slice {
for key, value := range v {
fmt.Println(key, value)
}
fmt.Println("---------------")
}
}
// Close the database
//func (m *Model) DbClose() {
// m.db.Close()
//}
// Compute the time difference in seconds
func getSecondDiffer(start_time string, end_time string) int64 {
var second int64
t1, err := time.ParseInLocation(FormatNormalTime, start_time, time.Local)
t2, err := time.ParseInLocation(FormatNormalTime, end_time, time.Local)
if err == nil && t1.Before(t2) {
second = t2.Unix() - t1.Unix()
return second
} else {
return second
}
}
// Compute the time difference in minutes
func getMinDiffer(start_time string, end_time string) int64 {
var minute int64
t1, err := time.ParseInLocation(FormatNormalTime, start_time, time.Local)
t2, err := time.ParseInLocation(FormatNormalTime, end_time, time.Local)
if err == nil && t1.Before(t2) {
diff := t2.Unix() - t1.Unix()
minute = diff / 60
return minute
} else {
return minute
}
}
// Compute the time difference in hours
func getHourDiffer(start_time string, end_time string) int64 {
var hour int64
t1, err := time.ParseInLocation(FormatNormalTime, start_time, time.Local)
t2, err := time.ParseInLocation(FormatNormalTime, end_time, time.Local)
if err == nil && t1.Before(t2) {
diff := t2.Unix() - t1.Unix()
hour = diff / 3600
return hour
} else {
return hour
}
}
// Compute the time difference in days
func getDayDiffer(start_time string, end_time string) int64 {
var day int64
t1, err := time.ParseInLocation(FormatNormalTime, start_time, time.Local)
t2, err := time.ParseInLocation(FormatNormalTime, end_time, time.Local)
if err == nil && t1.Before(t2) {
diff := t2.Unix() - t1.Unix()
day = diff / 86400
return day
} else {
return day
}
}
// Authenticate a user
func Authentication (c *gin.Context) (msg string){
appid := c.Query("app_id")
sign := c.Query("sign")
timestamp := c.Query("timestamp")
current := time.Now().Format(FormatNormalTime)
day := getDayDiffer(timestamp, current)
if day >= MSG_LICENSE_TIME {
return MSG_EXPIRED_USER
}
data := Connect.SetTable(UserInfo).Fileds(AppID, PassWord, TimeStamp).Where(strings.Join([]string{"AppID = '" + appid + StatusIsZero}, "")).FindOne()
info := data[1]
if nil == info {
fmt.Println("Invalid Authentication input informatio | n")
return MSG_INVALID_INPUTINFO
}
// Do not read this from the cache, otherwise it creates a circular dependency between the eth and db packages
//从数据 | conditional_block |
|
server.py | test results sent by connected clients. A ClientHandler object is created
for each new client that connects. Clients communicate with the server using a
string-based messaging protocol that is defined in client_api.py. Clients are also
tracked using sequential client ids that are assigned upon connection and sent to the
client upon the client's request.
The server logs client connections and messages to the console and to a file saved
to ./server_logs named 'server_log_<date&time>'. Once all clients have finished
running, the server writes a report displaying statistics for each client including
how long they ran, file write information, performance stats, and status into the
log file. If clients drop out before finishing that is logged.
Example usage of this class is shown in the "if __name__ == '__main__':" block at
the end of this file.
"""
class Server(asyncore.dispatcher):
"""Server class that logs performance data from multiple, concurrent test clients.
Args:
host (str): address where test server will run.
port (int): network port the server will run on.
"""
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.host = host
self.port = port
self.client_id = config["first_client_id"]
self.client_list = {}
self.start_time = ''
self.end_time = ''
self.init_log_file()
self.init_server_socket()
def init_log_file(self):
"""Initializes the server's log file for client data."""
try:
os.makedirs(config["server_log_path"])
except OSError:
if not os.path.isdir(config["server_log_path"]):
raise
server_log_file = logging.FileHandler(
config["server_log_path"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')
server_log_file.setLevel(logging.DEBUG)
server_log_file.setFormatter(file_formatter)
server_log.addHandler(server_log_file)
def init_server_socket(self):
"""Create, bind, and configure socket for server."""
server_log.info('Initializing server on {}:{}'.format(self.host, self.port))
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((self.host, self.port))
self.listen(5)
server_log.info('Initialization complete!')
def start_server(self):
self.start_time = time.strftime('%Y-%m-%d_%H:%M:%S')
self.run_loop()
def handle_accept(self):
"""Handles a client connection - Creates a ClientHandler instance for it.
The ClientHandler is stored according to client_id in the client_list dictionary."""
pair = self.accept()
if pair is not None:
sock, addr = pair
server_log.info('Client connection from {}, assigning client id {}'.format(repr(addr), self.client_id))
handler = ClientHandler(sock, addr, self.client_id)
self.client_list.update({self.client_id: handler})
self.client_id += 1
def handle_close(self):
server_log.info('Server shutting down...')
self.close()
def run_loop(self):
"""Run asyncore.loop until all clients are closed"""
server_log.info('Server now accepting client connections.')
while not self.clients_done():
asyncore.loop(timeout=config["server_timeout"], count=config["server_loop_count"])
def clients_done(self):
"""Returns True if all clients have completed their tests and at least one client has connected."""
if not self.client_list:
return False
elif len(asyncore.socket_map) > 1:
return False
else:
return True
def write_report(self):
"""Writes out a report that displays data for all clients that ran."""
self.end_time = time.strftime('%Y-%m-%d_%H:%M:%S')
server_log.info('')
server_log.info('=========================================================')
server_log.info('All test clients completed!')
server_log.info(' Start time: {}'.format(self.start_time))
server_log.info(' End time: {}'.format(self.end_time))
server_log.info('')
server_log.info('Total of {} client(s) ran. Data for each client:'.format(len(self.client_list)))
for client in self.client_list.values():
server_log.info('---------------------------------------------------------')
server_log.info(' Client {}'.format(client.client_id))
server_log.info(' Test status: {}'.format(client.status))
server_log.info(' Time ran: {:.2f} sec'.format(client.time_ran))
server_log.info(' Avg CPU usage: {:.2f}%'.format(client.cpu_avg))
server_log.info(' Avg MEM usage: {:.2f}%'.format(client.mem_avg))
server_log.info(' Files written: {}'.format(client.files_written))
server_log.info(' File size: {}'.format(client.file_size))
server_log.info(' Chunk size: {}'.format(client.chunk_size))
server_log.info('=========================================================')
server_log.info('')
class ClientHandler(asynchat.async_chat):
"""Class instantiated to keep track of each client that connects to the server.
Args:
sock (int): socket on which the client is connected.
addr (int): address on which the client is connected.
client_id (int): unique identifier for the client.
"""
def __init__(self, sock, addr, client_id):
asynchat.async_chat.__init__(self, sock=sock)
self.addr = addr
self.client_id = client_id
self.set_terminator(client_api["terminator"])
self.start_time = 0
self.end_time = 0
self.time_ran = 0
self.num_stat_reports = 0
self.cpu_avg = 0
self.mem_avg = 0
self.cpu_total = 0
self.mem_total = 0
self.chunk_size = 0
self.file_size = 0
self.files_written = 0
self.status = 'NOT STARTED'
self.msg_buffer = []
self.msg = ''
self.msg_split = []
self.msg_handler = { client_api["get_client_id"]: self.handle_get_client_id,
client_api["ready"]: self.handle_ready,
client_api["start"]: self.handle_start,
client_api["done"]: self.handle_done,
client_api["heartbeat"]: self.handle_heartbeat,
client_api["send_perf_stats"]: self.handle_perf_stats,
client_api["send_file_stats"]: self.handle_file_stats,
client_api["file_rollover"]: self.handle_file_rollover, }
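# Illustrative example of the dispatch above (the literal command strings live in
# client_api.py, so the values here are placeholders): a message such as
#   client_api["send_perf_stats"] + client_api["delimiter"] + "12.5" + client_api["delimiter"] + "48.2" + client_api["terminator"]
# is split on the delimiter and routed to handle_perf_stats, which expects exactly
# three fields (command, cpu, mem).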
def collect_incoming_data(self, data):
"""Buffer incoming message"""
self.msg_buffer.append(data)
def found_terminator(self):
"""Processes the incoming message by looking up the handler in the message dictionary."""
self.msg = ''.join(self.msg_buffer)
self.msg_split = self.msg.split(client_api["delimiter"])
cmd = self.msg_split[0]
try:
self.msg_handler[cmd]()
except KeyError as e:
server_log.info('Unhandled command received from client id {}: {}'.format(self.client_id, cmd))
except Exception as e:
server_log.info('Exception raised in server when receiving message from client: {!r}'.format(e))
raise e
finally:
self.msg_buffer = []
self.msg = ''
self.msg_split = []
def handle_close(self):
"""Sets test status and closes connection."""
self.end_time = time.time()
self.time_ran = self.end_time - self.start_time
if self.status != 'PASS':
server_log.info('Client {} aborted!'.format(self.client_id))
self.status = 'ABORTED'
self.close()
## MESSAGE HANDLERS:
def handle_get_client_id(self):
|
def handle_ready(self):
server_log.info(str(self.client_id) + ': Client ready, sending test request')
self.push(client_api["run_tests"] + client_api["terminator"])
def handle_start(self):
server_log.info(str(self.client_id) + ': Client started running tests')
self.status = 'RUNNING'
self.start_time = time.time()
def handle_done(self):
server_log.info(str(self.client_id) + ': Client finished running tests')
self.status = 'PASS'
self.handle_close()
def handle_heartbeat(self):
server_log.info(str(self.client_id) + ': Heartbeat received')
def handle_perf_stats(self):
if len(self.msg_split) == 3:
cpu = self.msg_split[1]
mem = self.msg_split[2]
server_log.info(str(self.client_id) + ': Performance stats received. CPU: {} Mem: {}'.format(cpu, mem))
else:
server_log.info(str(self.client_id) + ': Invalid performance stats received')
return False
self.num_stat_reports += 1
self.cpu_total += float(cpu)
self.mem_total += float(mem)
self.cpu_avg = self.cpu_total / self.num_stat_reports
self.mem_avg = self.mem_total / self.num_stat_reports
return True
def handle_file_stats(self):
if len(self.msg_split) == 3:
self.chunk_size = int(self.msg_split[1])
self.file_size = int(self.msg_split[2])
server_log.info(str(self.client_id) + | server_log.info(str(self.client_id) + ': Sending client id')
self.push(client_api["set_client_id"] + client_api["delimiter"] + str(self.client_id) + client_api["terminator"]) | identifier_body |
server.py | test results sent by connected clients. A ClientHandler object is created
for each new client that connects. Clients communicate with the server using a
string-based messaging protocol that is defined in client_api.py. Clients are also
tracked using sequential client ids that are assigned upon connection and sent to the
client upon the client's request.
The server logs client connections and messages to the console and to a file saved
to ./server_logs named 'server_log_<date&time>'. Once all clients have finished
running, the server writes a report displaying statistics for each client including
how long they ran, file write information, performance stats, and status into the
log file. If clients drop out before finishing that is logged.
Example usage of this class is shown in the "if __name__ == '__main__':" block at
the end of this file.
"""
class Server(asyncore.dispatcher):
"""Server class that logs performance data from multiple, concurrent test clients.
Args:
host (str): address where test server will run.
port (int): network port the server will run on.
"""
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.host = host
self.port = port
self.client_id = config["first_client_id"]
self.client_list = {}
self.start_time = ''
self.end_time = ''
self.init_log_file()
self.init_server_socket()
def init_log_file(self):
"""Initializes the server's log file for client data."""
try:
os.makedirs(config["server_log_path"])
except OSError:
if not os.path.isdir(config["server_log_path"]):
raise
server_log_file = logging.FileHandler(
config["server_log_path"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')
server_log_file.setLevel(logging.DEBUG)
server_log_file.setFormatter(file_formatter)
server_log.addHandler(server_log_file)
def init_server_socket(self):
"""Create, bind, and configure socket for server."""
server_log.info('Initializing server on {}:{}'.format(self.host, self.port))
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((self.host, self.port))
self.listen(5)
server_log.info('Initialization complete!')
def start_server(self):
self.start_time = time.strftime('%Y-%m-%d_%H:%M:%S')
self.run_loop()
def handle_accept(self):
"""Handles a client connection - Creates a ClientHandler instance for it.
The ClientHandler is stored according to client_id in the client_list dictionary."""
pair = self.accept()
if pair is not None:
sock, addr = pair
server_log.info('Client connection from {}, assigning client id {}'.format(repr(addr), self.client_id))
handler = ClientHandler(sock, addr, self.client_id)
self.client_list.update({self.client_id: handler})
self.client_id += 1
def handle_close(self):
server_log.info('Server shutting down...')
self.close()
def run_loop(self):
"""Run asyncore.loop until all clients are closed"""
server_log.info('Server now accepting client connections.')
while not self.clients_done():
asyncore.loop(timeout=config["server_timeout"], count=config["server_loop_count"])
def clients_done(self):
"""Returns True if all clients have completed their tests and at least one client has connected."""
if not self.client_list:
return False
elif len(asyncore.socket_map) > 1:
|
else:
return True
def write_report(self):
"""Writes out a report that displays data for all clients that ran."""
self.end_time = time.strftime('%Y-%m-%d_%H:%M:%S')
server_log.info('')
server_log.info('=========================================================')
server_log.info('All test clients completed!')
server_log.info(' Start time: {}'.format(self.start_time))
server_log.info(' End time: {}'.format(self.end_time))
server_log.info('')
server_log.info('Total of {} client(s) ran. Data for each client:'.format(len(self.client_list)))
for client in self.client_list.values():
server_log.info('---------------------------------------------------------')
server_log.info(' Client {}'.format(client.client_id))
server_log.info(' Test status: {}'.format(client.status))
server_log.info(' Time ran: {:.2f} sec'.format(client.time_ran))
server_log.info(' Avg CPU usage: {:.2f}%'.format(client.cpu_avg))
server_log.info(' Avg MEM usage: {:.2f}%'.format(client.mem_avg))
server_log.info(' Files written: {}'.format(client.files_written))
server_log.info(' File size: {}'.format(client.file_size))
server_log.info(' Chunk size: {}'.format(client.chunk_size))
server_log.info('=========================================================')
server_log.info('')
class ClientHandler(asynchat.async_chat):
"""Class instantiated to keep track of each client that connects to the server.
Args:
sock (int): socket on which the client is connected.
addr (int): address on which the client is connected.
id (int): unique identifier for client.
"""
def __init__(self, sock, addr, client_id):
asynchat.async_chat.__init__(self, sock=sock)
self.addr = addr
self.client_id = client_id
self.set_terminator(client_api["terminator"])
self.start_time = 0
self.end_time = 0
self.time_ran = 0
self.num_stat_reports = 0
self.cpu_avg = 0
self.mem_avg = 0
self.cpu_total = 0
self.mem_total = 0
self.chunk_size = 0
self.file_size = 0
self.files_written = 0
self.status = 'NOT STARTED'
self.msg_buffer = []
self.msg = ''
self.msg_split = []
self.msg_handler = { client_api["get_client_id"]: self.handle_get_client_id,
client_api["ready"]: self.handle_ready,
client_api["start"]: self.handle_start,
client_api["done"]: self.handle_done,
client_api["heartbeat"]: self.handle_heartbeat,
client_api["send_perf_stats"]: self.handle_perf_stats,
client_api["send_file_stats"]: self.handle_file_stats,
client_api["file_rollover"]: self.handle_file_rollover, }
def collect_incoming_data(self, data):
"""Buffer incoming message"""
self.msg_buffer.append(data)
def found_terminator(self):
"""Processes the incoming message by looking up the handler in the message dictionary."""
self.msg = ''.join(self.msg_buffer)
self.msg_split = self.msg.split(client_api["delimiter"])
cmd = self.msg_split[0]
try:
self.msg_handler[cmd]()
except KeyError as e:
server_log.info('Unhandled command received from client id {}: {}'.format(self.client_id, cmd))
except Exception as e:
server_log.info('Exception raised in server when receiving message from client: {!r}'.format(e))
raise e
finally:
self.msg_buffer = []
self.msg = ''
self.msg_split = []
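# Illustration of the dispatch above, with hypothetical protocol values --
# client_api.py is not shown here, so the real delimiter/terminator/command
# strings may differ. Assuming delimiter ',' and terminator '\n', a perf-stats
# message could arrive as "SEND_PERF_STATS,12.5,48.2\n"; found_terminator()
# would then compute:
# msg_split = "SEND_PERF_STATS,12.5,48.2".split(',')
# cmd = msg_split[0] # looked up in self.msg_handler and called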
def handle_close(self):
"""Sets test status and closes connection."""
self.end_time = time.time()
self.time_ran = self.end_time - self.start_time
if self.status != 'PASS':
server_log.info('Client {} aborted!'.format(self.client_id))
self.status = 'ABORTED'
self.close()
## MESSAGE HANDLERS:
def handle_get_client_id(self):
server_log.info(str(self.client_id) + ': Sending client id')
self.push(client_api["set_client_id"] + client_api["delimiter"] + str(self.client_id) + client_api["terminator"])
def handle_ready(self):
server_log.info(str(self.client_id) + ': Client ready, sending test request')
self.push(client_api["run_tests"] + client_api["terminator"])
def handle_start(self):
server_log.info(str(self.client_id) + ': Client started running tests')
self.status = 'RUNNING'
self.start_time = time.time()
def handle_done(self):
server_log.info(str(self.client_id) + ': Client finished running tests')
self.status = 'PASS'
self.handle_close()
def handle_heartbeat(self):
server_log.info(str(self.client_id) + ': Heartbeat received')
def handle_perf_stats(self):
if len(self.msg_split) == 3:
cpu = self.msg_split[1]
mem = self.msg_split[2]
server_log.info(str(self.client_id) + ': Performance stats received. CPU: {} Mem: {}'.format(cpu, mem))
else:
server_log.info(str(self.client_id) + ': Invalid performance stats received')
return False
self.num_stat_reports += 1
self.cpu_total += float(cpu)
self.mem_total += float(mem)
self.cpu_avg = self.cpu_total / self.num_stat_reports
self.mem_avg = self.mem_total / self.num_stat_reports
return True
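# handle_perf_stats() keeps a running mean by accumulating totals and dividing
# by the number of reports. The same incremental-average idea as a standalone
# sketch (illustrative only; these names are not part of the server code):
# def running_mean(samples):
#     total, n = 0.0, 0
#     for s in samples:
#         n += 1
#         total += s
#         yield total / n
# list(running_mean([10.0, 20.0, 30.0])) # -> [10.0, 15.0, 20.0]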
def handle_file_stats(self):
if len(self.msg_split) == 3:
self.chunk_size = int(self.msg_split[1])
self.file_size = int(self.msg_split[2])
server_log.info(str(self.client_id) + | return False | conditional_block |
server.py |
client upon the client's request.
The server logs client connections and messages to the console and to a file saved
to ./server_logs named 'server_log_<date&time>'. Once all clients have finished
running, the server writes a report displaying statistics for each client including
how long they ran, file write information, performance stats, and status into the
log file. If clients drop out before finishing that is logged.
Example usage of this class is shown in the "if __name__ == '__main__':" block at
the end of this file.
"""
class Server(asyncore.dispatcher):
"""Server class that logs performance data from multiple, concurrent test clients.
Args:
host (str): address where test server will run.
port (int): network port the server will run on.
"""
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.host = host
self.port = port
self.client_id = config["first_client_id"]
self.client_list = {}
self.start_time = ''
self.end_time = ''
self.init_log_file()
self.init_server_socket()
def init_log_file(self):
"""Initializes the server's log file for client data."""
try:
os.makedirs(config["server_log_path"])
except OSError:
if not os.path.isdir(config["server_log_path"]):
raise
server_log_file = logging.FileHandler(
config["server_log_path"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')
server_log_file.setLevel(logging.DEBUG)
server_log_file.setFormatter(file_formatter)
server_log.addHandler(server_log_file)
def init_server_socket(self):
"""Create, bind, and configure socket for server."""
server_log.info('Initializing server on {}:{}'.format(self.host, self.port))
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((self.host, self.port))
self.listen(5)
server_log.info('Initialization complete!')
def start_server(self):
self.start_time = time.strftime('%Y-%m-%d_%H:%M:%S')
self.run_loop()
def handle_accept(self):
"""Handles a client connection - Creates a ClientHandler instance for it.
The ClientHandler is stored according to client_id in the client_list dictionary."""
pair = self.accept()
if pair is not None:
sock, addr = pair
server_log.info('Client connection from {}, assigning client id {}'.format(repr(addr), self.client_id))
handler = ClientHandler(sock, addr, self.client_id)
self.client_list.update({self.client_id: handler})
self.client_id += 1
def handle_close(self):
server_log.info('Server shutting down...')
self.close()
def run_loop(self):
"""Run asyncore.loop until all clients are closed"""
server_log.info('Server now accepting client connections.')
while not self.clients_done():
asyncore.loop(timeout=config["server_timeout"], count=config["server_loop_count"])
def clients_done(self):
"""Returns True if all clients have completed their tests and at least one client has connected."""
if not self.client_list:
return False
elif len(asyncore.socket_map) > 1:
return False
else:
return True
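# Why "> 1": assuming the default global asyncore.socket_map is used (no
# custom map is passed to asyncore.loop here), it holds one entry for the
# listening Server dispatcher plus one entry per open ClientHandler, so more
# than one entry means at least one client connection is still active.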
def write_report(self):
"""Writes out a report that displays data for all clients that ran."""
self.end_time = time.strftime('%Y-%m-%d_%H:%M:%S')
server_log.info('')
server_log.info('=========================================================')
server_log.info('All test clients completed!')
server_log.info(' Start time: {}'.format(self.start_time))
server_log.info(' End time: {}'.format(self.end_time))
server_log.info('')
server_log.info('Total of {} client(s) ran. Data for each client:'.format(len(self.client_list)))
for client in self.client_list.values():
server_log.info('---------------------------------------------------------')
server_log.info(' Client {}'.format(client.client_id))
server_log.info(' Test status: {}'.format(client.status))
server_log.info(' Time ran: {:.2f} sec'.format(client.time_ran))
server_log.info(' Avg CPU usage: {:.2f}%'.format(client.cpu_avg))
server_log.info(' Avg MEM usage: {:.2f}%'.format(client.mem_avg))
server_log.info(' Files written: {}'.format(client.files_written))
server_log.info(' File size: {}'.format(client.file_size))
server_log.info(' Chunk size: {}'.format(client.chunk_size))
server_log.info('=========================================================')
server_log.info('')
class ClientHandler(asynchat.async_chat):
"""Class instantiated to keep track of each client that connects to the server.
Args:
sock (int): socket on which the client is connected.
addr (int): address on which the client is connected.
id (int): unique identifier for client.
"""
def __init__(self, sock, addr, client_id):
asynchat.async_chat.__init__(self, sock=sock)
self.addr = addr
self.client_id = client_id
self.set_terminator(client_api["terminator"])
self.start_time = 0
self.end_time = 0
self.time_ran = 0
self.num_stat_reports = 0
self.cpu_avg = 0
self.mem_avg = 0
self.cpu_total = 0
self.mem_total = 0
self.chunk_size = 0
self.file_size = 0
self.files_written = 0
self.status = 'NOT STARTED'
self.msg_buffer = []
self.msg = ''
self.msg_split = []
self.msg_handler = { client_api["get_client_id"]: self.handle_get_client_id,
client_api["ready"]: self.handle_ready,
client_api["start"]: self.handle_start,
client_api["done"]: self.handle_done,
client_api["heartbeat"]: self.handle_heartbeat,
client_api["send_perf_stats"]: self.handle_perf_stats,
client_api["send_file_stats"]: self.handle_file_stats,
client_api["file_rollover"]: self.handle_file_rollover, }
def collect_incoming_data(self, data):
"""Buffer incoming message"""
self.msg_buffer.append(data)
def found_terminator(self):
"""Processes the incoming message by looking up the handler in the message dictionary."""
self.msg = ''.join(self.msg_buffer)
self.msg_split = self.msg.split(client_api["delimiter"])
cmd = self.msg_split[0]
try:
self.msg_handler[cmd]()
except KeyError as e:
server_log.info('Unhandled command received from client id {}: {}'.format(self.client_id, cmd))
except Exception as e:
server_log.info('Exception raised in server when receiving message from client: {!r}'.format(e))
raise e
finally:
self.msg_buffer = []
self.msg = ''
self.msg_split = []
def handle_close(self):
"""Sets test status and closes connection."""
self.end_time = time.time()
self.time_ran = self.end_time - self.start_time
if self.status != 'PASS':
server_log.info('Client {} aborted!'.format(self.client_id))
self.status = 'ABORTED'
self.close()
## MESSAGE HANDLERS:
def handle_get_client_id(self):
server_log.info(str(self.client_id) + ': Sending client id')
self.push(client_api["set_client_id"] + client_api["delimiter"] + str(self.client_id) + client_api["terminator"])
def handle_ready(self):
server_log.info(str(self.client_id) + ': Client ready, sending test request')
self.push(client_api["run_tests"] + client_api["terminator"])
def handle_start(self):
server_log.info(str(self.client_id) + ': Client started running tests')
self.status = 'RUNNING'
self.start_time = time.time()
def handle_done(self):
server_log.info(str(self.client_id) + ': Client finished running tests')
self.status = 'PASS'
self.handle_close()
def handle_heartbeat(self):
server_log.info(str(self.client_id) + ': Heartbeat received')
def handle_perf_stats(self):
if len(self.msg_split) == 3:
cpu = self.msg_split[1]
mem = self.msg_split[2]
server_log.info(str(self.client_id) + ': Performance stats received. CPU: {} Mem: {}'.format(cpu, mem))
else:
server_log.info(str(self.client_id) + ': Invalid performance stats received')
return False
self.num_stat_reports += 1
self.cpu_total += float(cpu)
self.mem_total += float(mem)
self.cpu_avg = self.cpu_total / self.num_stat_reports
self.mem_avg = self.mem_total / self.num_stat_reports
return True
def handle_file_stats(self):
if len(self.msg_split) == 3:
self.chunk_size = int(self.msg_split[1])
self.file_size = int(self.msg_split[2])
server_log.info(str(self.client_id) + ': File stats received. Chunk size: {} File size: {}'.format(self.chunk_size, self.file_size))
return True
else:
server_log.info(str(self.client_id) + ': Invalid file stats received')
return False
def | handle_file_rollover | identifier_name |
|
server.py | test results sent by connected clients. A ClientHandler object is created
for each new client that connects. Clients communicate with the server using a
string-based messaging protocol that is defined in client_api.py. Clients are also
tracked using sequential client ids that are assigned upon connection and sent to the
client upon the client's request.
The server logs client connections and messages to the console and to a file saved
to ./server_logs named 'server_log_<date&time>'. Once all clients have finished
running, the server writes a report displaying statistics for each client including
how long they ran, file write information, performance stats, and status into the
log file. If clients drop out before finishing that is logged.
Example usage of this class is shown in the "if __name__ == '__main__':" block at
the end of this file.
"""
class Server(asyncore.dispatcher):
"""Server class that logs performance data from multiple, concurrent test clients.
Args:
host (str): address where test server will run.
port (int): network port the server will run on.
"""
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.host = host
self.port = port
self.client_id = config["first_client_id"]
self.client_list = {}
self.start_time = ''
self.end_time = ''
self.init_log_file()
self.init_server_socket()
def init_log_file(self):
"""Initializes the server's log file for client data."""
try:
os.makedirs(config["server_log_path"])
except OSError:
if not os.path.isdir(config["server_log_path"]):
raise
server_log_file = logging.FileHandler(
config["server_log_path"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')
server_log_file.setLevel(logging.DEBUG)
server_log_file.setFormatter(file_formatter)
server_log.addHandler(server_log_file)
def init_server_socket(self):
"""Create, bind, and configure socket for server."""
server_log.info('Initializing server on {}:{}'.format(self.host, self.port))
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((self.host, self.port))
self.listen(5)
server_log.info('Initialization complete!')
def start_server(self):
self.start_time = time.strftime('%Y-%m-%d_%H:%M:%S')
self.run_loop()
def handle_accept(self):
"""Handles a client connection - Creates a ClientHandler instance for it.
The ClientHandler is stored according to client_id in the client_list dictionary."""
pair = self.accept()
if pair is not None:
sock, addr = pair
server_log.info('Client connection from {}, assigning client id {}'.format(repr(addr), self.client_id))
handler = ClientHandler(sock, addr, self.client_id)
self.client_list.update({self.client_id: handler})
self.client_id += 1
def handle_close(self):
server_log.info('Server shutting down...')
self.close()
def run_loop(self):
"""Run asyncore.loop until all clients are closed"""
server_log.info('Server now accepting client connections.')
while not self.clients_done():
asyncore.loop(timeout=config["server_timeout"], count=config["server_loop_count"])
def clients_done(self):
"""Returns True if all clients have completed their tests and at least one client has connected."""
if not self.client_list:
return False
elif len(asyncore.socket_map) > 1:
return False
else:
return True
def write_report(self):
"""Writes out a report that displays data for all clients that ran."""
self.end_time = time.strftime('%Y-%m-%d_%H:%M:%S')
server_log.info('')
server_log.info('=========================================================')
server_log.info('All test clients completed!')
server_log.info(' Start time: {}'.format(self.start_time))
server_log.info(' End time: {}'.format(self.end_time))
server_log.info('')
server_log.info('Total of {} client(s) ran. Data for each client:'.format(len(self.client_list)))
for client in self.client_list.values():
server_log.info('---------------------------------------------------------')
server_log.info(' Client {}'.format(client.client_id))
server_log.info(' Test status: {}'.format(client.status))
server_log.info(' Time ran: {:.2f} sec'.format(client.time_ran))
server_log.info(' Avg CPU usage: {:.2f}%'.format(client.cpu_avg))
server_log.info(' Avg MEM usage: {:.2f}%'.format(client.mem_avg))
server_log.info(' Files written: {}'.format(client.files_written))
server_log.info(' File size: {}'.format(client.file_size))
server_log.info(' Chunk size: {}'.format(client.chunk_size))
server_log.info('=========================================================')
server_log.info('')
class ClientHandler(asynchat.async_chat):
"""Class instantiated to keep track of each client that connects to the server.
Args:
sock (int): socket on which the client is connected.
addr (int): address on which the client is connected.
id (int): unique identifier for client.
"""
def __init__(self, sock, addr, client_id):
asynchat.async_chat.__init__(self, sock=sock)
self.addr = addr
self.client_id = client_id
self.set_terminator(client_api["terminator"])
self.start_time = 0
self.end_time = 0
self.time_ran = 0
self.num_stat_reports = 0
self.cpu_avg = 0
self.mem_avg = 0
self.cpu_total = 0
self.mem_total = 0
self.chunk_size = 0
self.file_size = 0
self.files_written = 0
self.status = 'NOT STARTED'
self.msg_buffer = []
self.msg = ''
self.msg_split = []
self.msg_handler = { client_api["get_client_id"]: self.handle_get_client_id,
client_api["ready"]: self.handle_ready,
client_api["start"]: self.handle_start,
client_api["done"]: self.handle_done,
client_api["heartbeat"]: self.handle_heartbeat,
client_api["send_perf_stats"]: self.handle_perf_stats,
client_api["send_file_stats"]: self.handle_file_stats,
client_api["file_rollover"]: self.handle_file_rollover, }
def collect_incoming_data(self, data):
"""Buffer incoming message"""
self.msg_buffer.append(data)
def found_terminator(self):
"""Processes the incoming message by looking up the handler in the message dictionary."""
| except KeyError as e:
server_log.info('Unhandled command received from client id {}: {}'.format(self.client_id, cmd))
except Exception as e:
server_log.info('Exception raised in server when receiving message from client: {!r}'.format(e))
raise e
finally:
self.msg_buffer = []
self.msg = ''
self.msg_split = []
def handle_close(self):
"""Sets test status and closes connection."""
self.end_time = time.time()
self.time_ran = self.end_time - self.start_time
if self.status != 'PASS':
server_log.info('Client {} aborted!'.format(self.client_id))
self.status = 'ABORTED'
self.close()
## MESSAGE HANDLERS:
def handle_get_client_id(self):
server_log.info(str(self.client_id) + ': Sending client id')
self.push(client_api["set_client_id"] + client_api["delimiter"] + str(self.client_id) + client_api["terminator"])
def handle_ready(self):
server_log.info(str(self.client_id) + ': Client ready, sending test request')
self.push(client_api["run_tests"] + client_api["terminator"])
def handle_start(self):
server_log.info(str(self.client_id) + ': Client started running tests')
self.status = 'RUNNING'
self.start_time = time.time()
def handle_done(self):
server_log.info(str(self.client_id) + ': Client finished running tests')
self.status = 'PASS'
self.handle_close()
def handle_heartbeat(self):
server_log.info(str(self.client_id) + ': Heartbeat received')
def handle_perf_stats(self):
if len(self.msg_split) == 3:
cpu = self.msg_split[1]
mem = self.msg_split[2]
server_log.info(str(self.client_id) + ': Performance stats received. CPU: {} Mem: {}'.format(cpu, mem))
else:
server_log.info(str(self.client_id) + ': Invalid performance stats received')
return False
self.num_stat_reports += 1
self.cpu_total += float(cpu)
self.mem_total += float(mem)
self.cpu_avg = self.cpu_total / self.num_stat_reports
self.mem_avg = self.mem_total / self.num_stat_reports
return True
def handle_file_stats(self):
if len(self.msg_split) == 3:
self.chunk_size = int(self.msg_split[1])
self.file_size = int(self.msg_split[2])
server_log.info(str(self.client_id) + | self.msg = ''.join(self.msg_buffer)
self.msg_split = self.msg.split(client_api["delimiter"])
cmd = self.msg_split[0]
try:
self.msg_handler[cmd]()
| random_line_split |
model_new2_plot.py | # ##############
dt = 10
time_axis = range(0,t_final+dt,dt)
time_axis = np.array(time_axis)
def retrieve_result( filename_lin, filename_final ):
fdata = h5py.File( filename_lin, "r" )
n_f = fdata[ "n_f" ].value
n_t = fdata[ "n_t" ].value
n_u = fdata[ "n_u" ].value
n_p = fdata[ "n_p" ].value
num_t = fdata[ "num_t" ].value
num_u = fdata[ "num_u" ].value
num_p = fdata[ "num_p" ].value
n_e1 = fdata[ "n_e1" ].value
n_e2 = fdata[ "n_e2" ].value
n_e3 = fdata[ "n_e3" ].value
t_range = fdata[ "t_range" ].value
v_range = fdata[ "v_range" ].value
p_range = fdata[ "p_range" ].value
vbc_point = fdata[ "vbc_point" ].value
vbc_point2 = fdata[ "vbc_point2" ].value
vbc2_point = fdata[ "vbc2_point" ].value
vbc2_point2 = fdata[ "vbc2_point2" ].value
tq_point = fdata[ "tq_point" ].value
tq_point2 = fdata[ "tq_point2" ].value
tq_point3 = fdata[ "tq_point3" ].value
# ipdb.set_trace()
final_array = np.load( filename_final )
return ( n_f, n_t, n_u, n_p,
num_t, num_u, num_p,
n_e1, n_e2, n_e3,
t_range, v_range, p_range,
vbc_point, vbc_point2, vbc2_point, vbc2_point2,
tq_point, tq_point2, tq_point3,
final_array )
( n_f, n_t, n_u, n_p,
num_t, num_u, num_p,
n_e1, n_e2, n_e3,
t_range, v_range, p_range,
vbc_point, vbc_point2, vbc2_point, vbc2_point2,
tq_point, tq_point2, tq_point3,
final_array ) = retrieve_result( "model_new2_lin.data",
(drt + "/results1.npy") )
final_array2 = np.load( (drt2 + "/results1.npy") )
# #############
# n_f = n_f/3 # for MPC only
# #############
num_lp = 1
n_total = n_f*( num_t+1+1 ) + num_u + num_p + ( 1 + 1 )*2
n_constraint = n_f*n_e1 + n_e2 + n_e3
tidx = np.arange( 0, n_f*num_t ).reshape( ( n_f, num_t ) ) # temperature indx
uidx = ( tidx.size +
np.arange( 0, num_u ) ) # velocity indx
pidx = ( tidx.size + uidx.size +
np.arange( 0, num_p ) ) # pressure indx
vidx = ( tidx.size + uidx.size + pidx.size +
np.arange( 0, n_f ) ) # heater control, indx
vuidx = ( tidx.size + uidx.size + pidx.size + vidx.size +
np.arange( 0, 1 ) ) # velocity control 1, indx
vu2idx = ( tidx.size + uidx.size + pidx.size + vidx.size + vuidx.size +
np.arange( 0, 1 ) ) # velocity control 2, indx
v2idx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size +
np.arange( 0, n_f ) ) # heater control, indx
v2uidx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size + v2idx.size +
np.arange(0,1) ) # velocity control 1 of N2, indx
v2u2idx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size +
v2idx.size + v2uidx.size +
np.arange(0,1) ) # velocity control 2 of N2, indx
e1idx = np.arange( 0, n_f*n_e1 ).reshape( ( n_f, n_e1 ) )
e2idx = ( e1idx.size +
np.arange( 0, n_e2 ) )
e3idx = ( e1idx.size + e2idx.size +
np.arange( 0, n_e3 ) )
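# Layout check (illustrative): the flat decision vector is partitioned as
# [temperatures | velocities | pressures | heater 1 | inlet 1 (2 values) |
#  heater 2 | inlet 2 (2 values)], so the block sizes must add up to n_total:
# n_f*num_t + num_u + num_p + n_f + 1 + 1 + n_f + 1 + 1
# == n_f*(num_t + 1 + 1) + num_u + num_p + (1 + 1)*2 == n_total,
# i.e. v2u2idx[-1] + 1 == n_total is a quick sanity check of the offsets above.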
tqidx = [] # index for target area
for i in tq_point:
tqidx.append( t_range.tolist().index(i) )
tqidx = np.array( tqidx )
tq2idx = [] # indx for target area 2
for i in tq_point2:
tq2idx.append( t_range.tolist().index(i) )
tq2idx = np.array( tq2idx )
tq3idx = [] # indx for target area 3
for i in tq_point3:
tq3idx.append( t_range.tolist().index(i) )
tq3idx = np.array( tq3idx )
finalT = np.zeros( (n_f+1,n_t) )
for i in range(1,n_f+1):
finalT[ i,t_range ] = final_array[tidx[i-1,:]]
finalU = np.zeros( (n_u,) )
finalU[v_range] = final_array[uidx]
finalU[vbc_point] = final_array[vuidx]
finalU[vbc_point2] = final_array[vu2idx]
finalU[vbc2_point] = final_array[v2uidx]
finalU[vbc2_point2] = final_array[v2u2idx]
finalP = np.zeros( (n_p,) )
finalP[p_range] = final_array[pidx]
# finalV = np.zeros( (n_f+1,) )
finalV = 1000.0*final_array[vidx]
finalV2 = 1000.0*final_array[v2idx]
final2V = 1000.0*final_array2[vidx]
final2V2 = 1000.0*final_array2[v2idx]
finalVU = final_array[vuidx]
finalVU2 = final_array[vu2idx]
finalV2U = final_array[v2uidx]
finalV2U2 = final_array[v2u2idx]
eng_p = finalP.max()
eng_f1 = eng_p * 2.0/0.1 * t_final**2 * (finalVU**2 + finalVU2**2)**0.5
eng_h1 = np.sum(finalV) * dt
eng_f2 = eng_p * 2.0/0.1 * t_final**2 * (finalV2U**2 + finalV2U2**2)**0.5
eng_h2 = np.sum(finalV2) * dt
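# eng_h1/eng_h2 above are rectangle-rule approximations of the time integral
# of each heater control signal over [0, t_final]: sum(v_k) * dt with the
# sampling step dt defined earlier (dt = 10).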
# tem = np.mean( final_array[ tidx[ 0:n_f/3, tqidx ] ] ) + np.mean( final_array[ tidx[ n_f/3:2*n_f/3, tq2idx ] ] ) + np.mean( final_array[ tidx[ 2*n_f/3:, tq3idx ] ] )
# tem = tem/3
# import ipdb; ipdb.set_trace()
# plot controls for the two cases
'''
plt.figure()
heat1_moving = np.zeros( (n_f+1,) )
heat1_moving[1:] = finalV
heat1_moving[0] = finalV[0]
heat2_moving = np.zeros( (n_f+1,) )
heat2_moving[1:] = finalV2
heat2_moving[0] = finalV2[0]
heat1_whole = np.zeros( (n_f+1,) )
heat1_whole[1:] = final2V
heat1_whole[0] = final2V[0]
heat2_whole = np.zeros( (n_f+1,) )
heat2_whole[1:] = final2V2
heat2_whole[0] = final2V2[0]
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
line1, = plt.step(time_axis,heat1_moving, color='b')
line2, = plt.step(time_axis,heat2_moving,color='b',linestyle="--")
line3, = plt.step(time_axis,heat1_whole,color='r')
line4, = plt.step(time_axis,heat2 | T = FunctionSpace(mesh, "CG", 1)
t_final = 300
# ##############
# t_final = t_final/3 # only works for MPC | random_line_split |
|
model_new2_plot.py | ( filename_lin, filename_final ):
fdata = h5py.File( filename_lin, "r" )
n_f = fdata[ "n_f" ].value
n_t = fdata[ "n_t" ].value
n_u = fdata[ "n_u" ].value
n_p = fdata[ "n_p" ].value
num_t = fdata[ "num_t" ].value
num_u = fdata[ "num_u" ].value
num_p = fdata[ "num_p" ].value
n_e1 = fdata[ "n_e1" ].value
n_e2 = fdata[ "n_e2" ].value
n_e3 = fdata[ "n_e3" ].value
t_range = fdata[ "t_range" ].value
v_range = fdata[ "v_range" ].value
p_range = fdata[ "p_range" ].value
vbc_point = fdata[ "vbc_point" ].value
vbc_point2 = fdata[ "vbc_point2" ].value
vbc2_point = fdata[ "vbc2_point" ].value
vbc2_point2 = fdata[ "vbc2_point2" ].value
tq_point = fdata[ "tq_point" ].value
tq_point2 = fdata[ "tq_point2" ].value
tq_point3 = fdata[ "tq_point3" ].value
# ipdb.set_trace()
final_array = np.load( filename_final )
return ( n_f, n_t, n_u, n_p,
num_t, num_u, num_p,
n_e1, n_e2, n_e3,
t_range, v_range, p_range,
vbc_point, vbc_point2, vbc2_point, vbc2_point2,
tq_point, tq_point2, tq_point3,
final_array )
( n_f, n_t, n_u, n_p,
num_t, num_u, num_p,
n_e1, n_e2, n_e3,
t_range, v_range, p_range,
vbc_point, vbc_point2, vbc2_point, vbc2_point2,
tq_point, tq_point2, tq_point3,
final_array ) = retrieve_result( "model_new2_lin.data",
(drt + "/results1.npy") )
final_array2 = np.load( (drt2 + "/results1.npy") )
# #############
# n_f = n_f/3 # for MPC only
# #############
num_lp = 1
n_total = n_f*( num_t+1+1 ) + num_u + num_p + ( 1 + 1 )*2
n_constraint = n_f*n_e1 + n_e2 + n_e3
tidx = np.arange( 0, n_f*num_t ).reshape( ( n_f, num_t ) ) # temperature indx
uidx = ( tidx.size +
np.arange( 0, num_u ) ) # velocity indx
pidx = ( tidx.size + uidx.size +
np.arange( 0, num_p ) ) # pressure indx
vidx = ( tidx.size + uidx.size + pidx.size +
np.arange( 0, n_f ) ) # heater control, indx
vuidx = ( tidx.size + uidx.size + pidx.size + vidx.size +
np.arange( 0, 1 ) ) # velocity control 1, indx
vu2idx = ( tidx.size + uidx.size + pidx.size + vidx.size + vuidx.size +
np.arange( 0, 1 ) ) # velocity control 2, indx
v2idx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size +
np.arange( 0, n_f ) ) # heater control, indx
v2uidx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size + v2idx.size +
np.arange(0,1) ) # velocity control 1 of N2, indx
v2u2idx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size +
v2idx.size + v2uidx.size +
np.arange(0,1) ) # velocity control 2 of N2, indx
e1idx = np.arange( 0, n_f*n_e1 ).reshape( ( n_f, n_e1 ) )
e2idx = ( e1idx.size +
np.arange( 0, n_e2 ) )
e3idx = ( e1idx.size + e2idx.size +
np.arange( 0, n_e3 ) )
tqidx = [] # index for target area
for i in tq_point:
tqidx.append( t_range.tolist().index(i) )
tqidx = np.array( tqidx )
tq2idx = [] # indx for target area 2
for i in tq_point2:
tq2idx.append( t_range.tolist().index(i) )
tq2idx = np.array( tq2idx )
tq3idx = [] # indx for target area 3
for i in tq_point3:
tq3idx.append( t_range.tolist().index(i) )
tq3idx = np.array( tq3idx )
finalT = np.zeros( (n_f+1,n_t) )
for i in range(1,n_f+1):
finalT[ i,t_range ] = final_array[tidx[i-1,:]]
finalU = np.zeros( (n_u,) )
finalU[v_range] = final_array[uidx]
finalU[vbc_point] = final_array[vuidx]
finalU[vbc_point2] = final_array[vu2idx]
finalU[vbc2_point] = final_array[v2uidx]
finalU[vbc2_point2] = final_array[v2u2idx]
finalP = np.zeros( (n_p,) )
finalP[p_range] = final_array[pidx]
# finalV = np.zeros( (n_f+1,) )
finalV = 1000.0*final_array[vidx]
finalV2 = 1000.0*final_array[v2idx]
final2V = 1000.0*final_array2[vidx]
final2V2 = 1000.0*final_array2[v2idx]
finalVU = final_array[vuidx]
finalVU2 = final_array[vu2idx]
finalV2U = final_array[v2uidx]
finalV2U2 = final_array[v2u2idx]
eng_p = finalP.max()
eng_f1 = eng_p * 2.0/0.1 * t_final**2 * (finalVU**2 + finalVU2**2)**0.5
eng_h1 = np.sum(finalV) * dt
eng_f2 = eng_p * 2.0/0.1 * t_final**2 * (finalV2U**2 + finalV2U2**2)**0.5
eng_h2 = np.sum(finalV2) * dt
# tem = np.mean( final_array[ tidx[ 0:n_f/3, tqidx ] ] ) + np.mean( final_array[ tidx[ n_f/3:2*n_f/3, tq2idx ] ] ) + np.mean( final_array[ tidx[ 2*n_f/3:, tq3idx ] ] )
# tem = tem/3
# import ipdb; ipdb.set_trace()
# plot controls for the two cases
'''
plt.figure()
heat1_moving = np.zeros( (n_f+1,) )
heat1_moving[1:] = finalV
heat1_moving[0] = finalV[0]
heat2_moving = np.zeros( (n_f+1,) )
heat2_moving[1:] = finalV2
heat2_moving[0] = finalV2[0]
heat1_whole = np.zeros( (n_f+1,) )
heat1_whole[1:] = final2V
heat1_whole[0] = final2V[0]
heat2_whole = np.zeros( (n_f+1,) )
heat2_whole[1:] = final2V2
heat2_whole[0] = final2V2[0]
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
line1, = plt.step(time_axis,heat1_moving, color='b')
line2, = plt.step(time_axis,heat2_moving,color='b',linestyle="--")
line3, = plt.step(time_axis,heat1_whole,color='r')
line4, = plt.step(time_axis,heat2_whole,color='r',linestyle='--')
plt.xlabel('Time (s)')
plt.ylim(0.0,300)
plt.grid()
plt.savefig((drt + '/linear_heat.pdf'), dpi=1000, format='pdf')
plt.close()
# import ipdb; ipdb.set_trace()
| retrieve_result | identifier_name |
|
model_new2_plot.py | tq_point3 = fdata[ "tq_point3" ].value
# ipdb.set_trace()
final_array = np.load( filename_final )
return ( n_f, n_t, n_u, n_p,
num_t, num_u, num_p,
n_e1, n_e2, n_e3,
t_range, v_range, p_range,
vbc_point, vbc_point2, vbc2_point, vbc2_point2,
tq_point, tq_point2, tq_point3,
final_array )
( n_f, n_t, n_u, n_p,
num_t, num_u, num_p,
n_e1, n_e2, n_e3,
t_range, v_range, p_range,
vbc_point, vbc_point2, vbc2_point, vbc2_point2,
tq_point, tq_point2, tq_point3,
final_array ) = retrieve_result( "model_new2_lin.data",
(drt + "/results1.npy") )
final_array2 = np.load( (drt2 + "/results1.npy") )
# #############
# n_f = n_f/3 # for MPC only
# #############
num_lp = 1
n_total = n_f*( num_t+1+1 ) + num_u + num_p + ( 1 + 1 )*2
n_constraint = n_f*n_e1 + n_e2 + n_e3
tidx = np.arange( 0, n_f*num_t ).reshape( ( n_f, num_t ) ) # temperature indx
uidx = ( tidx.size +
np.arange( 0, num_u ) ) # velocity indx
pidx = ( tidx.size + uidx.size +
np.arange( 0, num_p ) ) # pressure indx
vidx = ( tidx.size + uidx.size + pidx.size +
np.arange( 0, n_f ) ) # heater control, indx
vuidx = ( tidx.size + uidx.size + pidx.size + vidx.size +
np.arange( 0, 1 ) ) # velocity control 1, indx
vu2idx = ( tidx.size + uidx.size + pidx.size + vidx.size + vuidx.size +
np.arange( 0, 1 ) ) # velocity control 2, indx
v2idx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size +
np.arange( 0, n_f ) ) # heater control, indx
v2uidx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size + v2idx.size +
np.arange(0,1) ) # velocity control 1 of N2, indx
v2u2idx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size +
v2idx.size + v2uidx.size +
np.arange(0,1) ) # velocity control 2 of N2, indx
e1idx = np.arange( 0, n_f*n_e1 ).reshape( ( n_f, n_e1 ) )
e2idx = ( e1idx.size +
np.arange( 0, n_e2 ) )
e3idx = ( e1idx.size + e2idx.size +
np.arange( 0, n_e3 ) )
tqidx = [] # index for target area
for i in tq_point:
tqidx.append( t_range.tolist().index(i) )
tqidx = np.array( tqidx )
tq2idx = [] # indx for target area 2
for i in tq_point2:
tq2idx.append( t_range.tolist().index(i) )
tq2idx = np.array( tq2idx )
tq3idx = [] # indx for target area 3
for i in tq_point3:
tq3idx.append( t_range.tolist().index(i) )
tq3idx = np.array( tq3idx )
finalT = np.zeros( (n_f+1,n_t) )
for i in range(1,n_f+1):
finalT[ i,t_range ] = final_array[tidx[i-1,:]]
finalU = np.zeros( (n_u,) )
finalU[v_range] = final_array[uidx]
finalU[vbc_point] = final_array[vuidx]
finalU[vbc_point2] = final_array[vu2idx]
finalU[vbc2_point] = final_array[v2uidx]
finalU[vbc2_point2] = final_array[v2u2idx]
finalP = np.zeros( (n_p,) )
finalP[p_range] = final_array[pidx]
# finalV = np.zeros( (n_f+1,) )
finalV = 1000.0*final_array[vidx]
finalV2 = 1000.0*final_array[v2idx]
final2V = 1000.0*final_array2[vidx]
final2V2 = 1000.0*final_array2[v2idx]
finalVU = final_array[vuidx]
finalVU2 = final_array[vu2idx]
finalV2U = final_array[v2uidx]
finalV2U2 = final_array[v2u2idx]
eng_p = finalP.max()
eng_f1 = eng_p * 2.0/0.1 * t_final**2 * (finalVU**2 + finalVU2**2)**0.5
eng_h1 = np.sum(finalV) * dt
eng_f2 = eng_p * 2.0/0.1 * t_final**2 * (finalV2U**2 + finalV2U2**2)**0.5
eng_h2 = np.sum(finalV2) * dt
# tem = np.mean( final_array[ tidx[ 0:n_f/3, tqidx ] ] ) + np.mean( final_array[ tidx[ n_f/3:2*n_f/3, tq2idx ] ] ) + np.mean( final_array[ tidx[ 2*n_f/3:, tq3idx ] ] )
# tem = tem/3
# import ipdb; ipdb.set_trace()
# plot controls for the two cases
'''
plt.figure()
heat1_moving = np.zeros( (n_f+1,) )
heat1_moving[1:] = finalV
heat1_moving[0] = finalV[0]
heat2_moving = np.zeros( (n_f+1,) )
heat2_moving[1:] = finalV2
heat2_moving[0] = finalV2[0]
heat1_whole = np.zeros( (n_f+1,) )
heat1_whole[1:] = final2V
heat1_whole[0] = final2V[0]
heat2_whole = np.zeros( (n_f+1,) )
heat2_whole[1:] = final2V2
heat2_whole[0] = final2V2[0]
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
line1, = plt.step(time_axis,heat1_moving, color='b')
line2, = plt.step(time_axis,heat2_moving,color='b',linestyle="--")
line3, = plt.step(time_axis,heat1_whole,color='r')
line4, = plt.step(time_axis,heat2_whole,color='r',linestyle='--')
plt.xlabel('Time (s)')
plt.ylim(0.0,300)
plt.grid()
plt.savefig((drt + '/linear_heat.pdf'), dpi=1000, format='pdf')
plt.close()
# import ipdb; ipdb.set_trace()
'''
# plot velocity in matplot
plt.figure | fdata = h5py.File( filename_lin, "r" )
n_f = fdata[ "n_f" ].value
n_t = fdata[ "n_t" ].value
n_u = fdata[ "n_u" ].value
n_p = fdata[ "n_p" ].value
num_t = fdata[ "num_t" ].value
num_u = fdata[ "num_u" ].value
num_p = fdata[ "num_p" ].value
n_e1 = fdata[ "n_e1" ].value
n_e2 = fdata[ "n_e2" ].value
n_e3 = fdata[ "n_e3" ].value
t_range = fdata[ "t_range" ].value
v_range = fdata[ "v_range" ].value
p_range = fdata[ "p_range" ].value
vbc_point = fdata[ "vbc_point" ].value
vbc_point2 = fdata[ "vbc_point2" ].value
vbc2_point = fdata[ "vbc2_point" ].value
vbc2_point2 = fdata[ "vbc2_point2" ].value
tq_point = fdata[ "tq_point" ].value
tq_point2 = fdata[ "tq_point2" ].value | identifier_body |
|
model_new2_plot.py | 2_lin.data",
(drt + "/results1.npy") )
final_array2 = np.load( (drt2 + "/results1.npy") )
# #############
# n_f = n_f/3 # for MPC only
# #############
num_lp = 1
n_total = n_f*( num_t+1+1 ) + num_u + num_p + ( 1 + 1 )*2
n_constraint = n_f*n_e1 + n_e2 + n_e3
tidx = np.arange( 0, n_f*num_t ).reshape( ( n_f, num_t ) ) # temperature indx
uidx = ( tidx.size +
np.arange( 0, num_u ) ) # velocity indx
pidx = ( tidx.size + uidx.size +
np.arange( 0, num_p ) ) # pressure indx
vidx = ( tidx.size + uidx.size + pidx.size +
np.arange( 0, n_f ) ) # heater control, indx
vuidx = ( tidx.size + uidx.size + pidx.size + vidx.size +
np.arange( 0, 1 ) ) # velocity control 1, indx
vu2idx = ( tidx.size + uidx.size + pidx.size + vidx.size + vuidx.size +
np.arange( 0, 1 ) ) # velocity control 2, indx
v2idx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size +
np.arange( 0, n_f ) ) # heater control, indx
v2uidx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size + v2idx.size +
np.arange(0,1) ) # velocity control 1 of N2, indx
v2u2idx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size +
v2idx.size + v2uidx.size +
np.arange(0,1) ) # velocity control 2 of N2, indx
e1idx = np.arange( 0, n_f*n_e1 ).reshape( ( n_f, n_e1 ) )
e2idx = ( e1idx.size +
np.arange( 0, n_e2 ) )
e3idx = ( e1idx.size + e2idx.size +
np.arange( 0, n_e3 ) )
tqidx = [] # index for target area
for i in tq_point:
tqidx.append( t_range.tolist().index(i) )
tqidx = np.array( tqidx )
tq2idx = [] # indx for target area 2
for i in tq_point2:
tq2idx.append( t_range.tolist().index(i) )
tq2idx = np.array( tq2idx )
tq3idx = [] # indx for target area 3
for i in tq_point3:
tq3idx.append( t_range.tolist().index(i) )
tq3idx = np.array( tq3idx )
finalT = np.zeros( (n_f+1,n_t) )
for i in range(1,n_f+1):
finalT[ i,t_range ] = final_array[tidx[i-1,:]]
finalU = np.zeros( (n_u,) )
finalU[v_range] = final_array[uidx]
finalU[vbc_point] = final_array[vuidx]
finalU[vbc_point2] = final_array[vu2idx]
finalU[vbc2_point] = final_array[v2uidx]
finalU[vbc2_point2] = final_array[v2u2idx]
finalP = np.zeros( (n_p,) )
finalP[p_range] = final_array[pidx]
# finalV = np.zeros( (n_f+1,) )
finalV = 1000.0*final_array[vidx]
finalV2 = 1000.0*final_array[v2idx]
final2V = 1000.0*final_array2[vidx]
final2V2 = 1000.0*final_array2[v2idx]
finalVU = final_array[vuidx]
finalVU2 = final_array[vu2idx]
finalV2U = final_array[v2uidx]
finalV2U2 = final_array[v2u2idx]
eng_p = finalP.max()
eng_f1 = eng_p * 2.0/0.1 * t_final**2 * (finalVU**2 + finalVU2**2)**0.5
eng_h1 = np.sum(finalV) * dt
eng_f2 = eng_p * 2.0/0.1 * t_final**2 * (finalV2U**2 + finalV2U2**2)**0.5
eng_h2 = np.sum(finalV2) * dt
# tem = np.mean( final_array[ tidx[ 0:n_f/3, tqidx ] ] ) + np.mean( final_array[ tidx[ n_f/3:2*n_f/3, tq2idx ] ] ) + np.mean( final_array[ tidx[ 2*n_f/3:, tq3idx ] ] )
# tem = tem/3
# import ipdb; ipdb.set_trace()
# plot controls for the two cases
'''
plt.figure()
heat1_moving = np.zeros( (n_f+1,) )
heat1_moving[1:] = finalV
heat1_moving[0] = finalV[0]
heat2_moving = np.zeros( (n_f+1,) )
heat2_moving[1:] = finalV2
heat2_moving[0] = finalV2[0]
heat1_whole = np.zeros( (n_f+1,) )
heat1_whole[1:] = final2V
heat1_whole[0] = final2V[0]
heat2_whole = np.zeros( (n_f+1,) )
heat2_whole[1:] = final2V2
heat2_whole[0] = final2V2[0]
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
line1, = plt.step(time_axis,heat1_moving, color='b')
line2, = plt.step(time_axis,heat2_moving,color='b',linestyle="--")
line3, = plt.step(time_axis,heat1_whole,color='r')
line4, = plt.step(time_axis,heat2_whole,color='r',linestyle='--')
plt.xlabel('Time (s)')
plt.ylim(0.0,300)
plt.grid()
plt.savefig((drt + '/linear_heat.pdf'), dpi=1000, format='pdf')
plt.close()
# import ipdb; ipdb.set_trace()
'''
# plot velocity in matplot
plt.figure()
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
########################
contalpha = 0.5
wallthick = 0.5
wallalpha = 0.25
wallcolor = '#2e3436'
heateralpha = 0.4
heatercolor = '#3465A4'
omegazdict = { 'width': 2,
'height': 2,
'boxstyle': patches.BoxStyle('Round', pad=0.15),
'linewidth': 1.0,
'color': 'black',
'zorder': 15,
'fill': False }
heaterdict = { 'width': 1,
'height': 1,
'boxstyle': patches.BoxStyle('Round',pad=0.15),
'linewidth': 1.0,
'edgecolor': 'black',
'alpha': heateralpha,
'facecolor': heatercolor,
'zorder': 5,
'fill': True }
walldict = { 'fill': True,
'color': wallcolor,
'linewidth': 0,
'zorder': 5,
'alpha': wallalpha }
#############
XU = V.dofmap().tabulate_all_coordinates(mesh)
v_dim = V.dim()
XU.resize((V.dim(),2))
xu_cor = XU[::2,0]
# xv_cor = XU[1::2,0]
yu_cor = XU[::2,1]
# yv_cor = XU[1::2,1]
dx = 0.3
dy = 0.3
( xm, ym ) = np.meshgrid( np.arange( xu_cor.min(), xu_cor.max(), dx ),
np.arange( yu_cor.min(), yu_cor.max(), dy ) )
# linear interplation
u_x = finalU[::2]
u_y = finalU[1::2]
ipdb.set_trace()
for i in range( len( u_x ) ):
| u_x[i] = np.sign( u_x[i] ) * abs( u_x[i] )**(0.7)
u_y[i] = np.sign( u_y[i] ) * abs( u_y[i] )**(0.7) | conditional_block |
|
full_section_then_mfovs_thumbs_blobs.py | blob_detector = BlobDetector2D.create_detector(**blob_detector_args)
# threadLocal.blob_detector = blob_detector
all_kps_descs = [[], []]
for tile in mfov.tiles():
thumb_img_fname = "thumbnail_{}.jpg".format(os.path.splitext(os.path.basename(tile.img_fname))[0])
thumb_img_fname = os.path.join(os.path.dirname(tile.img_fname), thumb_img_fname)
# Read the tile
thumb_img = cv2.imread(thumb_img_fname, 0)
kps, descs = blob_detector.detectAndCompute(thumb_img)
if len(kps) == 0:
continue
kps_pts = np.empty((len(kps), 2), dtype=np.float64)
for kp_i, kp in enumerate(kps):
kps_pts[kp_i][:] = kp.pt
# upsample the thumbnail coordinates to original tile coordinates
us_x = tile.width / thumb_img.shape[1]
us_y = tile.height / thumb_img.shape[0]
kps_pts[:, 0] *= us_x
kps_pts[:, 1] *= us_y
# Apply the transformation to the points
assert(len(tile.transforms) == 1)
model = tile.transforms[0]
kps_pts = model.apply(kps_pts)
all_kps_descs[0].extend(kps_pts)
all_kps_descs[1].extend(descs)
logger.report_event("Found {} blobs in section {}, mfov {}".format(len(all_kps_descs[0]), mfov.layer, mfov.mfov_index), log_level=logging.INFO)
return mfov.mfov_index, all_kps_descs
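# The coordinate upsampling above, as a tiny worked example with made-up
# sizes: if a tile is 3876x3376 px and its thumbnail is 484x422 px, then
# us_x = 3876/484 and us_y = 3376/422, so a blob detected at thumbnail
# coordinate (100, 50) maps back to roughly (100*us_x, 50*us_y) in full-tile
# space before the tile's own transform is applied.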
def compute_section_blobs(self, sec, sec_cache, pool):
# Create nested caches if needed
if "pre_match_blobs" not in sec_cache:
#sec_cache.create_dict("pre_match_blobs")
sec_cache["pre_match_blobs"] = {}
total_features_num = 0
# create the mfovs blob computation jobs
async_results = []
for mfov in sec.mfovs():
if mfov in sec_cache["pre_match_blobs"]:
continue
res = pool.apply_async(PreMatch3DFullSectionThenMfovsThumbsBlobs.detect_mfov_blobs, (self._kwargs.get("blob_detector", {}), mfov))
async_results.append(res)
for res in async_results:
mfov_index, mfov_kps_descs = res.get()
#sec_cache["pre_match_blobs"].create_dict(mfov_index)
sec_cache["pre_match_blobs"][mfov_index] = mfov_kps_descs
total_features_num += len(mfov_kps_descs[0])
return total_features_num
@staticmethod
def collect_all_features(sec_cache):
# TODO - need to see if pre-allocation can improve speed
all_kps_arrays = [kps_descs[0] for kps_descs in sec_cache["pre_match_blobs"].values() if len(kps_descs[0]) > 0]
all_descs_arrays = [kps_descs[1] for kps_descs in sec_cache["pre_match_blobs"].values() if len(kps_descs[1]) > 0]
return np.vstack(all_kps_arrays), np.vstack(all_descs_arrays)
@staticmethod
def get_overlapping_mfovs(mfov1, sec2, sec1_to_sec2_model, sec2_rtree):
# TODO - for single beam data, it might be better to take the boundaries of all tiles in mfov1,
# and return their overlapping mfovs on sec2
# Take mfov1's center
|
@staticmethod
def match_mfovs_features(matcher_params, sec1_cache, sec2_cache, mfovs1, mfovs2):
"""
Matches the features in mfovs1 (of sec1) to the features in mfovs2 (of sec2).
This method is run by a process that loads the matcher from its local thread storage.
"""
thread_local_store = ThreadLocalStorageLRU()
if 'matcher' in thread_local_store.keys():
matcher = thread_local_store['matcher']
else:
# Initialize the matcher, and store it in the local thread storage
matcher = FeaturesMatcher(BlobDetector2D.create_matcher, **matcher_params)
thread_local_store['matcher'] = matcher
# matcher = getattr(threadLocal, 'matcher', None)
# if matcher is None:
# # Initialize the matcher, and store it in the local thread storage
# matcher = FeaturesMatcher(BlobDetector2D.create_matcher, **matcher_params)
# threadLocal.matcher = matcher
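# The block above memoizes one FeaturesMatcher per worker so repeated calls in
# the same process skip re-construction. The generic pattern, sketched
# standalone (assumption based on the usage above: ThreadLocalStorageLRU
# behaves like a per-thread dict with keys()/getitem/setitem):
# store = ThreadLocalStorageLRU()
# def get_or_create(key, factory):
#     if key in store.keys():
#         return store[key]
#     obj = factory()
#     store[key] = obj
#     return obj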
def get_kps_descs(mfovs, sec_cache):
mfovs = list(mfovs)
if len(mfovs) == 1:
mfovs_kps = np.array(sec_cache["pre_match_blobs"][mfovs[0]][0])
mfovs_descs = np.array(sec_cache["pre_match_blobs"][mfovs[0]][1])
else:
mfovs_kps_arrays = []
mfovs_descs_arrays = []
for mfov in mfovs:
kps_descs = sec_cache["pre_match_blobs"][mfov]
if len(kps_descs[0]) > 0:
mfovs_kps_arrays.append(kps_descs[0])
mfovs_descs_arrays.append(kps_descs[1])
if len(mfovs_kps_arrays) == 0:
mfovs_kps = np.array([])
mfovs_descs = np.array([])
elif len(mfovs_kps_arrays) == 1:
mfovs_kps = mfovs_kps_arrays[0]
mfovs_descs = mfovs_descs_arrays[0]
else:
mfovs_kps = np.vstack(mfovs_kps_arrays)
mfovs_descs = np.vstack(mfovs_descs_arrays)
return np.array(mfovs_kps), np.array(mfovs_descs)
mfovs1_kps, mfovs1_descs = get_kps_descs(mfovs1, sec1_cache)
mfovs2_kps, mfovs2_descs = get_kps_descs(mfovs2, sec2_cache)
model, filtered_matches = matcher.match_and_filter(mfovs1_kps, mfovs1_descs, mfovs2_kps, mfovs2_descs)
return mfovs1, model, filtered_matches
def pre_match_sections(self, sec1, sec2, sec1_cache, sec2_cache, pool):
"""
Performs a section to section pre-matching by detecting blobs in each section,
then performing a global section matching, and then a per-mfov (of sec1) refinement of the matches.
Returns a map between an mfov of sec1, and a tuple that holds its transformation model to sec2, and the filtered_matches
"""
pre_match_res = {}
# dispatch blob computation
sec1_features_num = self.compute_section_blobs(sec1, sec1_cache, pool)
sec2_features_num = self.compute_section_blobs(sec2, sec2_cache, pool)
# compute a section to section global affine transform
# collect all features for each section
sec1_kps, sec1_descs = PreMatch3DFullSectionThenMfovsThumbsBlobs.collect_all_features(sec1_cache)
sec2_kps, sec2_descs = PreMatch3DFullSectionThenMfovsThumbsBlobs.collect_all_features(sec2_cache)
global_model, global_filtered_matches = self._matcher.match_and_filter(sec1_kps, sec1_descs, sec2_kps, sec2_descs)
if global_model is None:
logger.report_event("No global model found between section {} (all mfovs) and section {} (all mfovs)".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.WARNING)
# TODO - write to log, and return None
return None
logger.report_event("Global model found between section {} (all mfovs) and section {} (all mfovs):\n{}".format(sec1.canonical_section_name, sec2.canonical_section_name, global | mfov1_center = np.array([
(mfov1.bbox[0] + mfov1.bbox[1]) / 2,
(mfov1.bbox[2] + mfov1.bbox[3]) / 2
])
# Add the triangle points
sec1_points = PreMatch3DFullSectionThenMfovsThumbsBlobs.OVERLAP_DELTAS + mfov1_center
sec1_on_sec2_points = sec1_to_sec2_model.apply(sec1_points)
overlapping_mfovs = set()
for sec1_on_sec2_point in sec1_on_sec2_points:
rect_res = sec2_rtree.search([sec1_on_sec2_point[0], sec1_on_sec2_point[0] + 1, sec1_on_sec2_point[1], sec1_on_sec2_point[1] + 1])
for other_t in rect_res:
overlapping_mfovs.add(other_t.mfov_index)
return overlapping_mfovs | identifier_body |
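# Standalone sketch of the overlap test used in get_overlapping_mfovs: probe a
# few points around the mfov centre, map them through the section1->section2
# model, and collect the mfov indices of section-2 tiles whose bounding boxes
# contain the mapped points. The search() argument order [min_x, max_x, min_y,
# max_y] follows the interleaved=False rtree built in pre_match_sections, and
# the 1x1 box standing in for a point query mirrors the call above.
def probe_overlaps(points_sec1, model, rtree):
    hits = set()
    for p in model.apply(points_sec1):
        for tile in rtree.search([p[0], p[0] + 1, p[1], p[1] + 1]):
            hits.add(tile.mfov_index)
    return hits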
full_section_then_mfovs_thumbs_blobs.py | blob_detector = BlobDetector2D.create_detector(**blob_detector_args)
# threadLocal.blob_detector = blob_detector
all_kps_descs = [[], []]
for tile in mfov.tiles():
thumb_img_fname = "thumbnail_{}.jpg".format(os.path.splitext(os.path.basename(tile.img_fname))[0])
thumb_img_fname = os.path.join(os.path.dirname(tile.img_fname), thumb_img_fname)
# Read the tile
thumb_img = cv2.imread(thumb_img_fname, 0)
kps, descs = blob_detector.detectAndCompute(thumb_img)
if len(kps) == 0:
continue
kps_pts = np.empty((len(kps), 2), dtype=np.float64)
for kp_i, kp in enumerate(kps):
kps_pts[kp_i][:] = kp.pt
# upsample the thumbnail coordinates to original tile coordinates
us_x = tile.width / thumb_img.shape[1]
us_y = tile.height / thumb_img.shape[0]
kps_pts[:, 0] *= us_x
kps_pts[:, 1] *= us_y
# Apply the transformation to the points
assert(len(tile.transforms) == 1)
model = tile.transforms[0]
kps_pts = model.apply(kps_pts)
all_kps_descs[0].extend(kps_pts)
all_kps_descs[1].extend(descs)
logger.report_event("Found {} blobs in section {}, mfov {}".format(len(all_kps_descs[0]), mfov.layer, mfov.mfov_index), log_level=logging.INFO)
return mfov.mfov_index, all_kps_descs
def compute_section_blobs(self, sec, sec_cache, pool):
# Create nested caches if needed
if "pre_match_blobs" not in sec_cache:
#sec_cache.create_dict("pre_match_blobs")
sec_cache["pre_match_blobs"] = {}
total_features_num = 0
# create the mfovs blob computation jobs
async_results = []
for mfov in sec.mfovs():
if mfov in sec_cache["pre_match_blobs"]:
continue
res = pool.apply_async(PreMatch3DFullSectionThenMfovsThumbsBlobs.detect_mfov_blobs, (self._kwargs.get("blob_detector", {}), mfov))
async_results.append(res)
for res in async_results:
mfov_index, mfov_kps_descs = res.get()
#sec_cache["pre_match_blobs"].create_dict(mfov_index)
sec_cache["pre_match_blobs"][mfov_index] = mfov_kps_descs
total_features_num += len(mfov_kps_descs[0])
return total_features_num
@staticmethod
def collect_all_features(sec_cache):
# TODO - need to see if pre-allocation can improve speed
all_kps_arrays = [kps_descs[0] for kps_descs in sec_cache["pre_match_blobs"].values() if len(kps_descs[0]) > 0]
all_descs_arrays = [kps_descs[1] for kps_descs in sec_cache["pre_match_blobs"].values() if len(kps_descs[1]) > 0]
return np.vstack(all_kps_arrays), np.vstack(all_descs_arrays)
@staticmethod
def get_overlapping_mfovs(mfov1, sec2, sec1_to_sec2_model, sec2_rtree):
# TODO - for single beam data, it might be better to take the boundaries of all tiles in mfov1,
# and return their overlapping mfovs on sec2
# Take mfov1's center
mfov1_center = np.array([
(mfov1.bbox[0] + mfov1.bbox[1]) / 2,
(mfov1.bbox[2] + mfov1.bbox[3]) / 2
])
# Add the triangle points
sec1_points = PreMatch3DFullSectionThenMfovsThumbsBlobs.OVERLAP_DELTAS + mfov1_center
sec1_on_sec2_points = sec1_to_sec2_model.apply(sec1_points)
overlapping_mfovs = set()
for sec1_on_sec2_point in sec1_on_sec2_points:
rect_res = sec2_rtree.search([sec1_on_sec2_point[0], sec1_on_sec2_point[0] + 1, sec1_on_sec2_point[1], sec1_on_sec2_point[1] + 1])
for other_t in rect_res:
overlapping_mfovs.add(other_t.mfov_index)
return overlapping_mfovs
@staticmethod
def | (matcher_params, sec1_cache, sec2_cache, mfovs1, mfovs2):
"""
Matches the features in mfovs1 (of sec1) to the features in mfovs2 (of sec2).
This method is run by a process that loads the matcher from its local thread storage.
"""
thread_local_store = ThreadLocalStorageLRU()
if 'matcher' in thread_local_store.keys():
matcher = thread_local_store['matcher']
else:
# Initialize the matcher, and store it in the local thread storage
matcher = FeaturesMatcher(BlobDetector2D.create_matcher, **matcher_params)
thread_local_store['matcher'] = matcher
# matcher = getattr(threadLocal, 'matcher', None)
# if matcher is None:
# # Initialize the matcher, and store it in the local thread storage
# matcher = FeaturesMatcher(BlobDetector2D.create_matcher, **matcher_params)
# threadLocal.matcher = matcher
def get_kps_descs(mfovs, sec_cache):
mfovs = list(mfovs)
if len(mfovs) == 1:
mfovs_kps = np.array(sec_cache["pre_match_blobs"][mfovs[0]][0])
mfovs_descs = np.array(sec_cache["pre_match_blobs"][mfovs[0]][1])
else:
mfovs_kps_arrays = []
mfovs_descs_arrays = []
for mfov in mfovs:
kps_descs = sec_cache["pre_match_blobs"][mfov]
if len(kps_descs[0]) > 0:
mfovs_kps_arrays.append(kps_descs[0])
mfovs_descs_arrays.append(kps_descs[1])
if len(mfovs_kps_arrays) == 0:
mfovs_kps = np.array([])
mfovs_descs = np.array([])
elif len(mfovs_kps_arrays) == 1:
mfovs_kps = mfovs_kps_arrays[0]
mfovs_descs = mfovs_descs_arrays[0]
else:
mfovs_kps = np.vstack(mfovs_kps_arrays)
mfovs_descs = np.vstack(mfovs_descs_arrays)
return np.array(mfovs_kps), np.array(mfovs_descs)
mfovs1_kps, mfovs1_descs = get_kps_descs(mfovs1, sec1_cache)
mfovs2_kps, mfovs2_descs = get_kps_descs(mfovs2, sec2_cache)
model, filtered_matches = matcher.match_and_filter(mfovs1_kps, mfovs1_descs, mfovs2_kps, mfovs2_descs)
return mfovs1, model, filtered_matches
def pre_match_sections(self, sec1, sec2, sec1_cache, sec2_cache, pool):
"""
Performs a section to section pre-matching by detecting blobs in each section,
then performing a global section matching, and then a per-mfov (of sec1) refinement of the matches.
Returns a map between an mfov of sec1, and a tuple that holds its transformation model to sec2, and the filtered_matches
"""
pre_match_res = {}
# dispatch blob computation
sec1_features_num = self.compute_section_blobs(sec1, sec1_cache, pool)
sec2_features_num = self.compute_section_blobs(sec2, sec2_cache, pool)
# compute a section to section global affine transform
# collect all features for each section
sec1_kps, sec1_descs = PreMatch3DFullSectionThenMfovsThumbsBlobs.collect_all_features(sec1_cache)
sec2_kps, sec2_descs = PreMatch3DFullSectionThenMfovsThumbsBlobs.collect_all_features(sec2_cache)
global_model, global_filtered_matches = self._matcher.match_and_filter(sec1_kps, sec1_descs, sec2_kps, sec2_descs)
if global_model is None:
logger.report_event("No global model found between section {} (all mfovs) and section {} (all mfovs)".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.WARNING)
# TODO - write to log, and return None
return None
logger.report_event("Global model found between section {} (all mfovs) and section {} (all mfovs):\n{}".format(sec1.canonical_section_name, sec2.canonical_section_name, | match_mfovs_features | identifier_name |
full_section_then_mfovs_thumbs_blobs.py | (self, sec, sec_cache, pool):
# Create nested caches is needed
if "pre_match_blobs" not in sec_cache:
#sec_cache.create_dict("pre_match_blobs")
sec_cache["pre_match_blobs"] = {}
total_features_num = 0
# create the mfovs blob computation jobs
async_results = []
for mfov in sec.mfovs():
if mfov in sec_cache["pre_match_blobs"]:
continue
res = pool.apply_async(PreMatch3DFullSectionThenMfovsThumbsBlobs.detect_mfov_blobs, (self._kwargs.get("blob_detector", {}), mfov))
async_results.append(res)
for res in async_results:
mfov_index, mfov_kps_descs = res.get()
#sec_cache["pre_match_blobs"].create_dict(mfov_index)
sec_cache["pre_match_blobs"][mfov_index] = mfov_kps_descs
total_features_num += len(mfov_kps_descs[0])
return total_features_num
@staticmethod
def collect_all_features(sec_cache):
# TODO - need to see if pre-allocation can improve speed
all_kps_arrays = [kps_descs[0] for kps_descs in sec_cache["pre_match_blobs"].values() if len(kps_descs[0]) > 0]
all_descs_arrays = [kps_descs[1] for kps_descs in sec_cache["pre_match_blobs"].values() if len(kps_descs[1]) > 0]
return np.vstack(all_kps_arrays), np.vstack(all_descs_arrays)
@staticmethod
def get_overlapping_mfovs(mfov1, sec2, sec1_to_sec2_model, sec2_rtree):
# TODO - for single beam data, it might be better to take the boundaries of all tiles in mfov1,
# and return their overlapping mfovs on sec2
# Take mfov1's center
mfov1_center = np.array([
(mfov1.bbox[0] + mfov1.bbox[1]) / 2,
(mfov1.bbox[2] + mfov1.bbox[3]) / 2
])
# Add the triangle points
sec1_points = PreMatch3DFullSectionThenMfovsThumbsBlobs.OVERLAP_DELTAS + mfov1_center
sec1_on_sec2_points = sec1_to_sec2_model.apply(sec1_points)
overlapping_mfovs = set()
for sec1_on_sec2_point in sec1_on_sec2_points:
rect_res = sec2_rtree.search([sec1_on_sec2_point[0], sec1_on_sec2_point[0] + 1, sec1_on_sec2_point[1], sec1_on_sec2_point[1] + 1])
for other_t in rect_res:
overlapping_mfovs.add(other_t.mfov_index)
return overlapping_mfovs
@staticmethod
def match_mfovs_features(matcher_params, sec1_cache, sec2_cache, mfovs1, mfovs2):
"""
Matches the features in mfovs1 (of sec1) to the features in mfovs2 (of sec2).
This method is run by a process that loads the matcher from its local thread storage.
"""
thread_local_store = ThreadLocalStorageLRU()
if 'matcher' in thread_local_store.keys():
matcher = thread_local_store['matcher']
else:
# Initialize the matcher, and store it in the local thread storage
matcher = FeaturesMatcher(BlobDetector2D.create_matcher, **matcher_params)
thread_local_store['matcher'] = matcher
# matcher = getattr(threadLocal, 'matcher', None)
# if matcher is None:
# # Initialize the matcher, and store it in the local thread storage
# matcher = FeaturesMatcher(BlobDetector2D.create_matcher, **matcher_params)
# threadLocal.matcher = matcher
def get_kps_descs(mfovs, sec_cache):
mfovs = list(mfovs)
if len(mfovs) == 1:
mfovs_kps = np.array(sec_cache["pre_match_blobs"][mfovs[0]][0])
mfovs_descs = np.array(sec_cache["pre_match_blobs"][mfovs[0]][1])
else:
mfovs_kps_arrays = []
mfovs_descs_arrays = []
for mfov in mfovs:
kps_descs = sec_cache["pre_match_blobs"][mfov]
if len(kps_descs[0]) > 0:
mfovs_kps_arrays.append(kps_descs[0])
mfovs_descs_arrays.append(kps_descs[1])
if len(mfovs_kps_arrays) == 0:
mfovs_kps = np.array([])
mfovs_descs = np.array([])
elif len(mfovs_kps_arrays) == 1:
mfovs_kps = mfovs_kps_arrays[0]
mfovs_descs = mfovs_descs_arrays[0]
else:
mfovs_kps = np.vstack(mfovs_kps_arrays)
mfovs_descs = np.vstack(mfovs_descs_arrays)
return np.array(mfovs_kps), np.array(mfovs_descs)
mfovs1_kps, mfovs1_descs = get_kps_descs(mfovs1, sec1_cache)
mfovs2_kps, mfovs2_descs = get_kps_descs(mfovs2, sec2_cache)
model, filtered_matches = matcher.match_and_filter(mfovs1_kps, mfovs1_descs, mfovs2_kps, mfovs2_descs)
return mfovs1, model, filtered_matches
def pre_match_sections(self, sec1, sec2, sec1_cache, sec2_cache, pool):
"""
Performs a section to section pre-matching by detecting blobs in each section,
then performing a global section matching, and then a per-mfov (of sec1) refinement of the matches.
Returns a map between an mfov of sec1, and a tuple that holds its transformation model to sec2, and the filtered_matches
"""
pre_match_res = {}
# dispatch blob computation
sec1_features_num = self.compute_section_blobs(sec1, sec1_cache, pool)
sec2_features_num = self.compute_section_blobs(sec2, sec2_cache, pool)
# compute a section to section global affine transform
# collect all features for each section
sec1_kps, sec1_descs = PreMatch3DFullSectionThenMfovsThumbsBlobs.collect_all_features(sec1_cache)
sec2_kps, sec2_descs = PreMatch3DFullSectionThenMfovsThumbsBlobs.collect_all_features(sec2_cache)
global_model, global_filtered_matches = self._matcher.match_and_filter(sec1_kps, sec1_descs, sec2_kps, sec2_descs)
if global_model is None:
logger.report_event("No global model found between section {} (all mfovs) and section {} (all mfovs)".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.WARNING)
# TODO - write to log, and return None
return None
logger.report_event("Global model found between section {} (all mfovs) and section {} (all mfovs):\n{}".format(sec1.canonical_section_name, sec2.canonical_section_name, global_model.get_matrix()), log_level=logging.INFO)
print("DECOMPOSED MATRIX: ", mb_aligner.common.ransac.decompose_affine_matrix(global_model.get_matrix()))
if sec1.mfovs_num == 1:
logger.report_event("Section {} has a single mfov, using the global model between section {} and section {}:\n{}".format(sec1.canonical_section_name, sec1.canonical_section_name, sec2.canonical_section_name, global_model.get_matrix()), log_level=logging.INFO)
mfov_index = next(sec1.mfovs()).mfov_index
pre_match_res[mfov_index] = (global_model, global_filtered_matches)
return pre_match_res
# Create section2 tile's bounding box rtree, so it would be faster to search it
# TODO - maybe store it in cache, because it might be used by other comparisons of this section
sec2_rtree = tinyr.RTree(interleaved=False, max_cap=5, min_cap=2)
for t in sec2.tiles():
sec2_rtree.insert(t, t.bbox)
# refine the global transform to a local one | async_results = []
for mfov1 in sec1.mfovs():
# find overlapping mfovs in sec2
mfovs2 = PreMatch3DFullSectionThenMfovsThumbsBlobs.get_overlapping_mfovs(mfov1, sec2, global_model, sec2_rtree)
logger.report_event("Finding local model between section {} (mfov {}) and section {} (mfovs {})".format(sec1.canonical_section_name, mfov1.mfov_index, sec2.canonical_section_name, mfovs2), log_level=logging.INFO) | random_line_split |
|
full_section_then_mfovs_thumbs_blobs.py | kps_pts[:, 1] *= us_y
# Apply the transformation to the points
assert(len(tile.transforms) == 1)
model = tile.transforms[0]
kps_pts = model.apply(kps_pts)
all_kps_descs[0].extend(kps_pts)
all_kps_descs[1].extend(descs)
logger.report_event("Found {} blobs in section {}, mfov {}".format(len(all_kps_descs[0]), mfov.layer, mfov.mfov_index), log_level=logging.INFO)
return mfov.mfov_index, all_kps_descs
def compute_section_blobs(self, sec, sec_cache, pool):
# Create nested caches is needed
if "pre_match_blobs" not in sec_cache:
#sec_cache.create_dict("pre_match_blobs")
sec_cache["pre_match_blobs"] = {}
total_features_num = 0
# create the mfovs blob computation jobs
async_results = []
for mfov in sec.mfovs():
if mfov in sec_cache["pre_match_blobs"]:
continue
res = pool.apply_async(PreMatch3DFullSectionThenMfovsThumbsBlobs.detect_mfov_blobs, (self._kwargs.get("blob_detector", {}), mfov))
async_results.append(res)
for res in async_results:
mfov_index, mfov_kps_descs = res.get()
#sec_cache["pre_match_blobs"].create_dict(mfov_index)
sec_cache["pre_match_blobs"][mfov_index] = mfov_kps_descs
total_features_num += len(mfov_kps_descs[0])
return total_features_num
@staticmethod
def collect_all_features(sec_cache):
# TODO - need to see if pre-allocation can improve speed
all_kps_arrays = [kps_descs[0] for kps_descs in sec_cache["pre_match_blobs"].values() if len(kps_descs[0]) > 0]
all_descs_arrays = [kps_descs[1] for kps_descs in sec_cache["pre_match_blobs"].values() if len(kps_descs[1]) > 0]
return np.vstack(all_kps_arrays), np.vstack(all_descs_arrays)
@staticmethod
def get_overlapping_mfovs(mfov1, sec2, sec1_to_sec2_model, sec2_rtree):
# TODO - for single beam data, it might be better to take the boundaries of all tiles in mfov1,
# and return their overlapping mfovs on sec2
# Take mfov1's center
mfov1_center = np.array([
(mfov1.bbox[0] + mfov1.bbox[1]) / 2,
(mfov1.bbox[2] + mfov1.bbox[3]) / 2
])
# Add the triangle points
sec1_points = PreMatch3DFullSectionThenMfovsThumbsBlobs.OVERLAP_DELTAS + mfov1_center
sec1_on_sec2_points = sec1_to_sec2_model.apply(sec1_points)
overlapping_mfovs = set()
for sec1_on_sec2_point in sec1_on_sec2_points:
rect_res = sec2_rtree.search([sec1_on_sec2_point[0], sec1_on_sec2_point[0] + 1, sec1_on_sec2_point[1], sec1_on_sec2_point[1] + 1])
for other_t in rect_res:
overlapping_mfovs.add(other_t.mfov_index)
return overlapping_mfovs
@staticmethod
def match_mfovs_features(matcher_params, sec1_cache, sec2_cache, mfovs1, mfovs2):
"""
Matches the features in mfovs1 (of sec1) to the features in mfovs2 (of sec2).
This method is run by a process that loads the matcher from its local thread storage.
"""
thread_local_store = ThreadLocalStorageLRU()
if 'matcher' in thread_local_store.keys():
matcher = thread_local_store['matcher']
else:
# Initialize the matcher, and store it in the local thread storage
matcher = FeaturesMatcher(BlobDetector2D.create_matcher, **matcher_params)
thread_local_store['matcher'] = matcher
# matcher = getattr(threadLocal, 'matcher', None)
# if matcher is None:
# # Initialize the matcher, and store it in the local thread storage
# matcher = FeaturesMatcher(BlobDetector2D.create_matcher, **matcher_params)
# threadLocal.matcher = matcher
def get_kps_descs(mfovs, sec_cache):
mfovs = list(mfovs)
if len(mfovs) == 1:
mfovs_kps = np.array(sec_cache["pre_match_blobs"][mfovs[0]][0])
mfovs_descs = np.array(sec_cache["pre_match_blobs"][mfovs[0]][1])
else:
mfovs_kps_arrays = []
mfovs_descs_arrays = []
for mfov in mfovs:
kps_descs = sec_cache["pre_match_blobs"][mfov]
if len(kps_descs[0]) > 0:
mfovs_kps_arrays.append(kps_descs[0])
mfovs_descs_arrays.append(kps_descs[1])
if len(mfovs_kps_arrays) == 0:
mfovs_kps = np.array([])
mfovs_descs = np.array([])
elif len(mfovs_kps_arrays) == 1:
mfovs_kps = mfovs_kps_arrays[0]
mfovs_descs = mfovs_descs_arrays[0]
else:
mfovs_kps = np.vstack(mfovs_kps_arrays)
mfovs_descs = np.vstack(mfovs_descs_arrays)
return np.array(mfovs_kps), np.array(mfovs_descs)
mfovs1_kps, mfovs1_descs = get_kps_descs(mfovs1, sec1_cache)
mfovs2_kps, mfovs2_descs = get_kps_descs(mfovs2, sec2_cache)
model, filtered_matches = matcher.match_and_filter(mfovs1_kps, mfovs1_descs, mfovs2_kps, mfovs2_descs)
return mfovs1, model, filtered_matches
def pre_match_sections(self, sec1, sec2, sec1_cache, sec2_cache, pool):
"""
Performs a section to section pre-matching by detecting blobs in each section,
then performing a global section matching, and then a per-mfov (of sec1) refinement of the matches.
Returns a map between an mfov of sec1, and a tuple that holds its transformation model to sec2, and the filtered_matches
"""
pre_match_res = {}
# dispatch blob computation
sec1_features_num = self.compute_section_blobs(sec1, sec1_cache, pool)
sec2_features_num = self.compute_section_blobs(sec2, sec2_cache, pool)
# compute a section to section global affine transform
# collect all features for each section
sec1_kps, sec1_descs = PreMatch3DFullSectionThenMfovsThumbsBlobs.collect_all_features(sec1_cache)
sec2_kps, sec2_descs = PreMatch3DFullSectionThenMfovsThumbsBlobs.collect_all_features(sec2_cache)
global_model, global_filtered_matches = self._matcher.match_and_filter(sec1_kps, sec1_descs, sec2_kps, sec2_descs)
if global_model is None:
logger.report_event("No global model found between section {} (all mfovs) and section {} (all mfovs)".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.WARNING)
# TODO - write to log, and return None
return None
logger.report_event("Global model found between section {} (all mfovs) and section {} (all mfovs):\n{}".format(sec1.canonical_section_name, sec2.canonical_section_name, global_model.get_matrix()), log_level=logging.INFO)
print("DECOMPOSED MATRIX: ", mb_aligner.common.ransac.decompose_affine_matrix(global_model.get_matrix()))
if sec1.mfovs_num == 1:
logger.report_event("Section {} has a single mfov, using the global model between section {} and section {}:\n{}".format(sec1.canonical_section_name, sec1.canonical_section_name, sec2.canonical_section_name, global_model.get_matrix()), log_level=logging.INFO)
mfov_index = next(sec1.mfovs()).mfov_index
pre_match_res[mfov_index] = (global_model, global_filtered_matches)
return pre_match_res
# Create section2 tile's bounding box rtree, so it would be faster to search it
# TODO - maybe store it in cache, because it might be used by other comparisons of this section
sec2_rtree = tinyr.RTree(interleaved=False, max_cap=5, min_cap=2)
for t in sec2.tiles():
| sec2_rtree.insert(t, t.bbox) | conditional_block |
|
main.rs | (&opt.file_name);
let file =
fs::File::open(file_name).with_context(|| format!("open file {:?} err", file_name))?;
// 使用 zip 创建该文件的 Archive
let mut archive = zip::ZipArchive::new(file).context("create zip archive err")?;
for i in 0..archive.len() {
let file = archive.by_index(i).unwrap();
if opt.verbose {
println!("filename: {}", file.name());
}
}
// 直接解析 main document: word/document.xml
// TODO 这个是写死的路径,正常应该先解析 [Content_types].xml 找到 main document.
let word_doc = archive
.by_name("word/document.xml")
.context("found no word/document.xml")?;
// xml parse
let mut doc_parsing = MainDocParsing::new();
let parser = EventReader::new(word_doc);
let mut depth = 0;
for e in parser {
let event = e.context("xml parser got err")?;
match event {
XmlEvent::StartElement {
name,
attributes,
namespace: _,
} => {
// 调试信息
if opt.verbose {
print_xml_owned_name(&name, depth, true);
}
depth += 1;
// 新元素开始解析
doc_parsing.feed_element(name, attributes);
}
XmlEvent::EndElement { name } => {
depth -= 1;
// 调试信息
if opt.verbose {
print_xml_owned_name(&name, depth, false);
}
// 当前元素解析完成
doc_parsing.fish_feed_element();
}
XmlEvent::Comment(_) => {}
XmlEvent::CData(_) => {}
XmlEvent::Characters(data) => {
// 调试信息
if opt.verbose {
println!(r#"{}Characters("{}")"#, " ".repeat(depth), data,);
}
// 当前元素添加 text data
doc_parsing.feed_characters(data);
}
XmlEvent::Whitespace(_) => {}
_ => {
// TODO
}
}
}
// 打印 文中的字体颜色和字体内容
print_elements(&doc_parsing.root, opt.verbose);
Ok(())
}
/// 辅助调试函数,打印元素
fn print_xml_owned_name(name: &OwnedName, indent: usize, start: bool) {
print!("{}", " ".repeat(indent));
if start {
print!("+");
} else {
print!("-");
}
if let Some(v) = &name.prefix {
print!("{}:", v);
}
println!("{}", name.local_name);
}
/// Main document 中我们支持的一些元素类型
/// 保存原始的格式(例如 w:t)到 String 只是为了方便调试.
#[derive(Debug)]
enum ElementType {
Document(String),
Body(String),
Paragraph(String),
Run(String),
Text(String),
/// 属性
ParagraphProperty(String),
RunProperty(String),
Color(String),
/// 其他剩余的不支持的类型
Unknown(String),
}
impl ElementType {
/// 从 xml的 OwnedName 中构建 ElementType
fn from_name(name: &OwnedName) -> Self {
let raw = format!(
"{}:{}",
name.prefix.as_ref().unwrap_or(&String::new()),
name.local_name
);
// 目前 只识别 `w:xxx` 格式, 且只是部分标签
if name.prefix.is_none() || name.prefix.as_ref().unwrap().ne("w") {
return Self::Unknown(raw);
}
match &*name.local_name {
"document" => Self::Document(raw),
"body" => Self::Body(raw),
"p" => Self::Paragraph(raw),
"r" => Self::Run(raw),
"t" => Self::Text(raw),
"pPr" => Self::ParagraphProperty(raw),
"rPr" => Self::RunProperty(raw),
"color" => Self::Color(raw),
_ => Self::Unknown(raw),
}
}
/// 是否是 Text类型(w:t)
fn is_text(&self) -> bool {
matches!(self, Self::Text(_))
}
/// 是否是Run property(w:rPr)
fn is_run_property(&self) -> bool {
matches!(self, Self::RunProperty(_))
}
/// 是否是 Color 类型(color)
fn is_color(&self) -> bool {
matches!(self, Self::Color(_))
}
}
/// main document中的元素.
struct Element {
element_type: ElementType,
parent: Option<Weak<RefCell<Element>>>,
children: Vec<Rc<RefCell<Element>>>,
attributes: HashMap<String, String>,
literal_text: Option<String>, // 目前只有 w:t 有
depth: usize, // for debug
}
impl Element {
/// new Element, 需要指定 parent 和 type, parent 可以为 None
fn new(
element_type: ElementType,
parent: &Option<Rc<RefCell<Element>>>,
attributes: Vec<OwnedAttribute>,
depth: usize,
) -> Self {
let mut attrs = HashMap::new();
attributes.iter().for_each(|v| {
attrs.insert(v.name.local_name.clone(), v.value.clone());
});
Self {
element_type,
parent: parent.as_ref().map(Rc::downgrade),
children: vec![],
attributes: attrs,
literal_text: None,
depth,
}
}
fn append_child(&mut self, child: Rc<RefCell<Element>>) {
self.children.push(child);
}
// 下面是一些辅助方法
/// 寻找本节点最近的 run property
fn find_run_property(element: &Option<Rc<RefCell<Element>>>) -> Option<Rc<RefCell<Element>>> {
if let Some(ele) = element {
if let Some(parent) = &ele.borrow().parent {
if let Some(parent) = parent.upgrade() {
// find run property from parent's children
for child in parent.borrow().children.iter() {
if child.borrow().element_type.is_run_property() {
return Some(Rc::clone(child));
}
}
// if not found, goes up
return Self::find_run_property(&Some(parent));
}
}
}
None
}
/// 如果自己是 run property, 从中获取 color 属性
fn get_color(element: &Option<Rc<RefCell<Element>>>) -> Option<String> {
if let Some(ele) = &element {
// 本身不是 run property
if !ele.borrow().element_type.is_run_property() {
return None;
}
// 从 children 中寻找 w:color
for child in ele.borrow().children.iter() {
let child_ref = child.borrow();
if child_ref.element_type.is_color() {
return child_ref.attributes.get("val").cloned();
}
}
}
None
}
fn display(root: &Option<Rc<RefCell<Element>>>) -> String {
if let Some(root_rc) = root {
let attrs: Vec<_> = root_rc
.borrow()
.attributes
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect();
let indent = " ".repeat(root_rc.borrow().depth);
format!(
"{}{:?}, attrs: {:?},",
indent,
root_rc.borrow().element_type,
attrs
)
} else {
"None<Element>".to_string()
}
}
}
/// Main document 解析过程.
/// 流程:
/// 内部维护一颗 Element 的树 root, 并且维护当前解析的节点的指针 cur.
/// 1. 当新的元素解析到,调用 feed_element, 会将新的 Element 添加到 cur 的子元素中(children),
/// 并将 cur 指向新的 Element
/// 2. 当一个元素解析完成,调用 fish_feed_element,
/// 会将 cur 指向其父节点
/// 3. 当有新的 text data 时,调用 feed_characters, 将 data 填空到当前的 Element中.
/// 目前只是针对 w:t 类型
struct MainDocParsing {
// 这里假设有一个唯一的 root
root: Option<Rc<RefCell<Element>>>,
cur: Option<Rc<RefCell<Element>>>,
depth: usize,
}
impl MainDocParsing {
fn new() -> Self {
Self {
root: None,
cur: None,
depth: 0,
}
}
/// 一个新的元素开始解析
fn feed_element(&mut self, name: OwnedName, attributes: Vec<OwnedAttribute>) {
self.depth += 1;
let element_type = ElementType::from_name(&name);
let element = Rc::new(RefCell::new(Element::new(
element_type,
&self.cur,
attributes,
self.depth,
)));
if let Some(cur_parent) = &self.cur {
// 最新节点添加为 parent 的子节点
cur_parent.borrow_mut().append_child(Rc::clone(&element)); | // cur parent 变更为 最新节点
self.cur.replace(element);
} else {
// 第一个节点
self.root.replace(Rc::clone(&element)); | random_line_split |
|
main.rs | .xml")
.context("found no word/document.xml")?;
// xml parse
let mut doc_parsing = MainDocParsing::new();
let parser = EventReader::new(word_doc);
let mut depth = 0;
for e in parser {
let event = e.context("xml parser got err")?;
match event {
XmlEvent::StartElement {
name,
attributes,
namespace: _,
} => {
// 调试信息
if opt.verbose {
print_xml_owned_name(&name, depth, true);
}
depth += 1;
// 新元素开始解析
doc_parsing.feed_element(name, attributes);
}
XmlEvent::EndElement { name } => {
depth -= 1;
// 调试信息
if opt.verbose {
print_xml_owned_name(&name, depth, false);
}
// 当前元素解析完成
doc_parsing.fish_feed_element();
}
XmlEvent::Comment(_) => {}
XmlEvent::CData(_) => {}
XmlEvent::Characters(data) => {
// 调试信息
if opt.verbose {
println!(r#"{}Characters("{}")"#, " ".repeat(depth), data,);
}
// 当前元素添加 text data
doc_parsing.feed_characters(data);
}
XmlEvent::Whitespace(_) => {}
_ => {
// TODO
}
}
}
// 打印 文中的字体颜色和字体内容
print_elements(&doc_parsing.root, opt.verbose);
Ok(())
}
/// 辅助调试函数,打印元素
fn print_xml_owned_name(name: &OwnedName, indent: usize, start: bool) {
print!("{}", " ".repeat(indent));
if start {
print!("+");
} else {
print!("-");
}
if let Some(v) = &name.prefix {
print!("{}:", v);
}
println!("{}", name.local_name);
}
/// Main document 中我们支持的一些元素类型
/// 保存原始的格式(例如 w:t)到 String 只是为了方便调试.
#[derive(Debug)]
enum ElementType {
Document(String),
Body(String),
Paragraph(String),
Run(String),
Text(String),
/// 属性
ParagraphProperty(String),
RunProperty(String),
Color(String),
/// 其他剩余的不支持的类型
Unknown(String),
}
impl ElementType {
/// 从 xml的 OwnedName 中构建 ElementType
fn from_name(name: &OwnedName) -> Self {
let raw = format!(
"{}:{}",
name.prefix.as_ref().unwrap_or(&String::new()),
name.local_name
);
// 目前 只识别 `w:xxx` 格式, 且只是部分标签
if name.prefix.is_none() || name.prefix.as_ref().unwrap().ne("w") {
return Self::Unknown(raw);
}
match &*name.local_name {
"document" => Self::Document(raw),
"body" => Self::Body(raw),
"p" => Self::Paragraph(raw),
"r" => Self::Run(raw),
"t" => Self::Text(raw),
"pPr" => Self::ParagraphProperty(raw),
"rPr" => Self::RunProperty(raw),
"color" => Self::Color(raw),
_ => Self::Unknown(raw),
}
}
/// 是否是 Text类型(w:t)
fn is_text(&self) -> bool {
matches!(self, Self::Text(_))
}
/// 是否是Run property(w:rPr)
fn is_run_property(&self) -> bool {
matches!(self, Self::RunProperty(_))
}
/// 是否是 Color 类型(color)
fn is_color(&self) -> bool {
matches!(self, Self::Color(_))
}
}
/// main document中的元素.
struct Element {
element_type: ElementType,
parent: Option<Weak<RefCell<Element>>>,
children: Vec<Rc<RefCell<Element>>>,
attributes: HashMap<String, String>,
literal_text: Option<String>, // 目前只有 w:t 有
depth: usize, // for debug
}
impl Element {
/// new Element, 需要指定 parent 和 type, parent 可以为 None
fn new(
element_type: ElementType,
parent: &Option<Rc<RefCell<Element>>>,
attributes: Vec<OwnedAttribute>,
depth: usize,
) -> Self {
let mut attrs = HashMap::new();
attributes.iter().for_each(|v| {
attrs.insert(v.name.local_name.clone(), v.value.clone());
});
Self {
element_type,
parent: parent.as_ref().map(Rc::downgrade),
children: vec![],
attributes: attrs,
literal_text: None,
depth,
}
}
fn append_child(&mut self, child: Rc<RefCell<Element>>) {
self.children.push(child);
}
// 下面是一些辅助方法
/// 寻找本节点最近的 run property
fn find_run_property(element: &Option<Rc<RefCell<Element>>>) -> Option<Rc<RefCell<Element>>> {
if let Some(ele) = element {
if let Some(parent) = &ele.borrow().parent {
if let Some(parent) = parent.upgrade() {
// find run property from parent's children
for child in parent.borrow().children.iter() {
if child.borrow().element_type.is_run_property() {
return Some(Rc::clone(child));
}
}
// if not found, goes up
return Self::find_run_property(&Some(parent));
}
}
}
None
}
/// 如果自己是 run property, 从中获取 color 属性
fn get_color(element: &Option<Rc<RefCell<Element>>>) -> Option<String> {
if let Some(ele) = &element {
// 本身不是 run property
if !ele.borrow().element_type.is_run_property() {
return None;
}
// 从 children 中寻找 w:color
for child in ele.borrow().children.iter() {
let child_ref = child.borrow();
if child_ref.element_type.is_color() {
return child_ref.attributes.get("val").cloned();
}
}
}
None
}
fn display(root: &Option<Rc<RefCell<Element>>>) -> String {
if let Some(root_rc) = root {
let attrs: Vec<_> = root_rc
.borrow()
.attributes
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect();
let indent = " ".repeat(root_rc.borrow().depth);
format!(
"{}{:?}, attrs: {:?},",
indent,
root_rc.borrow().element_type,
attrs
)
} else {
"None<Element>".to_string()
}
}
}
/// Main document 解析过程.
/// 流程:
/// 内部维护一颗 Element 的树 root, 并且维护当前解析的节点的指针 cur.
/// 1. 当新的元素解析到,调用 feed_element, 会将新的 Element 添加到 cur 的子元素中(children),
/// 并将 cur 指向新的 Element
/// 2. 当一个元素解析完成,调用 fish_feed_element,
/// 会将 cur 指向其父节点
/// 3. 当有新的 text data 时,调用 feed_characters, 将 data 填空到当前的 Element中.
/// 目前只是针对 w:t 类型
struct MainDocParsing {
// 这里假设有一个唯一的 root
root: Option<Rc<RefCell<Element>>>,
cur: Option<Rc<RefCell<Element>>>,
depth: usize,
}
impl MainDocParsing {
fn new() -> Self {
Self {
root: None,
cur: None,
depth: 0,
}
}
/// 一个新的元素开始解析
fn feed_element(&mut self, name: OwnedName, attributes: Vec<OwnedAttribute>) {
self.depth += 1;
let element_type = ElementType::from_name(&name);
let element = Rc::new(RefCell::new(Element::new(
element_type,
&self.cur,
attributes,
self.depth,
)));
if let Some(cur_parent) = &self.cur {
// 最新节点添加为 parent 的子节点
cur_parent.borrow_mut().append_child(Rc::clone(&element));
// cur parent 变更为 最新节点
self.cur.replace(element);
} else {
// 第一个节点
self.root.replace(Rc::clone(&element));
self.cur.replace(element);
}
}
/// 当前元素解析完成
fn fish_feed_element(&mut self) {
self.de | pth -= 1;
// 当前父节点指向上一层的节点
let mut parent = None;
if let Some(cur) = &self.cur {
if let Some(p) = &cur.borrow().parent {
parent = p.upgrade();
}
}
self.cur = parent;
}
/// 向当前的 element 中添加text, 目前只有 w:t 类型会有
fn feed_characters(&mut self, data: String) {
if let Some(cur) = &self.cur {
cur.borrow_mut().literal_text = Some(data);
}
}
} | identifier_body |
|
main.rs | ```sh
/// cargo run -- demo.docx
/// ```
/// 输出字体,并且带字体的颜色值.
fn main() -> Result<()> {
let opt = Opt::from_args();
let file_name = Path::new(&opt.file_name);
let file =
fs::File::open(file_name).with_context(|| format!("open file {:?} err", file_name))?;
// 使用 zip 创建该文件的 Archive
let mut archive = zip::ZipArchive::new(file).context("create zip archive err")?;
for i in 0..archive.len() {
let file = archive.by_index(i).unwrap();
if opt.verbose {
println!("filename: {}", file.name());
}
}
// 直接解析 main document: word/document.xml
// TODO 这个是写死的路径,正常应该先解析 [Content_types].xml 找到 main document.
let word_doc = archive
.by_name("word/document.xml")
.context("found no word/document.xml")?;
// xml parse
let mut doc_parsing = MainDocParsing::new();
let parser = EventReader::new(word_doc);
let mut depth = 0;
for e in parser {
let event = e.context("xml parser got err")?;
match event {
XmlEvent::StartElement {
name,
attributes,
namespace: _,
} => {
// 调试信息
if opt.verbose {
print_xml_owned_name(&name, depth, true);
}
depth += 1;
// 新元素开始解析
doc_parsing.feed_element(name, attributes);
}
XmlEvent::EndElement { name } => {
depth -= 1;
// 调试信息
if opt.verbose {
print_xml_owned_name(&name, depth, false);
}
// 当前元素解析完成
doc_parsing.fish_feed_element();
}
XmlEvent::Comment(_) => {}
XmlEvent::CData(_) => {}
XmlEvent::Characters(data) => {
// 调试信息
if opt.verbose {
println!(r#"{}Characters("{}")"#, " ".repeat(depth), data,);
}
// 当前元素添加 text data
doc_parsing.feed_characters(data);
}
XmlEvent::Whitespace(_) => {}
_ => {
// TODO
}
}
}
// 打印 文中的字体颜色和字体内容
print_elements(&doc_parsing.root, opt.verbose);
Ok(())
}
/// 辅助调试函数,打印元素
fn print_xml_owned_name(name: &OwnedName, indent: usize, start: bool) {
print!("{}", " ".repeat(indent));
if start {
print!("+");
} else {
print!("-");
}
if let Some(v) = &name.prefix {
print!("{}:", v);
}
println!("{}", name.local_name);
}
/// Main document 中我们支持的一些元素类型
/// 保存原始的格式(例如 w:t)到 String 只是为了方便调试.
#[derive(Debug)]
enum ElementType {
Document(String),
Body(String),
Paragraph(String),
Run(String),
Text(String),
/// 属性
ParagraphProperty(String),
RunProperty(String),
Color(String),
/// 其他剩余的不支持的类型
Unknown(String),
}
impl ElementType {
| l的 OwnedName 中构建 ElementType
fn from_name(name: &OwnedName) -> Self {
let raw = format!(
"{}:{}",
name.prefix.as_ref().unwrap_or(&String::new()),
name.local_name
);
// 目前 只识别 `w:xxx` 格式, 且只是部分标签
if name.prefix.is_none() || name.prefix.as_ref().unwrap().ne("w") {
return Self::Unknown(raw);
}
match &*name.local_name {
"document" => Self::Document(raw),
"body" => Self::Body(raw),
"p" => Self::Paragraph(raw),
"r" => Self::Run(raw),
"t" => Self::Text(raw),
"pPr" => Self::ParagraphProperty(raw),
"rPr" => Self::RunProperty(raw),
"color" => Self::Color(raw),
_ => Self::Unknown(raw),
}
}
/// 是否是 Text类型(w:t)
fn is_text(&self) -> bool {
matches!(self, Self::Text(_))
}
/// 是否是Run property(w:rPr)
fn is_run_property(&self) -> bool {
matches!(self, Self::RunProperty(_))
}
/// 是否是 Color 类型(color)
fn is_color(&self) -> bool {
matches!(self, Self::Color(_))
}
}
/// main document中的元素.
struct Element {
element_type: ElementType,
parent: Option<Weak<RefCell<Element>>>,
children: Vec<Rc<RefCell<Element>>>,
attributes: HashMap<String, String>,
literal_text: Option<String>, // 目前只有 w:t 有
depth: usize, // for debug
}
impl Element {
/// new Element, 需要指定 parent 和 type, parent 可以为 None
fn new(
element_type: ElementType,
parent: &Option<Rc<RefCell<Element>>>,
attributes: Vec<OwnedAttribute>,
depth: usize,
) -> Self {
let mut attrs = HashMap::new();
attributes.iter().for_each(|v| {
attrs.insert(v.name.local_name.clone(), v.value.clone());
});
Self {
element_type,
parent: parent.as_ref().map(Rc::downgrade),
children: vec![],
attributes: attrs,
literal_text: None,
depth,
}
}
fn append_child(&mut self, child: Rc<RefCell<Element>>) {
self.children.push(child);
}
// 下面是一些辅助方法
/// 寻找本节点最近的 run property
fn find_run_property(element: &Option<Rc<RefCell<Element>>>) -> Option<Rc<RefCell<Element>>> {
if let Some(ele) = element {
if let Some(parent) = &ele.borrow().parent {
if let Some(parent) = parent.upgrade() {
// find run property from parent's children
for child in parent.borrow().children.iter() {
if child.borrow().element_type.is_run_property() {
return Some(Rc::clone(child));
}
}
// if not found, goes up
return Self::find_run_property(&Some(parent));
}
}
}
None
}
/// 如果自己是 run property, 从中获取 color 属性
fn get_color(element: &Option<Rc<RefCell<Element>>>) -> Option<String> {
if let Some(ele) = &element {
// 本身不是 run property
if !ele.borrow().element_type.is_run_property() {
return None;
}
// 从 children 中寻找 w:color
for child in ele.borrow().children.iter() {
let child_ref = child.borrow();
if child_ref.element_type.is_color() {
return child_ref.attributes.get("val").cloned();
}
}
}
None
}
fn display(root: &Option<Rc<RefCell<Element>>>) -> String {
if let Some(root_rc) = root {
let attrs: Vec<_> = root_rc
.borrow()
.attributes
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect();
let indent = " ".repeat(root_rc.borrow().depth);
format!(
"{}{:?}, attrs: {:?},",
indent,
root_rc.borrow().element_type,
attrs
)
} else {
"None<Element>".to_string()
}
}
}
/// Main document 解析过程.
/// 流程:
/// 内部维护一颗 Element 的树 root, 并且维护当前解析的节点的指针 cur.
/// 1. 当新的元素解析到,调用 feed_element, 会将新的 Element 添加到 cur 的子元素中(children),
/// 并将 cur 指向新的 Element
/// 2. 当一个元素解析完成,调用 fish_feed_element,
/// 会将 cur 指向其父节点
/// 3. 当有新的 text data 时,调用 feed_characters, 将 data 填空到当前的 Element中.
/// 目前只是针对 w:t 类型
struct MainDocParsing {
// 这里假设有一个唯一的 root
root: Option<Rc<RefCell<Element>>>,
cur: Option<Rc<RefCell<Element>>>,
depth: usize,
}
impl MainDocParsing {
fn new() -> Self {
Self {
root: None,
cur: None,
depth: 0,
}
}
/// 一个新的元素开始解析
fn feed_element(&mut self, name: OwnedName, attributes: Vec<OwnedAttribute>) {
self.depth += 1;
let element_type = ElementType::from_name(&name);
let element = Rc::new(RefCell::new(Element::new(
element_type,
&self.cur,
attributes,
self.depth,
)));
if let Some(cur_parent) = &self.cur {
// 最新节点添加为 parent 的子节点
cur_parent.borrow | /// 从 xm | identifier_name |
main.rs | ```sh
/// cargo run -- demo.docx
/// ```
/// 输出字体,并且带字体的颜色值.
fn main() -> Result<()> {
let opt = Opt::from_args();
let file_name = Path::new(&opt.file_name);
let file =
fs::File::open(file_name).with_context(|| format!("open file {:?} err", file_name))?;
// 使用 zip 创建该文件的 Archive
let mut archive = zip::ZipArchive::new(file).context("create zip archive err")?;
for i in 0..archive.len() {
let file = archive.by_index(i).unwrap();
if opt.verbose {
println!("filename: {}", file.name());
}
}
// 直接解析 main document: word/document.xml
// TODO 这个是写死的路径,正常应该先解析 [Content_types].xml 找到 main document.
let word_doc = archive
.by_name("word/document.xml")
.context("found no word/document.xml")?;
// xml parse
let mut doc_parsing = MainDocParsing::new();
let parser = EventReader::new(word_doc);
let mut depth = 0;
for e in parser {
let event = e.context("xml parser got err")?;
match event {
XmlEvent::StartElement {
name,
attributes,
namespace: _,
} => {
// 调试信息
if opt.verbose {
print_xml_owned_name(&name, depth, true);
}
depth += 1;
// 新元素开始解析
doc_parsing.feed_element(name, attributes);
}
XmlEvent::EndElement { name } => {
depth -= 1;
// 调试信息
if opt.verbose {
pri | // 调试信息
if opt.verbose {
println!(r#"{}Characters("{}")"#, " ".repeat(depth), data,);
}
// 当前元素添加 text data
doc_parsing.feed_characters(data);
}
XmlEvent::Whitespace(_) => {}
_ => {
// TODO
}
}
}
// 打印 文中的字体颜色和字体内容
print_elements(&doc_parsing.root, opt.verbose);
Ok(())
}
/// 辅助调试函数,打印元素
fn print_xml_owned_name(name: &OwnedName, indent: usize, start: bool) {
print!("{}", " ".repeat(indent));
if start {
print!("+");
} else {
print!("-");
}
if let Some(v) = &name.prefix {
print!("{}:", v);
}
println!("{}", name.local_name);
}
/// Main document 中我们支持的一些元素类型
/// 保存原始的格式(例如 w:t)到 String 只是为了方便调试.
#[derive(Debug)]
enum ElementType {
Document(String),
Body(String),
Paragraph(String),
Run(String),
Text(String),
/// 属性
ParagraphProperty(String),
RunProperty(String),
Color(String),
/// 其他剩余的不支持的类型
Unknown(String),
}
impl ElementType {
/// 从 xml的 OwnedName 中构建 ElementType
fn from_name(name: &OwnedName) -> Self {
let raw = format!(
"{}:{}",
name.prefix.as_ref().unwrap_or(&String::new()),
name.local_name
);
// 目前 只识别 `w:xxx` 格式, 且只是部分标签
if name.prefix.is_none() || name.prefix.as_ref().unwrap().ne("w") {
return Self::Unknown(raw);
}
match &*name.local_name {
"document" => Self::Document(raw),
"body" => Self::Body(raw),
"p" => Self::Paragraph(raw),
"r" => Self::Run(raw),
"t" => Self::Text(raw),
"pPr" => Self::ParagraphProperty(raw),
"rPr" => Self::RunProperty(raw),
"color" => Self::Color(raw),
_ => Self::Unknown(raw),
}
}
/// 是否是 Text类型(w:t)
fn is_text(&self) -> bool {
matches!(self, Self::Text(_))
}
/// 是否是Run property(w:rPr)
fn is_run_property(&self) -> bool {
matches!(self, Self::RunProperty(_))
}
/// 是否是 Color 类型(color)
fn is_color(&self) -> bool {
matches!(self, Self::Color(_))
}
}
/// main document中的元素.
struct Element {
element_type: ElementType,
parent: Option<Weak<RefCell<Element>>>,
children: Vec<Rc<RefCell<Element>>>,
attributes: HashMap<String, String>,
literal_text: Option<String>, // 目前只有 w:t 有
depth: usize, // for debug
}
impl Element {
/// new Element, 需要指定 parent 和 type, parent 可以为 None
fn new(
element_type: ElementType,
parent: &Option<Rc<RefCell<Element>>>,
attributes: Vec<OwnedAttribute>,
depth: usize,
) -> Self {
let mut attrs = HashMap::new();
attributes.iter().for_each(|v| {
attrs.insert(v.name.local_name.clone(), v.value.clone());
});
Self {
element_type,
parent: parent.as_ref().map(Rc::downgrade),
children: vec![],
attributes: attrs,
literal_text: None,
depth,
}
}
fn append_child(&mut self, child: Rc<RefCell<Element>>) {
self.children.push(child);
}
// 下面是一些辅助方法
/// 寻找本节点最近的 run property
fn find_run_property(element: &Option<Rc<RefCell<Element>>>) -> Option<Rc<RefCell<Element>>> {
if let Some(ele) = element {
if let Some(parent) = &ele.borrow().parent {
if let Some(parent) = parent.upgrade() {
// find run property from parent's children
for child in parent.borrow().children.iter() {
if child.borrow().element_type.is_run_property() {
return Some(Rc::clone(child));
}
}
// if not found, goes up
return Self::find_run_property(&Some(parent));
}
}
}
None
}
/// 如果自己是 run property, 从中获取 color 属性
fn get_color(element: &Option<Rc<RefCell<Element>>>) -> Option<String> {
if let Some(ele) = &element {
// 本身不是 run property
if !ele.borrow().element_type.is_run_property() {
return None;
}
// 从 children 中寻找 w:color
for child in ele.borrow().children.iter() {
let child_ref = child.borrow();
if child_ref.element_type.is_color() {
return child_ref.attributes.get("val").cloned();
}
}
}
None
}
fn display(root: &Option<Rc<RefCell<Element>>>) -> String {
if let Some(root_rc) = root {
let attrs: Vec<_> = root_rc
.borrow()
.attributes
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect();
let indent = " ".repeat(root_rc.borrow().depth);
format!(
"{}{:?}, attrs: {:?},",
indent,
root_rc.borrow().element_type,
attrs
)
} else {
"None<Element>".to_string()
}
}
}
/// Main document 解析过程.
/// 流程:
/// 内部维护一颗 Element 的树 root, 并且维护当前解析的节点的指针 cur.
/// 1. 当新的元素解析到,调用 feed_element, 会将新的 Element 添加到 cur 的子元素中(children),
/// 并将 cur 指向新的 Element
/// 2. 当一个元素解析完成,调用 fish_feed_element,
/// 会将 cur 指向其父节点
/// 3. 当有新的 text data 时,调用 feed_characters, 将 data 填空到当前的 Element中.
/// 目前只是针对 w:t 类型
struct MainDocParsing {
// 这里假设有一个唯一的 root
root: Option<Rc<RefCell<Element>>>,
cur: Option<Rc<RefCell<Element>>>,
depth: usize,
}
impl MainDocParsing {
fn new() -> Self {
Self {
root: None,
cur: None,
depth: 0,
}
}
/// 一个新的元素开始解析
fn feed_element(&mut self, name: OwnedName, attributes: Vec<OwnedAttribute>) {
self.depth += 1;
let element_type = ElementType::from_name(&name);
let element = Rc::new(RefCell::new(Element::new(
element_type,
&self.cur,
attributes,
self.depth,
)));
if let Some(cur_parent) = &self.cur {
// 最新节点添加为 parent 的子节点
cur_parent.borrow | nt_xml_owned_name(&name, depth, false);
}
// 当前元素解析完成
doc_parsing.fish_feed_element();
}
XmlEvent::Comment(_) => {}
XmlEvent::CData(_) => {}
XmlEvent::Characters(data) => {
| conditional_block |
utils.go | /go/v14/arrow/internal/debug"
"github.com/apache/arrow/go/v14/arrow/memory"
"golang.org/x/xerrors"
)
type bufferWriteSeeker struct {
buf *memory.Buffer
pos int
mem memory.Allocator
}
func (b *bufferWriteSeeker) Reserve(nbytes int) {
if b.buf == nil {
b.buf = memory.NewResizableBuffer(b.mem)
}
newCap := int(math.Max(float64(b.buf.Cap()), 256))
for newCap < b.pos+nbytes {
newCap = bitutil.NextPowerOf2(newCap)
}
b.buf.Reserve(newCap)
}
func (b *bufferWriteSeeker) Write(p []byte) (n int, err error) {
if len(p) == 0 {
return 0, nil
}
if b.buf == nil {
b.Reserve(len(p))
} else if b.pos+len(p) >= b.buf.Cap() {
b.Reserve(len(p))
}
return b.UnsafeWrite(p)
}
func (b *bufferWriteSeeker) UnsafeWrite(p []byte) (n int, err error) {
n = copy(b.buf.Buf()[b.pos:], p)
b.pos += len(p)
if b.pos > b.buf.Len() {
b.buf.ResizeNoShrink(b.pos)
}
return
}
func (b *bufferWriteSeeker) Seek(offset int64, whence int) (int64, error) {
newpos, offs := 0, int(offset)
switch whence {
case io.SeekStart:
newpos = offs
case io.SeekCurrent:
newpos = b.pos + offs
case io.SeekEnd:
newpos = b.buf.Len() + offs
}
if newpos < 0 {
return 0, xerrors.New("negative result pos")
}
b.pos = newpos
return int64(newpos), nil
}
// ensureDictionaryDecoded is used by DispatchBest to determine
// the proper types for promotion. Casting is then performed by
// the executor before continuing execution: see the implementation
// of execInternal in exec.go after calling DispatchBest.
//
// That casting is where actual decoding would be performed for
// the dictionary
func ensureDictionaryDecoded(vals ...arrow.DataType) {
for i, v := range vals {
if v.ID() == arrow.DICTIONARY |
}
}
func replaceNullWithOtherType(vals ...arrow.DataType) {
debug.Assert(len(vals) == 2, "should be length 2")
if vals[0].ID() == arrow.NULL {
vals[0] = vals[1]
return
}
if vals[1].ID() == arrow.NULL {
vals[1] = vals[0]
return
}
}
func commonTemporalResolution(vals ...arrow.DataType) (arrow.TimeUnit, bool) {
isTimeUnit := false
finestUnit := arrow.Second
for _, v := range vals {
switch dt := v.(type) {
case *arrow.Date32Type:
isTimeUnit = true
continue
case *arrow.Date64Type:
finestUnit = exec.Max(finestUnit, arrow.Millisecond)
isTimeUnit = true
case arrow.TemporalWithUnit:
finestUnit = exec.Max(finestUnit, dt.TimeUnit())
isTimeUnit = true
default:
continue
}
}
return finestUnit, isTimeUnit
}
func replaceTemporalTypes(unit arrow.TimeUnit, vals ...arrow.DataType) {
for i, v := range vals {
switch dt := v.(type) {
case *arrow.TimestampType:
dt.Unit = unit
vals[i] = dt
case *arrow.Time32Type, *arrow.Time64Type:
if unit > arrow.Millisecond {
vals[i] = &arrow.Time64Type{Unit: unit}
} else {
vals[i] = &arrow.Time32Type{Unit: unit}
}
case *arrow.DurationType:
dt.Unit = unit
vals[i] = dt
case *arrow.Date32Type, *arrow.Date64Type:
vals[i] = &arrow.TimestampType{Unit: unit}
}
}
}
func replaceTypes(replacement arrow.DataType, vals ...arrow.DataType) {
for i := range vals {
vals[i] = replacement
}
}
func commonNumeric(vals ...arrow.DataType) arrow.DataType {
for _, v := range vals {
if !arrow.IsFloating(v.ID()) && !arrow.IsInteger(v.ID()) {
// a common numeric type is only possible if all are numeric
return nil
}
if v.ID() == arrow.FLOAT16 {
// float16 arithmetic is not currently supported
return nil
}
}
for _, v := range vals {
if v.ID() == arrow.FLOAT64 {
return arrow.PrimitiveTypes.Float64
}
}
for _, v := range vals {
if v.ID() == arrow.FLOAT32 {
return arrow.PrimitiveTypes.Float32
}
}
maxWidthSigned, maxWidthUnsigned := 0, 0
for _, v := range vals {
if arrow.IsUnsignedInteger(v.ID()) {
maxWidthUnsigned = exec.Max(v.(arrow.FixedWidthDataType).BitWidth(), maxWidthUnsigned)
} else {
maxWidthSigned = exec.Max(v.(arrow.FixedWidthDataType).BitWidth(), maxWidthSigned)
}
}
if maxWidthSigned == 0 {
switch {
case maxWidthUnsigned >= 64:
return arrow.PrimitiveTypes.Uint64
case maxWidthUnsigned == 32:
return arrow.PrimitiveTypes.Uint32
case maxWidthUnsigned == 16:
return arrow.PrimitiveTypes.Uint16
default:
debug.Assert(maxWidthUnsigned == 8, "bad maxWidthUnsigned")
return arrow.PrimitiveTypes.Uint8
}
}
if maxWidthSigned <= maxWidthUnsigned {
maxWidthSigned = bitutil.NextPowerOf2(maxWidthUnsigned + 1)
}
switch {
case maxWidthSigned >= 64:
return arrow.PrimitiveTypes.Int64
case maxWidthSigned == 32:
return arrow.PrimitiveTypes.Int32
case maxWidthSigned == 16:
return arrow.PrimitiveTypes.Int16
default:
debug.Assert(maxWidthSigned == 8, "bad maxWidthSigned")
return arrow.PrimitiveTypes.Int8
}
}
func hasDecimal(vals ...arrow.DataType) bool {
for _, v := range vals {
if arrow.IsDecimal(v.ID()) {
return true
}
}
return false
}
type decimalPromotion uint8
const (
decPromoteNone decimalPromotion = iota
decPromoteAdd
decPromoteMultiply
decPromoteDivide
)
func castBinaryDecimalArgs(promote decimalPromotion, vals ...arrow.DataType) error {
left, right := vals[0], vals[1]
debug.Assert(arrow.IsDecimal(left.ID()) || arrow.IsDecimal(right.ID()), "at least one of the types should be decimal")
// decimal + float = float
if arrow.IsFloating(left.ID()) {
vals[1] = vals[0]
return nil
} else if arrow.IsFloating(right.ID()) {
vals[0] = vals[1]
return nil
}
var prec1, scale1, prec2, scale2 int32
var err error
// decimal + integer = decimal
if arrow.IsDecimal(left.ID()) {
dec := left.(arrow.DecimalType)
prec1, scale1 = dec.GetPrecision(), dec.GetScale()
} else {
debug.Assert(arrow.IsInteger(left.ID()), "floats were already handled, this should be an int")
if prec1, err = kernels.MaxDecimalDigitsForInt(left.ID()); err != nil {
return err
}
}
if arrow.IsDecimal(right.ID()) {
dec := right.(arrow.DecimalType)
prec2, scale2 = dec.GetPrecision(), dec.GetScale()
} else {
debug.Assert(arrow.IsInteger(right.ID()), "float already handled, should be ints")
if prec2, err = kernels.MaxDecimalDigitsForInt(right.ID()); err != nil {
return err
}
}
if scale1 < 0 || scale2 < 0 {
return fmt.Errorf("%w: decimals with negative scales not supported", arrow.ErrNotImplemented)
}
// decimal128 + decimal256 = decimal256
castedID := arrow.DECIMAL128
if left.ID() == arrow.DECIMAL256 || right.ID() == arrow.DECIMAL256 {
castedID = arrow.DECIMAL256
}
// decimal promotion rules compatible with amazon redshift
// https://docs.aws.amazon.com/redshift/latest/dg/r_numeric_computations201.html
var leftScaleup, rightScaleup int32
switch promote {
case decPromoteAdd:
leftScaleup = exec.Max(scale1, scale2) - scale1
rightScaleup = exec.Max(scale1, scale2) - scale2
case decPromoteMultiply:
case decPromoteDivide:
leftScaleup = exec.Max(4, scale1+prec2-scale2+1) + scale2 - scale1
default:
debug.Assert(false, fmt | {
vals[i] = v.(*arrow.DictionaryType).ValueType
} | conditional_block |
utils.go | {
debug.Assert(len(vals) == 2, "should be length 2")
if vals[0].ID() == arrow.NULL {
vals[0] = vals[1]
return
}
if vals[1].ID() == arrow.NULL {
vals[1] = vals[0]
return
}
}
func commonTemporalResolution(vals ...arrow.DataType) (arrow.TimeUnit, bool) {
isTimeUnit := false
finestUnit := arrow.Second
for _, v := range vals {
switch dt := v.(type) {
case *arrow.Date32Type:
isTimeUnit = true
continue
case *arrow.Date64Type:
finestUnit = exec.Max(finestUnit, arrow.Millisecond)
isTimeUnit = true
case arrow.TemporalWithUnit:
finestUnit = exec.Max(finestUnit, dt.TimeUnit())
isTimeUnit = true
default:
continue
}
}
return finestUnit, isTimeUnit
}
func replaceTemporalTypes(unit arrow.TimeUnit, vals ...arrow.DataType) {
for i, v := range vals {
switch dt := v.(type) {
case *arrow.TimestampType:
dt.Unit = unit
vals[i] = dt
case *arrow.Time32Type, *arrow.Time64Type:
if unit > arrow.Millisecond {
vals[i] = &arrow.Time64Type{Unit: unit}
} else {
vals[i] = &arrow.Time32Type{Unit: unit}
}
case *arrow.DurationType:
dt.Unit = unit
vals[i] = dt
case *arrow.Date32Type, *arrow.Date64Type:
vals[i] = &arrow.TimestampType{Unit: unit}
}
}
}
func replaceTypes(replacement arrow.DataType, vals ...arrow.DataType) {
for i := range vals {
vals[i] = replacement
}
}
func commonNumeric(vals ...arrow.DataType) arrow.DataType {
for _, v := range vals {
if !arrow.IsFloating(v.ID()) && !arrow.IsInteger(v.ID()) {
// a common numeric type is only possible if all are numeric
return nil
}
if v.ID() == arrow.FLOAT16 {
// float16 arithmetic is not currently supported
return nil
}
}
for _, v := range vals {
if v.ID() == arrow.FLOAT64 {
return arrow.PrimitiveTypes.Float64
}
}
for _, v := range vals {
if v.ID() == arrow.FLOAT32 {
return arrow.PrimitiveTypes.Float32
}
}
maxWidthSigned, maxWidthUnsigned := 0, 0
for _, v := range vals {
if arrow.IsUnsignedInteger(v.ID()) {
maxWidthUnsigned = exec.Max(v.(arrow.FixedWidthDataType).BitWidth(), maxWidthUnsigned)
} else {
maxWidthSigned = exec.Max(v.(arrow.FixedWidthDataType).BitWidth(), maxWidthSigned)
}
}
if maxWidthSigned == 0 {
switch {
case maxWidthUnsigned >= 64:
return arrow.PrimitiveTypes.Uint64
case maxWidthUnsigned == 32:
return arrow.PrimitiveTypes.Uint32
case maxWidthUnsigned == 16:
return arrow.PrimitiveTypes.Uint16
default:
debug.Assert(maxWidthUnsigned == 8, "bad maxWidthUnsigned")
return arrow.PrimitiveTypes.Uint8
}
}
if maxWidthSigned <= maxWidthUnsigned {
maxWidthSigned = bitutil.NextPowerOf2(maxWidthUnsigned + 1)
}
switch {
case maxWidthSigned >= 64:
return arrow.PrimitiveTypes.Int64
case maxWidthSigned == 32:
return arrow.PrimitiveTypes.Int32
case maxWidthSigned == 16:
return arrow.PrimitiveTypes.Int16
default:
debug.Assert(maxWidthSigned == 8, "bad maxWidthSigned")
return arrow.PrimitiveTypes.Int8
}
}
func hasDecimal(vals ...arrow.DataType) bool {
for _, v := range vals {
if arrow.IsDecimal(v.ID()) {
return true
}
}
return false
}
type decimalPromotion uint8
const (
decPromoteNone decimalPromotion = iota
decPromoteAdd
decPromoteMultiply
decPromoteDivide
)
func castBinaryDecimalArgs(promote decimalPromotion, vals ...arrow.DataType) error {
left, right := vals[0], vals[1]
debug.Assert(arrow.IsDecimal(left.ID()) || arrow.IsDecimal(right.ID()), "at least one of the types should be decimal")
// decimal + float = float
if arrow.IsFloating(left.ID()) {
vals[1] = vals[0]
return nil
} else if arrow.IsFloating(right.ID()) {
vals[0] = vals[1]
return nil
}
var prec1, scale1, prec2, scale2 int32
var err error
// decimal + integer = decimal
if arrow.IsDecimal(left.ID()) {
dec := left.(arrow.DecimalType)
prec1, scale1 = dec.GetPrecision(), dec.GetScale()
} else {
debug.Assert(arrow.IsInteger(left.ID()), "floats were already handled, this should be an int")
if prec1, err = kernels.MaxDecimalDigitsForInt(left.ID()); err != nil {
return err
}
}
if arrow.IsDecimal(right.ID()) {
dec := right.(arrow.DecimalType)
prec2, scale2 = dec.GetPrecision(), dec.GetScale()
} else {
debug.Assert(arrow.IsInteger(right.ID()), "float already handled, should be ints")
if prec2, err = kernels.MaxDecimalDigitsForInt(right.ID()); err != nil {
return err
}
}
if scale1 < 0 || scale2 < 0 {
return fmt.Errorf("%w: decimals with negative scales not supported", arrow.ErrNotImplemented)
}
// decimal128 + decimal256 = decimal256
castedID := arrow.DECIMAL128
if left.ID() == arrow.DECIMAL256 || right.ID() == arrow.DECIMAL256 {
castedID = arrow.DECIMAL256
}
// decimal promotion rules compatible with amazon redshift
// https://docs.aws.amazon.com/redshift/latest/dg/r_numeric_computations201.html
var leftScaleup, rightScaleup int32
switch promote {
case decPromoteAdd:
leftScaleup = exec.Max(scale1, scale2) - scale1
rightScaleup = exec.Max(scale1, scale2) - scale2
case decPromoteMultiply:
case decPromoteDivide:
leftScaleup = exec.Max(4, scale1+prec2-scale2+1) + scale2 - scale1
default:
debug.Assert(false, fmt.Sprintf("invalid DecimalPromotion value %d", promote))
}
vals[0], err = arrow.NewDecimalType(castedID, prec1+leftScaleup, scale1+leftScaleup)
if err != nil {
return err
}
vals[1], err = arrow.NewDecimalType(castedID, prec2+rightScaleup, scale2+rightScaleup)
return err
}
func commonTemporal(vals ...arrow.DataType) arrow.DataType {
var (
finestUnit = arrow.Second
zone *string
loc *time.Location
sawDate32, sawDate64 bool
)
for _, ty := range vals {
switch ty.ID() {
case arrow.DATE32:
// date32's unit is days, but the coarsest we have is seconds
sawDate32 = true
case arrow.DATE64:
finestUnit = exec.Max(finestUnit, arrow.Millisecond)
sawDate64 = true
case arrow.TIMESTAMP:
ts := ty.(*arrow.TimestampType)
if ts.TimeZone != "" {
tz, _ := ts.GetZone()
if loc != nil && loc != tz {
return nil
}
loc = tz
}
zone = &ts.TimeZone
finestUnit = exec.Max(finestUnit, ts.Unit)
default:
return nil
}
}
switch {
case zone != nil:
// at least one timestamp seen
return &arrow.TimestampType{Unit: finestUnit, TimeZone: *zone}
case sawDate64:
return arrow.FixedWidthTypes.Date64
case sawDate32:
return arrow.FixedWidthTypes.Date32
}
return nil
}
func commonBinary(vals ...arrow.DataType) arrow.DataType | {
var (
allUTF8, allOffset32, allFixedWidth = true, true, true
)
for _, ty := range vals {
switch ty.ID() {
case arrow.STRING:
allFixedWidth = false
case arrow.BINARY:
allFixedWidth, allUTF8 = false, false
case arrow.FIXED_SIZE_BINARY:
allUTF8 = false
case arrow.LARGE_BINARY:
allOffset32, allFixedWidth, allUTF8 = false, false, false
case arrow.LARGE_STRING:
allOffset32, allFixedWidth = false, false
default:
return nil
} | identifier_body |
|
utils.go | /go/v14/arrow/internal/debug"
"github.com/apache/arrow/go/v14/arrow/memory"
"golang.org/x/xerrors"
)
type bufferWriteSeeker struct {
buf *memory.Buffer
pos int
mem memory.Allocator
}
func (b *bufferWriteSeeker) Reserve(nbytes int) {
if b.buf == nil {
b.buf = memory.NewResizableBuffer(b.mem)
}
newCap := int(math.Max(float64(b.buf.Cap()), 256))
for newCap < b.pos+nbytes {
newCap = bitutil.NextPowerOf2(newCap)
}
b.buf.Reserve(newCap)
}
func (b *bufferWriteSeeker) Write(p []byte) (n int, err error) {
if len(p) == 0 {
return 0, nil
}
if b.buf == nil {
b.Reserve(len(p))
} else if b.pos+len(p) >= b.buf.Cap() {
b.Reserve(len(p))
}
return b.UnsafeWrite(p)
}
func (b *bufferWriteSeeker) UnsafeWrite(p []byte) (n int, err error) {
n = copy(b.buf.Buf()[b.pos:], p)
b.pos += len(p)
if b.pos > b.buf.Len() {
b.buf.ResizeNoShrink(b.pos)
}
return
}
func (b *bufferWriteSeeker) Seek(offset int64, whence int) (int64, error) {
newpos, offs := 0, int(offset)
switch whence {
case io.SeekStart:
newpos = offs
case io.SeekCurrent:
newpos = b.pos + offs
case io.SeekEnd:
newpos = b.buf.Len() + offs
}
if newpos < 0 {
return 0, xerrors.New("negative result pos")
}
b.pos = newpos
return int64(newpos), nil
}
// ensureDictionaryDecoded is used by DispatchBest to determine
// the proper types for promotion. Casting is then performed by
// the executor before continuing execution: see the implementation
// of execInternal in exec.go after calling DispatchBest.
//
// That casting is where actual decoding would be performed for
// the dictionary
func ensureDictionaryDecoded(vals ...arrow.DataType) {
for i, v := range vals {
if v.ID() == arrow.DICTIONARY {
vals[i] = v.(*arrow.DictionaryType).ValueType
}
}
}
func replaceNullWithOtherType(vals ...arrow.DataType) {
debug.Assert(len(vals) == 2, "should be length 2")
if vals[0].ID() == arrow.NULL {
vals[0] = vals[1]
return
}
if vals[1].ID() == arrow.NULL {
vals[1] = vals[0]
return
}
}
func commonTemporalResolution(vals ...arrow.DataType) (arrow.TimeUnit, bool) {
isTimeUnit := false
finestUnit := arrow.Second
for _, v := range vals {
switch dt := v.(type) {
case *arrow.Date32Type:
isTimeUnit = true
continue
case *arrow.Date64Type:
finestUnit = exec.Max(finestUnit, arrow.Millisecond)
isTimeUnit = true
case arrow.TemporalWithUnit:
finestUnit = exec.Max(finestUnit, dt.TimeUnit())
isTimeUnit = true
default:
continue
}
}
return finestUnit, isTimeUnit
}
func replaceTemporalTypes(unit arrow.TimeUnit, vals ...arrow.DataType) {
for i, v := range vals {
switch dt := v.(type) {
case *arrow.TimestampType:
dt.Unit = unit
vals[i] = dt
case *arrow.Time32Type, *arrow.Time64Type:
if unit > arrow.Millisecond {
vals[i] = &arrow.Time64Type{Unit: unit}
} else {
vals[i] = &arrow.Time32Type{Unit: unit}
}
case *arrow.DurationType:
dt.Unit = unit
vals[i] = dt
case *arrow.Date32Type, *arrow.Date64Type:
vals[i] = &arrow.TimestampType{Unit: unit}
}
}
}
func replaceTypes(replacement arrow.DataType, vals ...arrow.DataType) {
for i := range vals {
vals[i] = replacement
}
}
func commonNumeric(vals ...arrow.DataType) arrow.DataType {
for _, v := range vals {
if !arrow.IsFloating(v.ID()) && !arrow.IsInteger(v.ID()) {
// a common numeric type is only possible if all are numeric
return nil
}
if v.ID() == arrow.FLOAT16 {
// float16 arithmetic is not currently supported
return nil
}
}
for _, v := range vals {
if v.ID() == arrow.FLOAT64 {
return arrow.PrimitiveTypes.Float64
}
}
for _, v := range vals {
if v.ID() == arrow.FLOAT32 {
return arrow.PrimitiveTypes.Float32
}
}
maxWidthSigned, maxWidthUnsigned := 0, 0
for _, v := range vals {
if arrow.IsUnsignedInteger(v.ID()) {
maxWidthUnsigned = exec.Max(v.(arrow.FixedWidthDataType).BitWidth(), maxWidthUnsigned)
} else {
maxWidthSigned = exec.Max(v.(arrow.FixedWidthDataType).BitWidth(), maxWidthSigned)
}
}
if maxWidthSigned == 0 {
switch {
case maxWidthUnsigned >= 64:
return arrow.PrimitiveTypes.Uint64
case maxWidthUnsigned == 32:
return arrow.PrimitiveTypes.Uint32
case maxWidthUnsigned == 16:
return arrow.PrimitiveTypes.Uint16
default:
debug.Assert(maxWidthUnsigned == 8, "bad maxWidthUnsigned")
return arrow.PrimitiveTypes.Uint8
}
}
if maxWidthSigned <= maxWidthUnsigned {
maxWidthSigned = bitutil.NextPowerOf2(maxWidthUnsigned + 1)
}
switch {
case maxWidthSigned >= 64:
return arrow.PrimitiveTypes.Int64
case maxWidthSigned == 32:
return arrow.PrimitiveTypes.Int32
case maxWidthSigned == 16:
return arrow.PrimitiveTypes.Int16
default:
debug.Assert(maxWidthSigned == 8, "bad maxWidthSigned")
return arrow.PrimitiveTypes.Int8
}
}
func hasDecimal(vals ...arrow.DataType) bool {
for _, v := range vals {
if arrow.IsDecimal(v.ID()) {
return true
}
}
return false
}
type decimalPromotion uint8
const (
decPromoteNone decimalPromotion = iota
decPromoteAdd
decPromoteMultiply
decPromoteDivide
)
func castBinaryDecimalArgs(promote decimalPromotion, vals ...arrow.DataType) error {
left, right := vals[0], vals[1]
debug.Assert(arrow.IsDecimal(left.ID()) || arrow.IsDecimal(right.ID()), "at least one of the types should be decimal")
// decimal + float = float
if arrow.IsFloating(left.ID()) {
vals[1] = vals[0]
return nil
} else if arrow.IsFloating(right.ID()) {
vals[0] = vals[1]
return nil
}
var prec1, scale1, prec2, scale2 int32
var err error
// decimal + integer = decimal
if arrow.IsDecimal(left.ID()) {
dec := left.(arrow.DecimalType)
prec1, scale1 = dec.GetPrecision(), dec.GetScale()
} else {
debug.Assert(arrow.IsInteger(left.ID()), "floats were already handled, this should be an int")
if prec1, err = kernels.MaxDecimalDigitsForInt(left.ID()); err != nil {
return err
}
}
if arrow.IsDecimal(right.ID()) {
dec := right.(arrow.DecimalType)
prec2, scale2 = dec.GetPrecision(), dec.GetScale()
} else {
debug.Assert(arrow.IsInteger(right.ID()), "float already handled, should be ints")
if prec2, err = kernels.MaxDecimalDigitsForInt(right.ID()); err != nil {
return err
}
}
if scale1 < 0 || scale2 < 0 {
return fmt.Errorf("%w: decimals with negative scales not supported", arrow.ErrNotImplemented)
}
// decimal128 + decimal256 = decimal256
castedID := arrow.DECIMAL128
if left.ID() == arrow.DECIMAL256 || right.ID() == arrow.DECIMAL256 { |
// decimal promotion rules compatible with amazon redshift
// https://docs.aws.amazon.com/redshift/latest/dg/r_numeric_computations201.html
var leftScaleup, rightScaleup int32
switch promote {
case decPromoteAdd:
leftScaleup = exec.Max(scale1, scale2) - scale1
rightScaleup = exec.Max(scale1, scale2) - scale2
case decPromoteMultiply:
case decPromoteDivide:
leftScaleup = exec.Max(4, scale1+prec2-scale2+1) + scale2 - scale1
default:
debug.Assert(false, fmt.Sprintf | castedID = arrow.DECIMAL256
} | random_line_split |
utils.go | /go/v14/arrow/internal/debug"
"github.com/apache/arrow/go/v14/arrow/memory"
"golang.org/x/xerrors"
)
type bufferWriteSeeker struct {
buf *memory.Buffer
pos int
mem memory.Allocator
}
func (b *bufferWriteSeeker) Reserve(nbytes int) {
if b.buf == nil {
b.buf = memory.NewResizableBuffer(b.mem)
}
newCap := int(math.Max(float64(b.buf.Cap()), 256))
for newCap < b.pos+nbytes {
newCap = bitutil.NextPowerOf2(newCap)
}
b.buf.Reserve(newCap)
}
func (b *bufferWriteSeeker) Write(p []byte) (n int, err error) {
if len(p) == 0 {
return 0, nil
}
if b.buf == nil {
b.Reserve(len(p))
} else if b.pos+len(p) >= b.buf.Cap() {
b.Reserve(len(p))
}
return b.UnsafeWrite(p)
}
func (b *bufferWriteSeeker) UnsafeWrite(p []byte) (n int, err error) {
n = copy(b.buf.Buf()[b.pos:], p)
b.pos += len(p)
if b.pos > b.buf.Len() {
b.buf.ResizeNoShrink(b.pos)
}
return
}
func (b *bufferWriteSeeker) Seek(offset int64, whence int) (int64, error) {
newpos, offs := 0, int(offset)
switch whence {
case io.SeekStart:
newpos = offs
case io.SeekCurrent:
newpos = b.pos + offs
case io.SeekEnd:
newpos = b.buf.Len() + offs
}
if newpos < 0 {
return 0, xerrors.New("negative result pos")
}
b.pos = newpos
return int64(newpos), nil
}
// ensureDictionaryDecoded is used by DispatchBest to determine
// the proper types for promotion. Casting is then performed by
// the executor before continuing execution: see the implementation
// of execInternal in exec.go after calling DispatchBest.
//
// That casting is where actual decoding would be performed for
// the dictionary
func ensureDictionaryDecoded(vals ...arrow.DataType) {
for i, v := range vals {
if v.ID() == arrow.DICTIONARY {
vals[i] = v.(*arrow.DictionaryType).ValueType
}
}
}
func replaceNullWithOtherType(vals ...arrow.DataType) {
debug.Assert(len(vals) == 2, "should be length 2")
if vals[0].ID() == arrow.NULL {
vals[0] = vals[1]
return
}
if vals[1].ID() == arrow.NULL {
vals[1] = vals[0]
return
}
}
func commonTemporalResolution(vals ...arrow.DataType) (arrow.TimeUnit, bool) {
isTimeUnit := false
finestUnit := arrow.Second
for _, v := range vals {
switch dt := v.(type) {
case *arrow.Date32Type:
isTimeUnit = true
continue
case *arrow.Date64Type:
finestUnit = exec.Max(finestUnit, arrow.Millisecond)
isTimeUnit = true
case arrow.TemporalWithUnit:
finestUnit = exec.Max(finestUnit, dt.TimeUnit())
isTimeUnit = true
default:
continue
}
}
return finestUnit, isTimeUnit
}
func replaceTemporalTypes(unit arrow.TimeUnit, vals ...arrow.DataType) {
for i, v := range vals {
switch dt := v.(type) {
case *arrow.TimestampType:
dt.Unit = unit
vals[i] = dt
case *arrow.Time32Type, *arrow.Time64Type:
if unit > arrow.Millisecond {
vals[i] = &arrow.Time64Type{Unit: unit}
} else {
vals[i] = &arrow.Time32Type{Unit: unit}
}
case *arrow.DurationType:
dt.Unit = unit
vals[i] = dt
case *arrow.Date32Type, *arrow.Date64Type:
vals[i] = &arrow.TimestampType{Unit: unit}
}
}
}
func replaceTypes(replacement arrow.DataType, vals ...arrow.DataType) {
for i := range vals {
vals[i] = replacement
}
}
func commonNumeric(vals ...arrow.DataType) arrow.DataType {
for _, v := range vals {
if !arrow.IsFloating(v.ID()) && !arrow.IsInteger(v.ID()) {
// a common numeric type is only possible if all are numeric
return nil
}
if v.ID() == arrow.FLOAT16 {
// float16 arithmetic is not currently supported
return nil
}
}
for _, v := range vals {
if v.ID() == arrow.FLOAT64 {
return arrow.PrimitiveTypes.Float64
}
}
for _, v := range vals {
if v.ID() == arrow.FLOAT32 {
return arrow.PrimitiveTypes.Float32
}
}
maxWidthSigned, maxWidthUnsigned := 0, 0
for _, v := range vals {
if arrow.IsUnsignedInteger(v.ID()) {
maxWidthUnsigned = exec.Max(v.(arrow.FixedWidthDataType).BitWidth(), maxWidthUnsigned)
} else {
maxWidthSigned = exec.Max(v.(arrow.FixedWidthDataType).BitWidth(), maxWidthSigned)
}
}
if maxWidthSigned == 0 {
switch {
case maxWidthUnsigned >= 64:
return arrow.PrimitiveTypes.Uint64
case maxWidthUnsigned == 32:
return arrow.PrimitiveTypes.Uint32
case maxWidthUnsigned == 16:
return arrow.PrimitiveTypes.Uint16
default:
debug.Assert(maxWidthUnsigned == 8, "bad maxWidthUnsigned")
return arrow.PrimitiveTypes.Uint8
}
}
if maxWidthSigned <= maxWidthUnsigned {
maxWidthSigned = bitutil.NextPowerOf2(maxWidthUnsigned + 1)
}
switch {
case maxWidthSigned >= 64:
return arrow.PrimitiveTypes.Int64
case maxWidthSigned == 32:
return arrow.PrimitiveTypes.Int32
case maxWidthSigned == 16:
return arrow.PrimitiveTypes.Int16
default:
debug.Assert(maxWidthSigned == 8, "bad maxWidthSigned")
return arrow.PrimitiveTypes.Int8
}
}
func | (vals ...arrow.DataType) bool {
for _, v := range vals {
if arrow.IsDecimal(v.ID()) {
return true
}
}
return false
}
type decimalPromotion uint8
const (
decPromoteNone decimalPromotion = iota
decPromoteAdd
decPromoteMultiply
decPromoteDivide
)
func castBinaryDecimalArgs(promote decimalPromotion, vals ...arrow.DataType) error {
left, right := vals[0], vals[1]
debug.Assert(arrow.IsDecimal(left.ID()) || arrow.IsDecimal(right.ID()), "at least one of the types should be decimal")
// decimal + float = float
if arrow.IsFloating(left.ID()) {
vals[1] = vals[0]
return nil
} else if arrow.IsFloating(right.ID()) {
vals[0] = vals[1]
return nil
}
var prec1, scale1, prec2, scale2 int32
var err error
// decimal + integer = decimal
if arrow.IsDecimal(left.ID()) {
dec := left.(arrow.DecimalType)
prec1, scale1 = dec.GetPrecision(), dec.GetScale()
} else {
debug.Assert(arrow.IsInteger(left.ID()), "floats were already handled, this should be an int")
if prec1, err = kernels.MaxDecimalDigitsForInt(left.ID()); err != nil {
return err
}
}
if arrow.IsDecimal(right.ID()) {
dec := right.(arrow.DecimalType)
prec2, scale2 = dec.GetPrecision(), dec.GetScale()
} else {
debug.Assert(arrow.IsInteger(right.ID()), "float already handled, should be ints")
if prec2, err = kernels.MaxDecimalDigitsForInt(right.ID()); err != nil {
return err
}
}
if scale1 < 0 || scale2 < 0 {
return fmt.Errorf("%w: decimals with negative scales not supported", arrow.ErrNotImplemented)
}
// decimal128 + decimal256 = decimal256
castedID := arrow.DECIMAL128
if left.ID() == arrow.DECIMAL256 || right.ID() == arrow.DECIMAL256 {
castedID = arrow.DECIMAL256
}
// decimal promotion rules compatible with amazon redshift
// https://docs.aws.amazon.com/redshift/latest/dg/r_numeric_computations201.html
var leftScaleup, rightScaleup int32
switch promote {
case decPromoteAdd:
leftScaleup = exec.Max(scale1, scale2) - scale1
rightScaleup = exec.Max(scale1, scale2) - scale2
case decPromoteMultiply:
case decPromoteDivide:
leftScaleup = exec.Max(4, scale1+prec2-scale2+1) + scale2 - scale1
default:
debug.Assert(false, fmt.Sprintf | hasDecimal | identifier_name |
evaluator.py | Cache:
def __init__(self):
self._cache: Dict[str, List[Optional["array.Array"]]] = {}
def merge(self, other):
self._cache = {**self._cache, **other._cache}
def add_entry(self, node_name: str, index: int, array: "array.Array"):
if node_name in self._cache:
entry = self._cache[node_name]
if index >= len(entry):
# grow cache in order to fit the new entry
entry += [None] * (index - len(entry) + 1)
else:
entry = [None] * (index + 1)
self._cache[node_name] = entry
self._cache[node_name][index] = array
def get_entry(self, node_name: str, index: int) -> Optional["array.Array"]:
return self._cache[node_name][index]
def get_node_cache(self, node_name: str) -> List[Optional["array.Array"]]:
return self._cache[node_name]
def get_all_cache_tensor_mappings(self) -> Dict[str, "array.Array"]:
mapping: Dict[str, "array.Array"] = {}
for key in self._cache.keys():
mapping = {**mapping, **self.get_node_cache_tensor_mapping(key)}
return mapping
def get_node_cache_tensor_mapping(self, node_name: str) -> Dict[str,
"array.Array"]:
caches = self._cache[node_name]
mapping: Dict[str, "array.Array"] = {}
for cache in caches:
if cache is None:
continue
output_node = cache._evaluator._output_node
output_in_edge = list(cache._evaluator._graph._graph.in_edges(
output_node, data=True))[0]
mapping[output_in_edge[-1]["name"]] = cache
return mapping
def empty(self) -> bool:
return len(self._cache) == 0
def to_dict(self):
return self._cache
class ArrayNodeLookupTable:
def __init__(self):
self._input_table: Dict[str, Tuple["array.Array"]] = {}
self._output_table: Dict[str, Tuple["array.Array"]] = {}
def add_input(self, node_name: str, arrays: Tuple["array.Array"]):
self._input_table[node_name] = arrays
def add_output(self, node_name: str, arrays: Tuple["array.Array"]):
self._output_table[node_name] = arrays
def get_input_map(self):
return self._input_table
def get_output_map(self):
return self._output_table
def update(self, other):
self._input_table = {**self._input_table, **other._input_table}
self._output_table = {**self._output_table, **other._output_table}
class LazyEvaluator:
def __init__(self):
self._parent_node: Optional[str] = None
self._output_node: Optional[str] = None
self._array_to_node_map: ArrayNodeLookupTable = ArrayNodeLookupTable()
self._cached_results: IntermediateResultCache = IntermediateResultCache()
self._graph: Graph = Graph()
def copy(self) -> "LazyEvaluator":
evaluator = LazyEvaluator()
evaluator._graph = self._graph
evaluator._cached_results = self._cached_results
evaluator._array_to_node_map = self._array_to_node_map
return evaluator
def add_node(
self, op_name: str, inputs: Tuple["array.Array"],
outputs: Tuple["array.Array"], **attributes):
self._parent_node, output_node_names = self._graph.add_node(
op_name, inputs, outputs, **attributes)
for output_array, output_node_name in zip(outputs, output_node_names):
output_array._evaluator._output_node = output_node_name
output_array._evaluator._parent_node = self._parent_node
if self._parent_node is not None:
# keep mypy happy because self._parent_node is Optional[str]
self._array_to_node_map.add_input(self._parent_node, inputs)
self._array_to_node_map.add_output(self._parent_node, outputs)
else:
raise InternalException("Parent node not set")
return
def add_initializer(
self, name: str, dtype: np.dtype, dims: Tuple[int],
vals):
raise NotImplementedError("Initializers not implemented")
def add_input(self, array: "array.Array"):
dtype = array.dtype
dims = array.shape
default_values = array._ort_value
# FIXME
if default_values is not None:
if default_values.data_type() != numpy_to_ort(
np.dtype(dtype)): # pragma: no cover
raise TypeError("Input type does not match input node")
default_shape = as_shape(default_values.shape())
if not weak_shape_comparisson(
default_shape, dims): # pragma: no cover
raise ValueError(
f"Input tensor shape {default_shape} does not match input "
f"node shape {dims}")
input_name, output_name = self._graph.add_input(array)
self._parent_node = input_name
self._output_node = output_name
if self._parent_node is None:
raise InternalException("Parent node not set")
self._array_to_node_map.add_input(self._parent_node, (array,))
self._array_to_node_map.add_output(self._parent_node, (array,))
def add_subgraph(self, other_graph: Graph):
if self._graph is None:
self._graph = other_graph
elif other_graph is not None:
self._graph.add_subgraph(other_graph)
def merge(self, other: "LazyEvaluator"):
self.add_subgraph(other._graph)
# share result cache
self._cached_results.merge(other._cached_results)
other._cached_results = self._cached_results
self._array_to_node_map.update(other._array_to_node_map)
other._array_to_node_map = self._array_to_node_map
return
def _build_executable_graph(self, array: "array.Array") -> ExecutableGraph:
if self._parent_node is None:
raise InternalException("Parent node not set")
if self._output_node is None:
raise InternalException("Output node not set")
# FIXME: need to fix result caching
return compile_graph(
self._graph, self._array_to_node_map.get_input_map(),
self._array_to_node_map.get_output_map(),
[self._output_node],
self._cached_results)
def evaluate(self, output_array: "array.Array") -> List[np.ndarray]:
if self._graph is None: # pragma: no cover
raise InternalException(
"Graph is empty. "
"This is an internal error. Please file a bug")
output_node = self._array_to_node_map.get_output_map()[
self._parent_node]
output_idx = [o._internal_name for o in output_node].index(
output_array._internal_name)
if output_idx == -1:
raise InternalException(
"Could not find index of output Array in output node")
executable_graph = self._build_executable_graph(output_array)
onnx_graph = executable_graph.build_onnx_graph()
m = onnx.helper.make_model(onnx_graph)
buffer = m.SerializeToString()
output_name = list(self._graph._graph.in_edges(
self._output_node, data=True))[0][-1]["name"]
# TODO: maybe disable optimisations when graph has already been optimised
# with jit?
session_options = onnxruntime.SessionOptions()
session_options.graph_optimization_level = get_ort_graph_optimization_level()
try:
onnx.save_model(m, "failed_model.onnx")
session = onnxruntime.InferenceSession( | except Exception: # pragma: no cover
# dump failed model for debugging purposes
onnx.save_model(m, "failed_model.onnx")
raise
io_binding = session.io_binding()
session_input_names = [i.name for i in session.get_inputs()]
graph_node_mapping = executable_graph.get_input_node_mapping()
array_mapping = self._array_to_node_map.get_input_map()
inputs = {
**
{input_output_name: array_mapping[input_node_name][0]
for input_node_name, input_output_name in graph_node_mapping.items()
# TODO: some input nodes, such as initializers, do not require an input
# array. This should be cleaned up
if input_node_name in array_mapping and len(array_mapping[input_node_name]) > 0}, # noqa
**self._cached_results.get_all_cache_tensor_mappings()}
inputs = {k: v for k, v in inputs.items() if k in session_input_names}
if len(inputs) != len(session_input_names):
raise InternalException(
f"Expected {len(session_input_names)} inputs, but got {len(inputs)}")
for input_name, input_array in inputs.items():
ortvalue = input_array._ort_value
if ortvalue is None:
raise ValueError(
"Internal bug. Array's Ortvalue is not set and can not be a model "
"input")
ort_value_dtype = ort_to_numpy(ortvalue.data_type())
# this will work 99% of the time in this century :D
if ort_value_dtype == np.int64:
ort_value_dtype = np | buffer, providers=Config().get_providers(),
sess_options=session_options) | random_line_split |
evaluator.py | Cache:
def __init__(self):
self._cache: Dict[str, List[Optional["array.Array"]]] = {}
def merge(self, other):
self._cache = {**self._cache, **other._cache}
def add_entry(self, node_name: str, index: int, array: "array.Array"):
if node_name in self._cache:
entry = self._cache[node_name]
if index >= len(entry):
# grow cache in order to fit the new entry
entry += [None] * (index - len(entry) + 1)
else:
entry = [None] * (index + 1)
self._cache[node_name] = entry
self._cache[node_name][index] = array
def get_entry(self, node_name: str, index: int) -> Optional["array.Array"]:
return self._cache[node_name][index]
def get_node_cache(self, node_name: str) -> List[Optional["array.Array"]]:
return self._cache[node_name]
def get_all_cache_tensor_mappings(self) -> Dict[str, "array.Array"]:
mapping: Dict[str, "array.Array"] = {}
for key in self._cache.keys():
mapping = {**mapping, **self.get_node_cache_tensor_mapping(key)}
return mapping
def get_node_cache_tensor_mapping(self, node_name: str) -> Dict[str,
"array.Array"]:
caches = self._cache[node_name]
mapping: Dict[str, "array.Array"] = {}
for cache in caches:
if cache is None:
continue
output_node = cache._evaluator._output_node
output_in_edge = list(cache._evaluator._graph._graph.in_edges(
output_node, data=True))[0]
mapping[output_in_edge[-1]["name"]] = cache
return mapping
def empty(self) -> bool:
return len(self._cache) == 0
def | (self):
return self._cache
class ArrayNodeLookupTable:
def __init__(self):
self._input_table: Dict[str, Tuple["array.Array"]] = {}
self._output_table: Dict[str, Tuple["array.Array"]] = {}
def add_input(self, node_name: str, arrays: Tuple["array.Array"]):
self._input_table[node_name] = arrays
def add_output(self, node_name: str, arrays: Tuple["array.Array"]):
self._output_table[node_name] = arrays
def get_input_map(self):
return self._input_table
def get_output_map(self):
return self._output_table
def update(self, other):
self._input_table = {**self._input_table, **other._input_table}
self._output_table = {**self._output_table, **other._output_table}
class LazyEvaluator:
def __init__(self):
self._parent_node: Optional[str] = None
self._output_node: Optional[str] = None
self._array_to_node_map: ArrayNodeLookupTable = ArrayNodeLookupTable()
self._cached_results: IntermediateResultCache = IntermediateResultCache()
self._graph: Graph = Graph()
def copy(self) -> "LazyEvaluator":
evaluator = LazyEvaluator()
evaluator._graph = self._graph
evaluator._cached_results = self._cached_results
evaluator._array_to_node_map = self._array_to_node_map
return evaluator
def add_node(
self, op_name: str, inputs: Tuple["array.Array"],
outputs: Tuple["array.Array"], **attributes):
self._parent_node, output_node_names = self._graph.add_node(
op_name, inputs, outputs, **attributes)
for output_array, output_node_name in zip(outputs, output_node_names):
output_array._evaluator._output_node = output_node_name
output_array._evaluator._parent_node = self._parent_node
if self._parent_node is not None:
# keep mypy happy because self._parent_node is Optional[str]
self._array_to_node_map.add_input(self._parent_node, inputs)
self._array_to_node_map.add_output(self._parent_node, outputs)
else:
raise InternalException("Parent node not set")
return
def add_initializer(
self, name: str, dtype: np.dtype, dims: Tuple[int],
vals):
raise NotImplementedError("Initializers not implemented")
def add_input(self, array: "array.Array"):
dtype = array.dtype
dims = array.shape
default_values = array._ort_value
# FIXME
if default_values is not None:
if default_values.data_type() != numpy_to_ort(
np.dtype(dtype)): # pragma: no cover
raise TypeError("Input type does not match input node")
default_shape = as_shape(default_values.shape())
if not weak_shape_comparisson(
default_shape, dims): # pragma: no cover
raise ValueError(
f"Input tensor shape {default_shape} does not match input "
f"node shape {dims}")
input_name, output_name = self._graph.add_input(array)
self._parent_node = input_name
self._output_node = output_name
if self._parent_node is None:
raise InternalException("Parent node not set")
self._array_to_node_map.add_input(self._parent_node, (array,))
self._array_to_node_map.add_output(self._parent_node, (array,))
def add_subgraph(self, other_graph: Graph):
if self._graph is None:
self._graph = other_graph
elif other_graph is not None:
self._graph.add_subgraph(other_graph)
def merge(self, other: "LazyEvaluator"):
self.add_subgraph(other._graph)
# share result cache
self._cached_results.merge(other._cached_results)
other._cached_results = self._cached_results
self._array_to_node_map.update(other._array_to_node_map)
other._array_to_node_map = self._array_to_node_map
return
def _build_executable_graph(self, array: "array.Array") -> ExecutableGraph:
if self._parent_node is None:
raise InternalException("Parent node not set")
if self._output_node is None:
raise InternalException("Output node not set")
# FIXME: need to fix result caching
return compile_graph(
self._graph, self._array_to_node_map.get_input_map(),
self._array_to_node_map.get_output_map(),
[self._output_node],
self._cached_results)
def evaluate(self, output_array: "array.Array") -> List[np.ndarray]:
if self._graph is None: # pragma: no cover
raise InternalException(
"Graph is empty. "
"This is an internal error. Please file a bug")
output_node = self._array_to_node_map.get_output_map()[
self._parent_node]
output_idx = [o._internal_name for o in output_node].index(
output_array._internal_name)
if output_idx == -1:
raise InternalException(
"Could not find index of output Array in output node")
executable_graph = self._build_executable_graph(output_array)
onnx_graph = executable_graph.build_onnx_graph()
m = onnx.helper.make_model(onnx_graph)
buffer = m.SerializeToString()
output_name = list(self._graph._graph.in_edges(
self._output_node, data=True))[0][-1]["name"]
# TODO: maybe disable optimisations when graph has already been optimised
# with jit?
session_options = onnxruntime.SessionOptions()
session_options.graph_optimization_level = get_ort_graph_optimization_level()
try:
onnx.save_model(m, "failed_model.onnx")
session = onnxruntime.InferenceSession(
buffer, providers=Config().get_providers(),
sess_options=session_options)
except Exception: # pragma: no cover
# dump failed model for debugging purposes
onnx.save_model(m, "failed_model.onnx")
raise
io_binding = session.io_binding()
session_input_names = [i.name for i in session.get_inputs()]
graph_node_mapping = executable_graph.get_input_node_mapping()
array_mapping = self._array_to_node_map.get_input_map()
inputs = {
**
{input_output_name: array_mapping[input_node_name][0]
for input_node_name, input_output_name in graph_node_mapping.items()
# TODO: some input nodes, such as initializers, do not require an input
# array. This should be cleaned up
if input_node_name in array_mapping and len(array_mapping[input_node_name]) > 0}, # noqa
**self._cached_results.get_all_cache_tensor_mappings()}
inputs = {k: v for k, v in inputs.items() if k in session_input_names}
if len(inputs) != len(session_input_names):
raise InternalException(
f"Expected {len(session_input_names)} inputs, but got {len(inputs)}")
for input_name, input_array in inputs.items():
ortvalue = input_array._ort_value
if ortvalue is None:
raise ValueError(
"Internal bug. Array's Ortvalue is not set and can not be a model "
"input")
ort_value_dtype = ort_to_numpy(ortvalue.data_type())
# this will work 99% of the time in this century :D
if ort_value_dtype == np.int64:
ort_value_dtype = | to_dict | identifier_name |
evaluator.py | def __init__(self):
self._cache: Dict[str, List[Optional["array.Array"]]] = {}
def merge(self, other):
self._cache = {**self._cache, **other._cache}
def add_entry(self, node_name: str, index: int, array: "array.Array"):
if node_name in self._cache:
entry = self._cache[node_name]
if index >= len(entry):
# grow cache in order to fit the new entry
entry += [None] * (index - len(entry) + 1)
else:
entry = [None] * (index + 1)
self._cache[node_name] = entry
self._cache[node_name][index] = array
def get_entry(self, node_name: str, index: int) -> Optional["array.Array"]:
return self._cache[node_name][index]
def get_node_cache(self, node_name: str) -> List[Optional["array.Array"]]:
return self._cache[node_name]
def get_all_cache_tensor_mappings(self) -> Dict[str, "array.Array"]:
mapping: Dict[str, "array.Array"] = {}
for key in self._cache.keys():
mapping = {**mapping, **self.get_node_cache_tensor_mapping(key)}
return mapping
def get_node_cache_tensor_mapping(self, node_name: str) -> Dict[str,
"array.Array"]:
caches = self._cache[node_name]
mapping: Dict[str, "array.Array"] = {}
for cache in caches:
if cache is None:
continue
output_node = cache._evaluator._output_node
output_in_edge = list(cache._evaluator._graph._graph.in_edges(
output_node, data=True))[0]
mapping[output_in_edge[-1]["name"]] = cache
return mapping
def empty(self) -> bool:
return len(self._cache) == 0
def to_dict(self):
return self._cache
class ArrayNodeLookupTable:
def __init__(self):
self._input_table: Dict[str, Tuple["array.Array"]] = {}
self._output_table: Dict[str, Tuple["array.Array"]] = {}
def add_input(self, node_name: str, arrays: Tuple["array.Array"]):
self._input_table[node_name] = arrays
def add_output(self, node_name: str, arrays: Tuple["array.Array"]):
self._output_table[node_name] = arrays
def get_input_map(self):
return self._input_table
def get_output_map(self):
return self._output_table
def update(self, other):
self._input_table = {**self._input_table, **other._input_table}
self._output_table = {**self._output_table, **other._output_table}
class LazyEvaluator:
def __init__(self):
self._parent_node: Optional[str] = None
self._output_node: Optional[str] = None
self._array_to_node_map: ArrayNodeLookupTable = ArrayNodeLookupTable()
self._cached_results: IntermediateResultCache = IntermediateResultCache()
self._graph: Graph = Graph()
def copy(self) -> "LazyEvaluator":
evaluator = LazyEvaluator()
evaluator._graph = self._graph
evaluator._cached_results = self._cached_results
evaluator._array_to_node_map = self._array_to_node_map
return evaluator
def add_node(
self, op_name: str, inputs: Tuple["array.Array"],
outputs: Tuple["array.Array"], **attributes):
self._parent_node, output_node_names = self._graph.add_node(
op_name, inputs, outputs, **attributes)
for output_array, output_node_name in zip(outputs, output_node_names):
output_array._evaluator._output_node = output_node_name
output_array._evaluator._parent_node = self._parent_node
if self._parent_node is not None:
# keep mypy happy because self._parent_node is Optional[str]
self._array_to_node_map.add_input(self._parent_node, inputs)
self._array_to_node_map.add_output(self._parent_node, outputs)
else:
raise InternalException("Parent node not set")
return
def add_initializer(
self, name: str, dtype: np.dtype, dims: Tuple[int],
vals):
raise NotImplementedError("Initializers not implemented")
def add_input(self, array: "array.Array"):
dtype = array.dtype
dims = array.shape
default_values = array._ort_value
# FIXME
if default_values is not None:
if default_values.data_type() != numpy_to_ort(
np.dtype(dtype)): # pragma: no cover
raise TypeError("Input type does not match input node")
default_shape = as_shape(default_values.shape())
if not weak_shape_comparisson(
default_shape, dims): # pragma: no cover
raise ValueError(
f"Input tensor shape {default_shape} does not match input "
f"node shape {dims}")
input_name, output_name = self._graph.add_input(array)
self._parent_node = input_name
self._output_node = output_name
if self._parent_node is None:
raise InternalException("Parent node not set")
self._array_to_node_map.add_input(self._parent_node, (array,))
self._array_to_node_map.add_output(self._parent_node, (array,))
def add_subgraph(self, other_graph: Graph):
if self._graph is None:
self._graph = other_graph
elif other_graph is not None:
self._graph.add_subgraph(other_graph)
def merge(self, other: "LazyEvaluator"):
self.add_subgraph(other._graph)
# share result cache
self._cached_results.merge(other._cached_results)
other._cached_results = self._cached_results
self._array_to_node_map.update(other._array_to_node_map)
other._array_to_node_map = self._array_to_node_map
return
def _build_executable_graph(self, array: "array.Array") -> ExecutableGraph:
if self._parent_node is None:
raise InternalException("Parent node not set")
if self._output_node is None:
raise InternalException("Output node not set")
# FIXME: need to fix result caching
return compile_graph(
self._graph, self._array_to_node_map.get_input_map(),
self._array_to_node_map.get_output_map(),
[self._output_node],
self._cached_results)
def evaluate(self, output_array: "array.Array") -> List[np.ndarray]:
if self._graph is None: # pragma: no cover
raise InternalException(
"Graph is empty. "
"This is an internal error. Please file a bug")
output_node = self._array_to_node_map.get_output_map()[
self._parent_node]
output_idx = [o._internal_name for o in output_node].index(
output_array._internal_name)
if output_idx == -1:
raise InternalException(
"Could not find index of output Array in output node")
executable_graph = self._build_executable_graph(output_array)
onnx_graph = executable_graph.build_onnx_graph()
m = onnx.helper.make_model(onnx_graph)
buffer = m.SerializeToString()
output_name = list(self._graph._graph.in_edges(
self._output_node, data=True))[0][-1]["name"]
# TODO: maybe disable optimisations when graph has already been optimised
# with jit?
session_options = onnxruntime.SessionOptions()
session_options.graph_optimization_level = get_ort_graph_optimization_level()
try:
onnx.save_model(m, "failed_model.onnx")
session = onnxruntime.InferenceSession(
buffer, providers=Config().get_providers(),
sess_options=session_options)
except Exception: # pragma: no cover
# dump failed model for debugging purposes
onnx.save_model(m, "failed_model.onnx")
raise
io_binding = session.io_binding()
session_input_names = [i.name for i in session.get_inputs()]
graph_node_mapping = executable_graph.get_input_node_mapping()
array_mapping = self._array_to_node_map.get_input_map()
inputs = {
**
{input_output_name: array_mapping[input_node_name][0]
for input_node_name, input_output_name in graph_node_mapping.items()
# TODO: some input nodes, such as initializers, do not require an input
# array. This should be cleaned up
if input_node_name in array_mapping and len(array_mapping[input_node_name]) > 0}, # noqa
**self._cached_results.get_all_cache_tensor_mappings()}
inputs = {k: v for k, v in inputs.items() if k in session_input_names}
if len(inputs) != len(session_input_names):
raise InternalException(
f"Expected {len(session_input_names)} inputs, but got {len(inputs)}")
for input_name, input_array in inputs.items():
ortvalue = input_array._ort_value
if ortvalue is None:
raise ValueError(
"Internal bug. Array's Ortvalue is not set and can not be a model "
"input")
ort_value_dtype = ort_to_numpy(ortvalue.data_type())
# this will work 99% of the time in this century :D
if ort_value_dtype == np.int64:
| ort_value_dtype = np.longlong | conditional_block |
|
evaluator.py | Cache:
def __init__(self):
self._cache: Dict[str, List[Optional["array.Array"]]] = {}
def merge(self, other):
|
def add_entry(self, node_name: str, index: int, array: "array.Array"):
if node_name in self._cache:
entry = self._cache[node_name]
if index >= len(entry):
# grow cache in order to fit the new entry
entry += [None] * (index - len(entry) + 1)
else:
entry = [None] * (index + 1)
self._cache[node_name] = entry
self._cache[node_name][index] = array
def get_entry(self, node_name: str, index: int) -> Optional["array.Array"]:
return self._cache[node_name][index]
def get_node_cache(self, node_name: str) -> List[Optional["array.Array"]]:
return self._cache[node_name]
def get_all_cache_tensor_mappings(self) -> Dict[str, "array.Array"]:
mapping: Dict[str, "array.Array"] = {}
for key in self._cache.keys():
mapping = {**mapping, **self.get_node_cache_tensor_mapping(key)}
return mapping
def get_node_cache_tensor_mapping(self, node_name: str) -> Dict[str,
"array.Array"]:
caches = self._cache[node_name]
mapping: Dict[str, "array.Array"] = {}
for cache in caches:
if cache is None:
continue
output_node = cache._evaluator._output_node
output_in_edge = list(cache._evaluator._graph._graph.in_edges(
output_node, data=True))[0]
mapping[output_in_edge[-1]["name"]] = cache
return mapping
def empty(self) -> bool:
return len(self._cache) == 0
def to_dict(self):
return self._cache
class ArrayNodeLookupTable:
def __init__(self):
self._input_table: Dict[str, Tuple["array.Array"]] = {}
self._output_table: Dict[str, Tuple["array.Array"]] = {}
def add_input(self, node_name: str, arrays: Tuple["array.Array"]):
self._input_table[node_name] = arrays
def add_output(self, node_name: str, arrays: Tuple["array.Array"]):
self._output_table[node_name] = arrays
def get_input_map(self):
return self._input_table
def get_output_map(self):
return self._output_table
def update(self, other):
self._input_table = {**self._input_table, **other._input_table}
self._output_table = {**self._output_table, **other._output_table}
class LazyEvaluator:
def __init__(self):
self._parent_node: Optional[str] = None
self._output_node: Optional[str] = None
self._array_to_node_map: ArrayNodeLookupTable = ArrayNodeLookupTable()
self._cached_results: IntermediateResultCache = IntermediateResultCache()
self._graph: Graph = Graph()
def copy(self) -> "LazyEvaluator":
evaluator = LazyEvaluator()
evaluator._graph = self._graph
evaluator._cached_results = self._cached_results
evaluator._array_to_node_map = self._array_to_node_map
return evaluator
def add_node(
self, op_name: str, inputs: Tuple["array.Array"],
outputs: Tuple["array.Array"], **attributes):
self._parent_node, output_node_names = self._graph.add_node(
op_name, inputs, outputs, **attributes)
for output_array, output_node_name in zip(outputs, output_node_names):
output_array._evaluator._output_node = output_node_name
output_array._evaluator._parent_node = self._parent_node
if self._parent_node is not None:
# keep mypy happy because self._parent_node is Optional[str]
self._array_to_node_map.add_input(self._parent_node, inputs)
self._array_to_node_map.add_output(self._parent_node, outputs)
else:
raise InternalException("Parent node not set")
return
def add_initializer(
self, name: str, dtype: np.dtype, dims: Tuple[int],
vals):
raise NotImplementedError("Initializers not implemented")
def add_input(self, array: "array.Array"):
dtype = array.dtype
dims = array.shape
default_values = array._ort_value
# FIXME
if default_values is not None:
if default_values.data_type() != numpy_to_ort(
np.dtype(dtype)): # pragma: no cover
raise TypeError("Input type does not match input node")
default_shape = as_shape(default_values.shape())
if not weak_shape_comparisson(
default_shape, dims): # pragma: no cover
raise ValueError(
f"Input tensor shape {default_shape} does not match input "
f"node shape {dims}")
input_name, output_name = self._graph.add_input(array)
self._parent_node = input_name
self._output_node = output_name
if self._parent_node is None:
raise InternalException("Parent node not set")
self._array_to_node_map.add_input(self._parent_node, (array,))
self._array_to_node_map.add_output(self._parent_node, (array,))
def add_subgraph(self, other_graph: Graph):
if self._graph is None:
self._graph = other_graph
elif other_graph is not None:
self._graph.add_subgraph(other_graph)
def merge(self, other: "LazyEvaluator"):
self.add_subgraph(other._graph)
# share result cache
self._cached_results.merge(other._cached_results)
other._cached_results = self._cached_results
self._array_to_node_map.update(other._array_to_node_map)
other._array_to_node_map = self._array_to_node_map
return
def _build_executable_graph(self, array: "array.Array") -> ExecutableGraph:
if self._parent_node is None:
raise InternalException("Parent node not set")
if self._output_node is None:
raise InternalException("Output node not set")
# FIXME: need to fix result caching
return compile_graph(
self._graph, self._array_to_node_map.get_input_map(),
self._array_to_node_map.get_output_map(),
[self._output_node],
self._cached_results)
def evaluate(self, output_array: "array.Array") -> List[np.ndarray]:
if self._graph is None: # pragma: no cover
raise InternalException(
"Graph is empty. "
"This is an internal error. Please file a bug")
output_node = self._array_to_node_map.get_output_map()[
self._parent_node]
output_idx = [o._internal_name for o in output_node].index(
output_array._internal_name)
if output_idx == -1:
raise InternalException(
"Could not find index of output Array in output node")
executable_graph = self._build_executable_graph(output_array)
onnx_graph = executable_graph.build_onnx_graph()
m = onnx.helper.make_model(onnx_graph)
buffer = m.SerializeToString()
output_name = list(self._graph._graph.in_edges(
self._output_node, data=True))[0][-1]["name"]
# TODO: maybe disable optimisations when graph has already been optimised
# with jit?
session_options = onnxruntime.SessionOptions()
session_options.graph_optimization_level = get_ort_graph_optimization_level()
try:
onnx.save_model(m, "failed_model.onnx")
session = onnxruntime.InferenceSession(
buffer, providers=Config().get_providers(),
sess_options=session_options)
except Exception: # pragma: no cover
# dump failed model for debugging purposes
onnx.save_model(m, "failed_model.onnx")
raise
io_binding = session.io_binding()
session_input_names = [i.name for i in session.get_inputs()]
graph_node_mapping = executable_graph.get_input_node_mapping()
array_mapping = self._array_to_node_map.get_input_map()
inputs = {
**
{input_output_name: array_mapping[input_node_name][0]
for input_node_name, input_output_name in graph_node_mapping.items()
# TODO: some input nodes, such as initializers, do not require an input
# array. This should be cleaned up
if input_node_name in array_mapping and len(array_mapping[input_node_name]) > 0}, # noqa
**self._cached_results.get_all_cache_tensor_mappings()}
inputs = {k: v for k, v in inputs.items() if k in session_input_names}
if len(inputs) != len(session_input_names):
raise InternalException(
f"Expected {len(session_input_names)} inputs, but got {len(inputs)}")
for input_name, input_array in inputs.items():
ortvalue = input_array._ort_value
if ortvalue is None:
raise ValueError(
"Internal bug. Array's Ortvalue is not set and can not be a model "
"input")
ort_value_dtype = ort_to_numpy(ortvalue.data_type())
# this will work 99% of the time in this century :D
if ort_value_dtype == np.int64:
ort_value_dtype = | self._cache = {**self._cache, **other._cache} | identifier_body |
main.js | }
// });
// 上传
var uploadBanner = function(target, callback) {
$(target).uploadify({
'auto': true,
'width': 80,
'height': 27,
'fileObjName': 'upfile',
'buttonText': '选择图片',
'swf': '/js/lib/uploadify/uploadify.swf',
'uploader': '/englishCompetition/upload?action=upload',
'multi': false,
'removeCompleted': false,
'cancelImg': 'js/uploadify/uploadify-cancel.png',
'fileTypeExts': '*.jpg;*.jpeg;*.gif;*.png',
'fileSizeLimit': '1MB',
'overrideEvents': ['onSelectError', 'onDialogClose'],
onUploadSuccess: function (file, data, response) {
this.queueData.files = [];
try {
data = JSON.parse(data);
} catch (error) {
data = JSON.parse(data.substring(1));
}
// console.log(data.url);
if (data.errorCode == 1) {
$(target).uploadify('cancel', '*');
if (typeof callback === 'function') callback(data);
} else if (data.errorCode == -1) {
alert(data.state);
}
},
onSelectError: function (file, errorCode, errorMsg) {
switch (errorCode) {
case -110:
alert('文件 [' + file.name + '] 大小超出系统限制的' + $(target).uploadify('settings', 'fileSizeLimit') + '大小!');
break;
case -120:
alert('文件 [' + file.name + '] 大小异常!');
break;
case -130:
alert('文件 [' + file.name + '] 类型不正确!');
break;
default:
alert('上传失败: ' + errorCode + ': ' + errorMsg);
break;
}
}
});
}
var gradeList;
var promptTpl = '<div class="contain">' +
'<div class="wrap"><i></i></div>' +
'<p class="content" style="line-height:24px;font-size:16px;color:#333333;text-align:center;margin:0;margin-bottom:30px;">你的浏览器没有安装Flash<br>请点击下方链接下载</p>' +
'<div class="dialog-btnBar" style="padding-top:0;">' +
'<div class="error"></div>' +
'<a class="download-btn" href="http://17ks.chivoxapp.com/guide/QQBrowser_Setup_SEM1.exe" style="float:right;margin-left:10px;height:40px;width:auto;border:#dcdcdc 1px solid;"><i class="icon-qq"></i>下载QQ浏览器<i class="icon-new"></i></a>' +
'<a class="download-btn" href="http://17ks.chivoxapp.com/guide/360cse_8.7.0.306.exe" style="float:right;margin-left:10px;height:40px;width:auto;border:#dcdcdc 1px solid;"><i class="icon-360"></i>下载360极速浏览器<i class="icon-new"></i></a>' +
'<div class="clear"></div>' +
'</div>' +
'</div>';
function getGradeList (callback) {
solution.server.get('/TeacherClasses/getGrade', {}, function (data) {
if (data.result == 1) {
gradeList = data.info;
callback();
} else {
alert(data.message)
}
}, false);
}
$(function () {
var resizeHeight = function () {
var browserHeight = $(window).height();
$('.container').css('min-height', browserHeight - 98);
};
resizeHeight();
$(window).resize(function (event) {
resizeHeight();
});
try {
solution.txFactory = new txFactory();
} catch (e) {
// console.log("tx.js没有引入");
}
if (flashcheck().f === 0) {
// alert("你的浏览器没有安装Flash");
$.Prompt({
content: promptTpl,
event: function () {
}
})
}
solution.server.get(GET_MENU, {}, function (data) {
if (data.result == 1) {
var menus = data.info.auth;
solution.constant = {};
solution.constant.user = data.info.user;
$('.nav > ul.nav-list').empty();
$.each(menus, function (index, menu) {
$('.nav > ul.nav-list').append('<li name="topic_build" data-for="' + menu.shortName + '"><a href="#" data-chivox-event="click:menuClick&' + menu.shortName + '" data-chivox-group="menu">' + menu.menuName + '</a></li>');
});
$('#userName').html(data.info.user.nickName);
$('#userName').data('userName', data.info.user.nickName);
$('#userName').data('ManageType', data.info.user.ManageType);
$('#userName').data('RoleType', data.info.user.RoleType);
$('#userName').data('userId', data.info.user.userID);
if ($.cookie('rt') == data.info.user.RoleType && $.cookie('uid') == data.info.user.userID) {
} else {
$.cookie('cid', '');
}
$.cookie('rt', data.info.user.RoleType);
$.cookie('uid', data.info.user.userID);
if (data.info.user.RoleType == 'teacher' && $.cookie('hasCp') == 1 && $.cookie('hasZy') !== 1) {
$('.nav-sub > li:last > a').attr('href', Config.reportUrl + '/web/models/Login/self.php').text('退出');
clearAllCookie();
}
if (data.info.user.RoleType == 'teacher' && $.cookie('hasCp') == 1 && $.cookie('hasZy') == 1) {
$('.nav-sub > li:last > a').attr('href', Config.reportUrl + '/web/models/TeaMain/self.php').text('返回作业系统');
}
if (data.info.user.RoleType == 'manager') {
$('.nav-sub > li:last > a').click(function() {
clearAllCookie();
})
}
getGradeList(function() {
setTimeout('$(".nav > ul > li:first > a").click()', 0);
});
$('#sdzj-info input').click(function (evt) {
$(this).select();
});
} else {
window.location.href = Config.reportUrl + '/login/self.php'
}
});
});
// 设备检测
function checkmedia(cb) {
// var GuideObj = new Guide(start, play);
try {
var aiPanel = new chivox.AiPanel($.extend(true, {}, Config.sdk, {
onMicStatusChange: function (code, msg) {
console.log('onMicStatusChange: ' + code);
cb && Guide3.addCallback(cb);
if (code == 50001 || code == 50008) {
if (Guide3.getStep() === 0) {
Guide3.show();
$('#recorder').addClass('min');
} else {
$('#guide').find('button').removeClass('disabled');
}
} else {
if (code == 50003) {
Guide3.show(11);
} else {
Guide3.show(9);
}
}
hideGuide();
}
}));
player = aiPanel.player;
recorder = aiPanel.recorder;
} catch (e) {
cw.ui.alert('语音服务暂时不可用,请稍后再试');
}
}
function hideGuide() {
$('.guide_container').addClass('hide');
}
$('body').off('click').on('click', '.live-admin.check-device', function () {
checkmedia();
});
if (!window.console) { // 兼容IE8
window.console = {
log: function () { },
error: function () { },
info: function () { },
warn: function () { }
}
}
/*
*使用canvas合成图片
*param:{object}
*return:base64图片
*/
var mergeImage = function(cfg, callback) {
/**
*文字换行根据canvas宽度来做折行
* @param {[type]} str [字符串]
* @param {[type]} canvas [canvas对象]
* @param {[type]} initX [左间距]
* @param {[type]} initY [上间距]
* @param {[type]} lineHeight [行高]
*/
var canvasTextAutoLine = function (str, canvas, initX, initY, lineHeight) {
var lineWidth = 0;
// var canvasWidth = parseInt(canvas.width.replace("px",""));
var lastSubStrIndex = 0;
if (str.length > 24) {
str = str.substr(0, 24) + '...'
}
for (var i = 0; i < str.length; i++) {
lineWidth += canvas.measureText(str[i]).width;
if (lineWidth > 720) {
canvas.fillText(str.substring(lastSubStrIndex, i), initX, initY);
initY += lineHeight;
lineWidth = 0;
lastSubStrIndex = i;
} | random_line_split |
||
main.js | Zy') !== 1) {
$('.nav-sub > li:last > a').attr('href', Config.reportUrl + '/web/models/Login/self.php').text('退出');
clearAllCookie();
}
if (data.info.user.RoleType == 'teacher' && $.cookie('hasCp') == 1 && $.cookie('hasZy') == 1) {
$('.nav-sub > li:last > a').attr('href', Config.reportUrl + '/web/models/TeaMain/self.php').text('返回作业系统');
}
if (data.info.user.RoleType == 'manager') {
$('.nav-sub > li:last > a').click(function() {
clearAllCookie();
})
}
getGradeList(function() {
setTimeout('$(".nav > ul > li:first > a").click()', 0);
});
$('#sdzj-info input').click(function (evt) {
$(this).select();
});
} else {
window.location.href = Config.reportUrl + '/login/self.php'
}
});
});
// 设备检测
function checkmedia(cb) {
// var GuideObj = new Guide(start, play);
try {
var aiPanel = new chivox.AiPanel($.extend(true, {}, Config.sdk, {
onMicStatusChange: function (code, msg) {
console.log('onMicStatusChange: ' + code);
cb && Guide3.addCallback(cb);
if (code == 50001 || code == 50008) {
if (Guide3.getStep() === 0) {
Guide3.show();
$('#recorder').addClass('min');
} else {
$('#guide').find('button').removeClass('disabled');
}
} else {
if (code == 50003) {
Guide3.show(11);
} else {
Guide3.show(9);
}
}
hideGuide();
}
}));
player = aiPanel.player;
recorder = aiPanel.recorder;
} catch (e) {
cw.ui.alert('语音服务暂时不可用,请稍后再试');
}
}
function hideGuide() {
$('.guide_container').addClass('hide');
}
$('body').off('click').on('click', '.live-admin.check-device', function () {
checkmedia();
});
if (!window.console) { // 兼容IE8
window.console = {
log: function () { },
error: function () { },
info: function () { },
warn: function () { }
}
}
/*
*使用canvas合成图片
*param:{object}
*return:base64图片
*/
var mergeImage = function(cfg, callback) {
/**
*文字换行根据canvas宽度来做折行
* @param {[type]} str [字符串]
* @param {[type]} canvas [canvas对象]
* @param {[type]} initX [左间距]
* @param {[type]} initY [上间距]
* @param {[type]} lineHeight [行高]
*/
var canvasTextAutoLine = function (str, canvas, initX, initY, lineHeight) {
var lineWidth = 0;
// var canvasWidth = parseInt(canvas.width.replace("px",""));
var lastSubStrIndex = 0;
if (str.length > 24) {
str = str.substr(0, 24) + '...'
}
for (var i = 0; i < str.length; i++) {
lineWidth += canvas.measureText(str[i]).width;
if (lineWidth > 720) {
canvas.fillText(str.substring(lastSubStrIndex, i), initX, initY);
initY += lineHeight;
lineWidth = 0;
lastSubStrIndex = i;
}
if (i == str.length - 1) {
canvas.fillText(str.substring(lastSubStrIndex, i + 1), initX, initY);
}
}
}
var getCanvasContext = function(width, height) {
var html = '<canvas id="MergeImageId" width="' + width + '" height="' + height + '" style="display:none"></canvas>';
$('body').append(html);
ret = document.getElementById('MergeImageId').getContext('2d');
return ret;
};
var defalut_cfg = {
width: 1000, // canvas宽度
height: 500, // canvas高度
value: '', // 输入的文字
imgsrc: '', // 图片地址
font: '60px Microsoft Yahei', // 合成图片字体大小
textAlign: 'center', // 合成图片中文字位置
fillStyle: '#333', // 合成图片中文字颜色
};
var _cfg = $.extend({}, defalut_cfg, cfg);
var mainCtx = getCanvasContext(_cfg.width, _cfg.height);// 获取canvas对象
var maxWidth = mainCtx.width;
var maxHeight = mainCtx.height;
mainCtx.clearRect(0, 0, 1000, 1000);
var starImg = new Image();// 获取图片的实际路径
starImg.src = _cfg.imgsrc;
starImg.onload = function () { // 合成
mainCtx.drawImage(starImg, 0, 0, _cfg.width, _cfg.height);// 先把图片绘制在这里
if (_cfg.value) { // 读取用户的文本
// mainCtx.font = '60px "Microsoft Yahei"';
mainCtx.font = _cfg.font;
mainCtx.fillStyle = _cfg.fillStyle;
mainCtx.textAlign = _cfg.textAlign;
mainCtx.width = '250px';
canvasTextAutoLine(_cfg.value, mainCtx, 500, 240, 72);
var mycanvas = document.getElementById('MergeImageId');
typeof callback === 'function' && callback(mycanvas.toDataURL('image/jpeg'));
}
};
}
function showDSBJ (competitionId, isEdit, isFile, competitionType, isCpxx) {
// $('.nav ul li > a.active').removeClass("active");
var $dsjd = $('#dsjd');
$('#left-container, #bjds').show();
var $a = $('#bjds').find('li:eq(0) > a');
$a.data('id', competitionId).data('competitionType', competitionType || 1).data('isEdit', isEdit).data('isFile', isFile)
if (competitionId) {
solution.server.get('/competitionsRanges/getCompetitionRange', {
competitionId: competitionId
}, function(data) {
$dsjd.empty();
$('#fzsj_dsjd').empty();
$('#dsjd_kcjk').empty();
$('#xqfx').empty();
$('#bjds').find('#bggl').parent().hide();// 报告管理
data.info.forEach(function (jd, index) {
var $jd = $('<li data-chivox-grou | p="three-level-menu"><a href="#" data-chivox-event="click:subMenuClick&xsxx&' + jd.id + '&' + jd.stage + '">' + jd.stageName + '</a></li>');
var $sj = $('<li data-chivox-group="three-level-menu"><a href="#" data-chivox-event="click:subMenuClick&fzsj&' + jd.id + '">' + jd.stageName + '<img src="/css/images/icon_warm_small.png" alt="" class="hide" style="margin-left:5px;"/><img src="/css/images/tip_unfinishedset.png" alt="" class="warning-tip hide"/></a></li>');
var $jk = $('<li><a style="font-size:14px;background-color:#90deb9;" href="/monitor/index.html?competitionId=' + (document.cookie.match(/cid=(\d+);?/) ? document.cookie.match(/cid=(\d+);?/)[1] : competitionId) + '&stageId=' + jd.id + '&role=' + $('#userName').data('RoleType') + '" target="_blank">' + jd.stageName + '</a></li>');
var $xf = $('<li data-chivox-group="three-level-menu"><a href="#" data-chivox-event="click:subMenuClick&xqfx&' + jd.id + '&' + jd.stage + '">' + jd.stageName + '</a></li>');
$dsjd.append($jd);
$('#fzsj_dsjd').append($sj);
$('#dsjd_kcjk').append($jk);
$('#xqfx').append($xf);
if ($('#userName').data('RoleType') == 'teacher') {
$('#bjds').find('#dsjd').prev('a').text('学生信息');
// $("#dsjd_kcjk").parent("li").hide();//考场监控
} else if ($('#userName').data('RoleType') == 'manager') {
$('#bjds').find('#dsjd').prev('a').text('选手信息');
$('#bjds').find('#xqfx').parent().hide();// 学情分析
}
});
if (isCpxx) {
$a.attr('data-chivox-event', 'click:subMenuClick&nlcp&Cpxx&' + competitionId);
$('#fzsj_dsjd').parent().hide() | identifier_body |
|
main.js | 标题', // 中标题
// content: '正文', // 正文
// subcontent: '副文', //副文 灰色
// warnBtn: '删除', // 红
// cancelBtn: '关闭', // 白
// confirmBtn: '确认', // 绿
// isClose: true, //true有关闭 false没关闭
// timeout: true,
// timeoutConfig: {
// time: 5, // 秒
// text: '剩余{{time}}秒', // 剩余{{time}}秒
// textPosiiton: 'content', // title subtitle content subcontent
// timeoutCallback: function(closeDialog) {
// closeDialog()
// alert(1);
// }
// },
// warnCallback: function() {
// console.info('warn')
// },
// confirmCallback: function() {
// console.info('confirm')
// }
// });
// 上传
var uploadBanner = function(target, callback) {
$(target).uploadify({
'auto': true,
'width': 80,
'height': 27,
'fileObjName': 'upfile',
'buttonText': '选择图片',
'swf': '/js/lib/uploadify/uploadify.swf',
'uploader': '/englishCompetition/upload?action=upload',
'multi': false,
'removeCompleted': false,
'cancelImg': 'js/uploadify/uploadify-cancel.png',
'fileTypeExts': '*.jpg;*.jpeg;*.gif;*.png',
'fileSizeLimit': '1MB',
'overrideEvents': ['onSelectError', 'onDialogClose'],
onUploadSuccess: function (file, data, response) {
this.queueData.files = [];
try {
data = JSON.parse(data);
} catch (error) {
data = JSON.parse(data.substring(1));
}
// console.log(data.url);
if (data.errorCode == 1) {
$(target).uploadify('cancel', '*');
if (typeof callback === 'function') callback(data);
} else if (data.errorCode == -1) {
alert(data.state);
}
},
onSelectError: function (file, errorCode, errorMsg) {
switch (errorCode) {
case -110:
alert('文件 [' + file.name + '] 大小超出系统限制的' + $(target).uploadify('settings', 'fileSizeLimit') + '大小!');
break;
case -120:
alert('文件 [' + file.name + '] 大小异常!');
break;
case -130:
alert('文件 [' + file.name + '] 类型不正确!');
break;
default:
alert('上传失败: ' + errorCode + ': ' + errorMsg);
break;
}
}
});
}
var gradeList;
var promptTpl = '<div class="contain">' +
'<div class="wrap"><i></i></div>' +
'<p class="content" style="line-height:24px;font-size:16px;color:#333333;text-align:center;margin:0;margin-bottom:30px;">你的浏览器没有安装Flash<br>请点击下方链接下载</p>' +
'<div class="dialog-btnBar" style="padding-top:0;">' +
'<div class="error"></div>' +
'<a class="download-btn" href="http://17ks.chivoxapp.com/guide/QQBrowser_Setup_SEM1.exe" style="float:right;margin-left:10px;height:40px;width:auto;border:#dcdcdc 1px solid;"><i class="icon-qq"></i>下载QQ浏览器<i class="icon-new"></i></a>' +
'<a class="download-btn" href="http://17ks.chivoxapp.com/guide/360cse_8.7.0.306.exe" style="float:right;margin-left:10px;height:40px;width:auto;border:#dcdcdc 1px solid;"><i class="icon-360"></i>下载360极速浏览器<i class="icon-new"></i></a>' +
'<div class="clear"></div>' +
'</div>' +
'</div>';
function getGradeList (callback) {
solution.server.get('/TeacherClasses/getGrade', {}, function (data) {
if (data.result == 1) {
gradeList = data.info;
callback();
} else {
alert(data.message)
}
}, false);
}
$(function () {
var resizeHeight = function () {
var browserHeight = $(window).height();
$('.container').css('min-height', browserHeight - 98);
};
resizeHeight();
$(window).resize(function (event) {
resizeHeight();
});
try {
solution.txFactory = new txFactory();
} catch (e) {
// console.log("tx.js没有引入");
}
if (flashcheck().f === 0) {
// alert("你的浏览器没有安装Flash");
$.Prompt({
content: promptTpl,
event: function () {
}
})
}
solution.server.get(GET_MENU, {}, function (data) {
if (data.result == 1) {
var menus = data.info.auth;
solution.constant = {};
solution.constant.user = data.info.user;
$('.nav > ul.nav-list').empty();
$.each(menus, function (index, menu) {
$('.nav > ul.nav-list').append('<li name="topic_build" data-for="' + menu.shortName + '"><a href="#" data-chivox-event="click:menuClick&' + menu.shortName + '" data-chivox-group="menu">' + menu.menuName + '</a></li>');
});
$('#userName').html(data.info.user.nickName);
$('#userName').data('userName', data.info.user.nickName);
$('#userName').data('ManageType', data.info.user.ManageType);
$('#userName').data('RoleType', data.info.user.RoleType);
$('#userName').data('userId', data.info.user.userID);
if ($.cookie('rt') == data.info.user.RoleType && $.cookie('uid') == data.info.user.userID) {
} else {
$.cookie('cid', '');
}
$.cookie('rt', data.info.user.RoleType);
$.cookie('uid', data.info.user.userID);
if (data.info.user.RoleType == 'teacher' && $.cookie('hasCp') == 1 && $.cookie('hasZy') !== 1) {
$('.nav-sub > li:last > a').attr('href', Config.reportUrl + '/web/models/Login/self.php').text('退出');
clearAllCookie();
}
if (data.info.user.RoleType == 'teacher' && $.cookie('hasCp') == 1 && $.cookie('hasZy') == 1) {
$('.nav-sub > li:last > a').attr('href', Config.reportUrl + '/web/models/TeaMain/self.php').text('返回作业系统');
}
if (data.info.user.RoleType == 'manager') {
$('.nav-sub > li:last > a').click(function() {
clearAllCookie();
})
}
getGradeList(function() {
setTimeout('$(".nav > ul > li:first > a").click()', 0);
});
$('#sdzj-info input').click(function (evt) {
$(this).select();
});
} else {
window.location.href = Config.reportUrl + '/login/self.php'
}
});
});
// 设备检测
function checkmedia(cb) {
// var GuideObj = new Guide(start, play);
try {
var aiPanel = new chivox.AiPanel($.extend(true, {}, Config.sdk, {
onMicStatusChange: function (code, msg) {
console.log('onMicStatusChange: ' + code);
cb && Guide3.addCallback(cb);
if (code == 50001 || code == 50008) {
if (Guide3.getStep() === 0) {
Guide3.show();
$('#recorder').addClass('min');
} else {
$('#guide').find('button').removeClass('disabled');
}
} else {
if (code == 50003) {
Guide3.show(11);
} else {
Guide3.show(9);
}
}
hideGuide();
}
}));
player = aiPanel.player;
recorder = aiPanel.recorder;
} catch (e) {
cw.ui.alert('语音服务暂时不可用,请稍后再试');
}
}
function hideGuide() {
$('.guide_container').addClass('hide');
}
$('body').off('click').on('click', '.live-admin.check-device', function () {
checkmedia();
});
if (!window.console) { // 兼容IE8
window.console = {
log: function () { },
error: function () { },
info: function () { },
warn: function () { }
}
}
/*
*使用canvas合成图片
*param:{object}
*return:base64图片
*/
var mergeImage = function(cfg, callback) {
/**
*文字换行根据canvas宽度来做折行
* @param {[type]} str [字符串]
* @param {[type]} c | anvas | identifier_name |
|
dictionary.py |
interest.
:param int entry_id: The ID of the dictionary entry.
:param dict restrictions: A dictionary describing the restrictions imposed
on the possible structural ways in which the POS tags may interrelate.
Necessary in order to provide POS tag trees.
"""
def __init__(self, conn, language_code, entry_id, restrictions):
c = conn.cursor()
self.language_code = language_code
self.entry_id = entry_id
self.headwords = tuple(c.execute('SELECT nonkana, reading FROM lexemes WHERE language = ? AND entry_id = ? ORDER BY sequence_id', (self.language_code, self.entry_id)))
if not self.headwords:
raise ValueError('Unable to find entry with ID %d for language %r' % (self.entry_id, self.language_code))
# XXX Ensure that there is a suitable index for this query
same_main_headword_entries = tuple(other_entry_id for (other_entry_id,) in c.execute('SELECT entry_id FROM lexemes WHERE language = ? AND nonkana IS ? AND reading = ? AND sequence_id = 1 ORDER BY entry_id' if self.headwords[0][0] is None else 'SELECT entry_id FROM lexemes WHERE language = ? AND nonkana = ? AND reading = ? AND sequence_id = 1 ORDER BY entry_id', (self.language_code, *self.headwords[0])))
self.discriminator = next(j for j, other_entry_id in enumerate(same_main_headword_entries, start=1) if other_entry_id == self.entry_id) if len(same_main_headword_entries) > 1 else None
self.roles = []
current_pos_list_id = None
sense_ids = []
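# Group consecutive rows that share the same pos_list_id into a single Role;
# the for-else below flushes the final group once the loop has finished.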
for (pos_list_id, sense_id) in tuple(c.execute('SELECT pos_list_id, sense_id FROM roles WHERE language = ? AND entry_id = ? ORDER BY sense_id', (self.language_code, self.entry_id,))):
if (current_pos_list_id is not None
and current_pos_list_id != pos_list_id):
self.roles.append(Role(conn, self.language_code, self.entry_id, current_pos_list_id, sense_ids, restrictions))
sense_ids = []
current_pos_list_id = pos_list_id
sense_ids.append(sense_id)
else:
if current_pos_list_id is not None:
self.roles.append(Role(conn, self.language_code, self.entry_id, current_pos_list_id, sense_ids, restrictions))
def __repr__(self):
return ('<%s(%r, %d) %s【%s】%s>'
% (self.__class__.__name__,
self.language_code,
self.entry_id,
self.headwords[0][0],
self.headwords[0][1],
'' if self.discriminator is None
else circled_number(self.discriminator, False)))
def __str__(self):
out = '\033[35m%s【%s】\033[0m' % self.headwords[0]
if self.discriminator is not None:
out += circled_number(self.discriminator, False)
out += '\n' + '-' * 8 + '\n'
for nonkana, reading in self.headwords[1:]:
out += '%s【%s】\n' % (nonkana, reading)
out += '\n'.join(str(role) for role in self.roles)
return out
@staticmethod
def lookup(conn, language_code, graphic, phonetic, restrictions):
"""Look up all lexemes that may be represented by the specified
combination of a graphic and a phonetic variant.
:param conn: The database connection for the dictionary.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param str graphic: The graphic variant.
:param str phonetic: The phonetic variant.
:param dict restrictions: A dictionary describing the restrictions
imposed on the possible structural ways in which the POS tags may
interrelate. Necessary in order to provide POS tag trees.
:return: A tuple of lexemes that contain the specified combination of a
graphic variant and a phonetic variant in their list of headwords.
"""
c = conn.cursor()
entry_ids = tuple(c.execute('SELECT entry_id FROM lemmas WHERE language = ? AND graphic = ? and phonetic = ?', (language_code, graphic, hiragana_to_katakana(phonetic))))
return tuple(Lexeme(conn, language_code, entry_id, restrictions) for (entry_id,) in entry_ids)
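# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the helper below
# illustrates one way Lexeme.lookup might be driven. It assumes the dictionary
# lives in a sqlite3 database file, that 'jpn' is the language code in use,
# and that an empty restrictions dict is acceptable to TemplateTree.parse;
# the path, language code, headword and restrictions are all assumptions.
def _example_lookup(db_path='dictionary.sqlite3'):
    import sqlite3
    conn = sqlite3.connect(db_path)
    try:
        # Graphic variant + phonetic variant of the word to look up; the
        # phonetic variant is converted to katakana inside lookup().
        lexemes = Lexeme.lookup(conn, 'jpn', '辞書', 'じしょ', restrictions={})
        for lexeme in lexemes:
            # Each lexeme prints its headwords followed by its roles/senses.
            print(lexeme)
        return lexemes
    finally:
        conn.close()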
class Role():
"""A role in the dictionary.
A role in this context means a collection of connotations of a lexeme that
have the same grammatical functions in text.
In addition to the connotations, a role has a part-of-speech (POS) list.
POS tags in this list may stand in hierarchical, non-conflicting, or even
mutually exclusive relations to one another.
A dictionary entry may contain multiple roles ``A`` and ``B`` with the same
POS lists if the entry's connotations are sorted by frequency of use, and a
third role ``C`` with a different POS list has connotations with a lower
frequency than those of ``A`` and with a higher frequency than those of
``B``.
On construction, all relevant data is loaded from the database.
:param conn: The database connection for the dictionary.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param int entry_id: The ID of the dictionary entry to which this role
belongs.
:param int pos_list_id: The ID of the list of POS tags for this role.
:param sense_id: An iterable of integer IDs of the connotations of this
role.
:param dict restrictions: A dictionary describing the restrictions imposed
on the possible structural ways in which the POS tags may interrelate.
Necessary in order to provide POS tag trees.
"""
def __init__(self, conn, language_code, entry_id, pos_list_id, sense_ids, restrictions):
c = conn.cursor()
self.language_code = language_code
self.entry_id = entry_id
self.pos_tags = tuple(pos for (pos,) in c.execute('SELECT pos FROM pos_lists WHERE language = ? AND pos_list_id = ? ORDER BY sequence_id', (self.language_code, pos_list_id)))
self.restrictions = restrictions
self.senses = tuple(Sense(conn, self.language_code, self.entry_id, sense_id) for sense_id in sense_ids)
def normalized_pos_tags(self):
"""Translate the list of POS tags as used in the dictionary to a list of
POS tags in the representation used internally.
:return: The list of POS tags associated with this role, in their
internal representation.
"""
pos_list = []
for pos in self.pos_tags:
pos_list.extend([i for i in re.split('[:;]', pos) if i != ''])
return pos_list
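# Example of the normalization above (a sketch; the tag strings are
# hypothetical): re.split('[:;]', 'n:adv;') yields ['n', 'adv', ''] and the
# empty string is dropped, so a stored POS list like ('n:adv;', 'vs') becomes
# ['n', 'adv', 'vs'].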
def pos_tree(self) -> TemplateTree:
"""From the POS tags of this role, build a tree structure.
The restrictions of this role are used on tree creation.
:return: A template tree that represents the list of POS tags associated
with this role in a hierarchical fashion.
"""
return TemplateTree.parse(self.normalized_pos_tags(), self.restrictions)
def __repr__(self):
return ('<%s(%r, %d, %r, %r)>'
% (self.__class__.__name__,
self.language_code,
self.entry_id,
self.pos_tags,
self.senses))
def __str__(self):
return '\n '.join([str(self.pos_tree())] + [str(sense) for sense in self.senses])
# XXX Rename to 'Connotation'
class Sense():
"""A connotation in the dictionary.
A connotation in this context means an abstract word meaning that is limited
to a specific lexeme. Multiple lexemes may appear in text conveying the
same meaning, and multiple meanings may be denoted by the same lexeme, but
each combination of lexeme and sense is a unique connotation.
A connotation may be described by multiple glosses, each of which can be a
direct translation, a description or similar.
On construction, all relevant data is loaded from the database.
:param conn: The database connection for the dictionary.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param int entry_id: The ID of the dictionary entry to which this
connotation belongs.
:param int sense_id: The ID of this connotation w.r.t. the entry with ID
``entry_id``.
"""
def __init__(self, conn, language_code, entry_id, sense_id):
c = conn.cursor()
self.language_code = language_code
self.entry_id = entry_id
self.sense_id = sense_id
self.glosses = tuple(c.execute('SELECT type, gloss FROM glosses WHERE language = ? AND entry_id = ? AND sense_id = ? ORDER BY sequence_id', (self.language_code, self.entry_id, self.sense_id)))
def __repr__(self):
return ('<%s(% | r, %d, %d)>'
% (self.__class__.__name__, self.language_code, self.entry_id, self.sense_id))
def __s | identifier_body |
|
dictionary.py | umber, bold_circle=True):
"""Provide a Unicode representation of the specified number.
:param int number: The positive number to convert to a string.
:param bool bold_circle: If ``True``, return a white number on a black
circle; return a black number on a white circle otherwise.
:return: A string that is the specified number enclosed in a circle. For
integers that have no such representation in Unicode, return the number
enclosed in parentheses.
"""
if number <= 0:
raise ValueError()
elif number < 10:
return chr((0x2775 if bold_circle else 0x245f) + number)
elif number < 21 and not bold_circle:
return chr(0x245f + number)
elif number == 10 and bold_circle:
return chr(0x277f)
elif number < 21 and bold_circle:
return chr(0x24e0 + number)
elif bold_circle:
return '[%s]' % (number,) # raise ValueError()
elif number < 30:
return chr(0x323c + number)
elif number == 30:
return chr(0x325a)
elif number < 36:
return chr(0x323c + number)
elif number < 51:
return chr(0x328d + number)
else:
return '(%s)' % (number,) # raise ValueError()
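# A few concrete mappings implied by the branches above (a sketch, assuming
# the full circled_number(number, bold_circle=True) signature):
# circled_number(3, False) == chr(0x245f + 3) == '\u2462' (circled digit three)
# circled_number(3, True) == chr(0x2775 + 3) == '\u2778' (negative circled digit three)
# circled_number(21, False) == chr(0x323c + 21) == '\u3251' (circled number twenty one)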
class Lexeme():
"""A lexeme (i.e. an entry) in the dictionary.
An entry in this context means a base meaning that may be denoted by either
element of a set of highly similar pairs of graphic and phonetic variants.
The base meaning may be further refined to one of several connotations of
this lexeme, see :class:`Sense`.
The same lexeme may appear in different grammatical positions, and different
connotations of the same lexeme might be restricted to multiple, different
grammatical usages, see :class:`Role`.
Furthermore, there might be restrictions as to which graphic and phonetic
variants may appear together, as well as which of those variants may appear
with which connotations.
On construction, all relevant data is loaded from the database.
:param conn: The database connection for the dictionary.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param int entry_id: The ID of the dictionary entry.
:param dict restrictions: A dictionary describing the restrictions imposed
on the possible structural ways in which the POS tags may interrelate.
Necessary in order to provide POS tag trees.
"""
def __init__(self, conn, language_code, entry_id, restrictions):
c = conn.cursor()
self.language_code = language_code
self.entry_id = entry_id
self.headwords = tuple(c.execute('SELECT nonkana, reading FROM lexemes WHERE language = ? AND entry_id = ? ORDER BY sequence_id', (self.language_code, self.entry_id)))
if not self.headwords:
raise ValueError('Unable to find entry with ID %d for language %r' % (self.entry_id, self.language_code))
# XXX Ensure that there is a suitable index for this query
same_main_headword_entries = tuple(other_entry_id for (other_entry_id,) in c.execute('SELECT entry_id FROM lexemes WHERE language = ? AND nonkana IS ? AND reading = ? AND sequence_id = 1 ORDER BY entry_id' if self.headwords[0][0] is None else 'SELECT entry_id FROM lexemes WHERE language = ? AND nonkana = ? AND reading = ? AND sequence_id = 1 ORDER BY entry_id', (self.language_code, *self.headwords[0])))
self.discriminator = next(j for j, other_entry_id in enumerate(same_main_headword_entries, start=1) if other_entry_id == self.entry_id) if len(same_main_headword_entries) > 1 else None
self.roles = []
current_pos_list_id = None
sense_ids = []
for (pos_list_id, sense_id) in tuple(c.execute('SELECT pos_list_id, sense_id FROM roles WHERE language = ? AND entry_id = ? ORDER BY sense_id', (self.language_code, self.entry_id,))):
if (current_pos_list_id is not None
and current_pos_list_id != pos_list_id):
self.roles.append(Role(conn, self.language_code, self.entry_id, current_pos_list_id, sense_ids, restrictions))
sense_ids = []
current_pos_list_id = pos_list_id
sense_ids.append(sense_id)
else:
if current_pos_list_id is not None:
self.roles.append(Role(conn, self.language_code, self.entry_id, current_pos_list_id, sense_ids, restrictions))
def __repr__(self):
return ('<%s(%r, %d) %s【%s】%s>'
% (self.__class__.__name__,
self.language_code,
self.entry_id,
self.headwords[0][0],
self.headwords[0][1],
'' if self.discriminator is None
else circled_number(self.discriminator, False)))
def __str__(self):
out = '\033[35m%s【%s】\033[0m' % self.headwords[0]
if self.discriminator is not None:
out += circled_number(self.discriminator, False)
out += '\n' + '-' * 8 + '\n'
for nonkana, reading in self.headwords[1:]:
out += '%s【%s】\n' % (nonkana, reading)
out += '\n'.join(str(role) for role in self.roles)
return out
@staticmethod
def lookup(conn, language_code, graphic, phonetic, restrictions):
"""Look up all lexemes that may be represented by the specified
combination of a graphic and a phonetic variant.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param str graphic: The graphic variant.
:param str phonetic: The phonetic variant.
:param dict restrictions: A dictionary describing the restrictions
imposed on the possible structural ways in which the POS tags may
interrelate. Necessary in order to provide POS tag trees.
:return: A tuple of lexemes that contain the specified combination of a
graphic variant and a phonetic variant in their list of headwords.
"""
c = conn.cursor()
entry_ids = tuple(c.execute('SELECT entry_id FROM lemmas WHERE language = ? AND graphic = ? and phonetic = ?', (language_code, graphic, hiragana_to_katakana(phonetic))))
return tuple(Lexeme(conn, language_code, entry_id, restrictions) for (entry_id,) in entry_ids)
class Role():
"""A role in the dictionary.
A role in this context means a collection of connotations of a lexeme that
have the same grammatical functions in text.
In addition to the connotations, a role has a part-of-speech (POS) list.
POS tags in this list may have mutually hierarchical, nonconflicting, and
even exclusive relations.
A dictionary entry may contain multiple roles ``A`` and ``B`` with the same
POS lists if the entry's connotations are sorted by frequency of use, and a
third role ``C`` with a different POS list has connotations with a lower
frequency than those of ``A`` and with a higher frequency than those of
``B``.
On construction, all relevant data is loaded from the database.
:param conn: The database connection for the dictionary.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param int entry_id: The ID of the dictionary entry to which this role
belongs.
:param int pos_list_id: The ID of the list of POS tags for this role.
:param sense_ids: An iterable of integer IDs of the connotations of this
role.
:param dict restrictions: A dictionary describing the restrictions imposed
on the possible structural ways in which the POS tags may interrelate.
Necessary in order to provide POS tag trees.
"""
def __init__(self, conn, language_code, entry_id, pos_list_id, sense_ids, restrictions):
c = conn.cursor()
self.language_code = language_code
self.entry_id = entry_id
self.pos_tags = tuple(pos for (pos,) in c.execute('SELECT pos FROM pos_lists WHERE language = ? AND pos_list_id = ? ORDER BY sequence_id', (self.language_code, pos_list_id)))
self.restrictions = restrictions
self.senses = tuple(Sense(conn, self.language_code, self.entry_id, sense_id) for sense_id in sense_ids)
def normalized_pos_tags(self):
"""Translate the list of POS tags as used in the dictionary to a list of
POS tags in the representation used internally.
:return: The list of POS tags associated with this role, in their
internal representation.
"""
pos_list = []
for pos in self.pos_tags:
pos_list.extend([i for i in re.split(' | rcled_number(n | identifier_name |
|
dictionary.py | return chr(0x328d + number)
else:
return '(%s)' % (number,) # raise ValueError()
class Lexeme():
"""A lexeme (i.e. an entry) in the dictionary.
An entry in this context means a base meaning that may be denoted by either
element of a set of highly similar pairs of graphic and phonetic variants.
The base meaning may be further refined to one of several connotations of
this lexeme, see :class:`Sense`.
The same lexeme may appear in different grammatical positions, and different
connotations of the same lexeme might be restricted to multiple, different
grammatical usages, see :class:`Role`.
Furthermore, there might be restrictions as to which graphic and phonetic
variants may appear together, as well as which of those variants may appear
with which connotations.
On construction, all relevant data is loaded from the database.
:param conn: The database connection for the dictionary.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param int entry_id: The ID of the dictionary entry.
:param dict restrictions: A dictionary describing the restrictions imposed
on the possible structural ways in which the POS tags may interrelate.
Necessary in order to provide POS tag trees.
"""
def __init__(self, conn, language_code, entry_id, restrictions):
c = conn.cursor()
self.language_code = language_code
self.entry_id = entry_id
self.headwords = tuple(c.execute('SELECT nonkana, reading FROM lexemes WHERE language = ? AND entry_id = ? ORDER BY sequence_id', (self.language_code, self.entry_id)))
if not self.headwords:
raise ValueError('Unable to find entry with ID %d for language %r' % (self.entry_id, self.language_code))
# XXX Ensure that there is a suitable index for this query
same_main_headword_entries = tuple(other_entry_id for (other_entry_id,) in c.execute('SELECT entry_id FROM lexemes WHERE language = ? AND nonkana IS ? AND reading = ? AND sequence_id = 1 ORDER BY entry_id' if self.headwords[0][0] is None else 'SELECT entry_id FROM lexemes WHERE language = ? AND nonkana = ? AND reading = ? AND sequence_id = 1 ORDER BY entry_id', (self.language_code, *self.headwords[0])))
self.discriminator = next(j for j, other_entry_id in enumerate(same_main_headword_entries, start=1) if other_entry_id == self.entry_id) if len(same_main_headword_entries) > 1 else None
self.roles = []
current_pos_list_id = None
sense_ids = []
for (pos_list_id, sense_id) in tuple(c.execute('SELECT pos_list_id, sense_id FROM roles WHERE language = ? AND entry_id = ? ORDER BY sense_id', (self.language_code, self.entry_id,))):
if (current_pos_list_id is not None
and current_pos_list_id != pos_list_id):
self.roles.append(Role(conn, self.language_code, self.entry_id, current_pos_list_id, sense_ids, restrictions))
sense_ids = []
current_pos_list_id = pos_list_id
sense_ids.append(sense_id)
else:
if current_pos_list_id is not None:
self.roles.append(Role(conn, self.language_code, self.entry_id, current_pos_list_id, sense_ids, restrictions))
def __repr__(self):
return ('<%s(%r, %d) %s【%s】%s>'
% (self.__class__.__name__,
self.language_code,
self.entry_id,
self.headwords[0][0],
self.headwords[0][1],
'' if self.discriminator is None
else circled_number(self.discriminator, False)))
def __str__(self):
out = '\033[35m%s【%s】\033[0m' % self.headwords[0]
if self.discriminator is not None:
out += circled_number(self.discriminator, False)
out += '\n' + '-' * 8 + '\n'
for nonkana, reading in self.headwords[1:]:
out += '%s【%s】\n' % (nonkana, reading)
out += '\n'.join(str(role) for role in self.roles)
return out
@staticmethod
def lookup(conn, language_code, graphic, phonetic, restrictions):
"""Look up all lexemes that may be represented by the specified
combination of a graphic and a phonetic variant.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param str graphic: The graphic variant.
:param str phonetic: The phonetic variant.
:param dict restrictions: A dictionary describing the restrictions
imposed on the possible structural ways in which the POS tags may
interrelate. Necessary in order to provide POS tag trees.
:return: A tuple of lexemes that contain the specified combination of a
graphic variant and a phonetic variant in their list of headwords.
"""
c = conn.cursor()
entry_ids = tuple(c.execute('SELECT entry_id FROM lemmas WHERE language = ? AND graphic = ? and phonetic = ?', (language_code, graphic, hiragana_to_katakana(phonetic))))
return tuple(Lexeme(conn, language_code, entry_id, restrictions) for (entry_id,) in entry_ids)
class Role():
"""A role in the dictionary.
A role in this context means a collection of connotations of a lexeme that
have the same grammatical functions in text.
In addition to the connotations, a role has a part-of-speech (POS) list.
POS tags in this list may have mutually hierarchical, nonconflicting, and
even exclusive relations.
A dictionary entry may contain multiple roles ``A`` and ``B`` with the same
POS lists if the entry's connotations are sorted by frequency of use, and a
third role ``C`` with a different POS list has connotations with a lower
frequency than those of ``A`` and with a higher frequency than those of
``B``.
On construction, all relevant data is loaded from the database.
:param conn: The database connection for the dictionary.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param int entry_id: The ID of the dictionary entry to which this role
belongs.
:param int pos_list_id: The ID of the list of POS tags for this role.
:param sense_ids: An iterable of integer IDs of the connotations of this
role.
:param dict restrictions: A dictionary describing the restrictions imposed
on the possible structural ways in which the POS tags may interrelate.
Necessary in order to provide POS tag trees.
"""
def __init__(self, conn, language_code, entry_id, pos_list_id, sense_ids, restrictions):
c = conn.cursor()
self.language_code = language_code
self.entry_id = entry_id
self.pos_tags = tuple(pos for (pos,) in c.execute('SELECT pos FROM pos_lists WHERE language = ? AND pos_list_id = ? ORDER BY sequence_id', (self.language_code, pos_list_id)))
self.restrictions = restrictions
self.senses = tuple(Sense(conn, self.language_code, self.entry_id, sense_id) for sense_id in sense_ids)
def normalized_pos_tags(self):
"""Translate the list of POS tags as used in the dictionary to a list of
POS tags in the representation used internally.
:return: The list of POS tags associated with this role, in their
internal representation.
"""
pos_list = []
for pos in self.pos_tags:
pos_list.extend([i for i in re.split('[:;]', pos) if i != ''])
return pos_list
def pos_tree(self) -> TemplateTree:
"""From the POS tags of this role, build a tree structure.
The restrictions of this role are used on tree creation.
:return: A template tree that represents the list of POS tags associated
with this role in a hierarchical fashion.
"""
return TemplateTree.parse(self.normalized_pos_tags(), self.restrictions)
def __repr__(self):
return ('<%s(%r, %d, %r, %r)>'
% (self.__class__.__name__,
self.language_code,
self.entry_id,
self.pos_tags,
self.senses))
def __str__(self):
return '\n '.join([str(self.pos_tree())] + [str(sense) for sense in self.senses])
# XXX Rename to 'Connotation'
class Sense():
"""A connotation in the dictionary.
A connotation in this context means an abstract word meaning that is limited
to a specific lexeme. Multiple lexemes may appear in text conveying the
same meaning, and multiple meanings may be denoted by the same lexeme, but
each combination of lexeme and sense is a unique connotation.
| A connotation may be described by multiple glosses, each of which can be a
direct translation, a description or similar.
On construction, all relevant data is loaded from the database.
| random_line_split |
|
dictionary.py | .
:param bool bold_circle: If ``True``, return a white number on a black
circle; return a black number on a white circle otherwise.
:return: A string that is the specified number enclosed in a circle. For
integers that have no such representation in Unicode, return the number
enclosed in parentheses.
"""
if number <= 0:
raise ValueError()
elif number < 10:
return chr((0x2775 if bold_circle else 0x245f) + number)
elif number < 21 and not bold_circle:
return chr(0x245f + number)
elif number == 10 and bold_circle:
return chr(0x277f)
elif number < 21 and bold_circle:
re | elif bold_circle:
return '[%s]' % (number,) # raise ValueError()
elif number < 30:
return chr(0x323c + number)
elif number == 30:
return chr(0x325a)
elif number < 36:
return chr(0x323c + number)
elif number < 51:
return chr(0x328d + number)
else:
return '(%s)' % (number,) # raise ValueError()
class Lexeme():
"""A lexeme (i.e. an entry) in the dictionary.
An entry in this context means a base meaning that may be denoted by either
element of a set of highly similar pairs of graphic and phonetic variants.
The base meaning may be further refined to one of several connotations of
this lexeme, see :class:`Sense`.
The same lexeme may appear in different grammatical positions, and different
connotations of the same lexeme might be restricted to multiple, different
grammatical usages, see :class:`Role`.
Furthermore, there might be restrictions as to which graphic and phonetic
variants may appear together, as well as which of those variants may appear
with which connotations.
On construction, all relevant data is loaded from the database.
:param conn: The database connection for the dictionary.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param int entry_id: The ID of the dictionary entry.
:param dict restrictions: A dictionary describing the restrictions imposed
on the possible structural ways in which the POS tags may interrelate.
Necessary in order to provide POS tag trees.
"""
def __init__(self, conn, language_code, entry_id, restrictions):
c = conn.cursor()
self.language_code = language_code
self.entry_id = entry_id
self.headwords = tuple(c.execute('SELECT nonkana, reading FROM lexemes WHERE language = ? AND entry_id = ? ORDER BY sequence_id', (self.language_code, self.entry_id)))
if not self.headwords:
raise ValueError('Unable to find entry with ID %d for language %r' % (self.entry_id, self.language_code))
# XXX Ensure that there is a suitable index for this query
same_main_headword_entries = tuple(other_entry_id for (other_entry_id,) in c.execute('SELECT entry_id FROM lexemes WHERE language = ? AND nonkana IS ? AND reading = ? AND sequence_id = 1 ORDER BY entry_id' if self.headwords[0][0] is None else 'SELECT entry_id FROM lexemes WHERE language = ? AND nonkana = ? AND reading = ? AND sequence_id = 1 ORDER BY entry_id', (self.language_code, *self.headwords[0])))
self.discriminator = next(j for j, other_entry_id in enumerate(same_main_headword_entries, start=1) if other_entry_id == self.entry_id) if len(same_main_headword_entries) > 1 else None
self.roles = []
current_pos_list_id = None
sense_ids = []
for (pos_list_id, sense_id) in tuple(c.execute('SELECT pos_list_id, sense_id FROM roles WHERE language = ? AND entry_id = ? ORDER BY sense_id', (self.language_code, self.entry_id,))):
if (current_pos_list_id is not None
and current_pos_list_id != pos_list_id):
self.roles.append(Role(conn, self.language_code, self.entry_id, current_pos_list_id, sense_ids, restrictions))
sense_ids = []
current_pos_list_id = pos_list_id
sense_ids.append(sense_id)
else:
if current_pos_list_id is not None:
self.roles.append(Role(conn, self.language_code, self.entry_id, current_pos_list_id, sense_ids, restrictions))
def __repr__(self):
return ('<%s(%r, %d) %s【%s】%s>'
% (self.__class__.__name__,
self.language_code,
self.entry_id,
self.headwords[0][0],
self.headwords[0][1],
'' if self.discriminator is None
else circled_number(self.discriminator, False)))
def __str__(self):
out = '\033[35m%s【%s】\033[0m' % self.headwords[0]
if self.discriminator is not None:
out += circled_number(self.discriminator, False)
out += '\n' + '-' * 8 + '\n'
for nonkana, reading in self.headwords[1:]:
out += '%s【%s】\n' % (nonkana, reading)
out += '\n'.join(str(role) for role in self.roles)
return out
@staticmethod
def lookup(conn, language_code, graphic, phonetic, restrictions):
"""Look up all lexemes that may be represented by the specified
combination of a graphic and a phonetic variant.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param str graphic: The graphic variant.
:param str phonetic: The phonetic variant.
:param dict restrictions: A dictionary describing the restrictions
imposed on the possible structural ways in which the POS tags may
interrelate. Necessary in order to provide POS tag trees.
:return: A tuple of lexemes that contain the specified combination of a
graphic variant and a phonetic variant in their list of headwords.
"""
c = conn.cursor()
entry_ids = tuple(c.execute('SELECT entry_id FROM lemmas WHERE language = ? AND graphic = ? and phonetic = ?', (language_code, graphic, hiragana_to_katakana(phonetic))))
return tuple(Lexeme(conn, language_code, entry_id, restrictions) for (entry_id,) in entry_ids)
class Role():
"""A role in the dictionary.
A role in this context means a collection of connotations of a lexeme that
have the same grammatical functions in text.
In addition to the connotations, a role has a part-of-speech (POS) list.
POS tags in this list may have mutually hierarchical, nonconflicting, and
even exclusive relations.
A dictionary entry may contain multiple roles ``A`` and ``B`` with the same
POS lists if the entry's connotations are sorted by frequency of use, and a
third role ``C`` with a different POS list has connotations with a lower
frequency than those of ``A`` and with a higher frequency than those of
``B``.
On construction, all relevant data is loaded from the database.
:param conn: The database connection for the dictionary.
:param str language_code: ISO 639-3 language code of the language of
interest.
:param int entry_id: The ID of the dictionary entry to which this role
belongs.
:param int pos_list_id: The ID of the list of POS tags for this role.
:param sense_ids: An iterable of integer IDs of the connotations of this
role.
:param dict restrictions: A dictionary describing the restrictions imposed
on the possible structural ways in which the POS tags may interrelate.
Necessary in order to provide POS tag trees.
"""
def __init__(self, conn, language_code, entry_id, pos_list_id, sense_ids, restrictions):
c = conn.cursor()
self.language_code = language_code
self.entry_id = entry_id
self.pos_tags = tuple(pos for (pos,) in c.execute('SELECT pos FROM pos_lists WHERE language = ? AND pos_list_id = ? ORDER BY sequence_id', (self.language_code, pos_list_id)))
self.restrictions = restrictions
self.senses = tuple(Sense(conn, self.language_code, self.entry_id, sense_id) for sense_id in sense_ids)
def normalized_pos_tags(self):
"""Translate the list of POS tags as used in the dictionary to a list of
POS tags in the representation used internally.
:return: The list of POS tags associated with this role, in their
internal representation.
"""
pos_list = []
for pos in self.pos_tags:
pos_list.extend([i for i in re.split('[:;]', pos) if i != ''])
return pos_list
def pos_tree(self) -> TemplateTree:
"""From the POS tags of this | turn chr(0x24e0 + number)
| conditional_block |
lib.rs | (|e| unsafe {libc::close(fd); e})?;
Ok(SerialPort{
fd: fd,
orig_settings: orig_settings,
is_raw: false,
})
}
/// Retrieve the termios structure for the serial port.
pub fn termios(&self) -> io::Result<termios::Termios> {
termios::Termios::from_fd(self.fd)
}
/// Set low-level serial port settings
///
/// The `action` parameter must be one of the following:
///
/// - `termios::TCSANOW` Update immediately
/// - `termios::TCSADRAIN` Finish reading buffered data before updating.
/// - `termios::TCSAFLUSH` Finish writing buffered data before updating.
///
/// # Errors
///
/// Will return `ErrorKind::InvalidInput` if `action` is not one of the three constants
/// defined above.
pub fn set_termios(&mut self, action: i32, t: &termios::Termios) -> io::Result<()> {
match action {
termios::TCSANOW | termios::TCSADRAIN | termios::TCSAFLUSH => {
termios::tcsetattr(self.fd, action, t)
},
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Illegal action: {}", action))),
}
}
/// Enable or disable blocking reads and writes.
///
/// # Panics
/// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1
pub fn set_nonblocking(&mut self, blocking: bool) -> io::Result<()> {
match unsafe {libc::fcntl(self.fd, libc::F_SETFL, libc::O_NONBLOCK, blocking as libc::c_int)} {
0 => Ok(()),
-1 => Err(io::Error::last_os_error()),
e @ _ => unreachable!(format!("Unexpected return code from F_SETFL O_NONBLOCK: {}", e)),
}
}
/// Get the current blocking mode for the serial port
///
/// # Panics
/// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1
pub fn is_blocking(&self) -> io::Result<bool> {
match unsafe {libc::fcntl(self.fd, libc::F_GETFL, libc::O_NONBLOCK)} {
0 => Ok(false),
1 => Ok(true),
-1 => Err(io::Error::last_os_error()),
e @ _ => unreachable!(format!("Unexpected return code from F_GETFL O_NONBLOCK: {}", e)),
}
}
/// Try writing some data.
///
/// Similar to the standard `io::Write` implementation, but errors
/// due to blocking IO are translated into Ok(None) results.
///
/// # Returns
///
/// - `Ok(Some(size))` on successful writes
/// - `Ok(None)` if calling write would block.
/// - `Err(e)` for all other IO errors
pub fn maybe_write(&mut self, buf: &[u8]) -> io::Result<Option<usize>> {
match self.write(buf) {
Ok(s) => Ok(Some(s)),
Err(e) => {
if let io::ErrorKind::WouldBlock = e.kind() {
Ok(None)
} else {
Err(e)
}
}
}
}
/// Try reading some data.
///
/// Similar to the standard `io::Read` implementation, but errors
/// due to blocking IO are translated into Ok(None) results.
///
/// # Returns
///
/// - `Ok(Some(size))` on successful reads
/// - `Ok(None)` if calling read would block.
/// - `Err(e)` for all other IO errors
pub fn maybe_read(&mut self, buf: &mut [u8]) -> io::Result<Option<usize>> {
match self.read(buf) {
Ok(s) => Ok(Some(s)),
Err(e) => {
if let io::ErrorKind::WouldBlock = e.kind() {
Ok(None)
} else {
Err(e)
}
}
}
}
/// Set the serial baudrate
///
/// Valid baudrates are:
///
/// - 0
/// - 50
/// - 75
/// - 110
/// - 134
/// - 150
/// - 200
/// - 300
/// - 600
/// - 1200
/// - 1800
/// - 2400
/// - 4800
/// - 9600
/// - 19200
/// - 38400
///
/// # Errors
///
/// Returns an io::ErrorKind::InvalidInput for baud rates not in the list
/// above.
pub fn set_baudrate(&mut self, baud: i32) -> io::Result<()> {
use termios::{B0, B50, B75, B110, B134, B150, B200, B300, B600,
B1200, B1800, B2400, B4800, B9600, B19200, B38400};
let b = match baud {
4800 => B4800,
9600 => B9600,
19200 => B19200,
38400 => B38400,
0 => B0,
50 => B50,
75 => B75,
110 => B110,
134 => B134,
150 => B150,
200 => B200,
300 => B300,
600 => B600,
1200 => B1200,
1800 => B1800,
2400 => B2400,
_ => return Err(io::Error::new(io::ErrorKind::InvalidInput, format!("{} is not a legal baudrate", baud))),
};
// Get the termios structure
let mut s = self.termios()?;
// And the original rate
// let orig_rate = termios::cfgetospeed(&s);
// Set the new rate
termios::cfsetspeed(&mut s, b)?;
// Now set the structure
self.set_termios(termios::TCSAFLUSH, &s)
}
/// Get the serial baudrate
///
/// Valid baudrates are:
///
/// - 0
/// - 50
/// - 75
/// - 110
/// - 134
/// - 150
/// - 200
/// - 300
/// - 600
/// - 1200
/// - 1800
/// - 2400
/// - 4800
/// - 9600
/// - 19200
/// - 38400
///
/// # Errors
///
/// Returns an io::ErrorKind::InvalidInput for baud rates not in the list
/// above.
pub fn baudrate(&self) -> io::Result<i32> | {
use termios::{B0, B50, B75, B110, B134, B150, B200, B300, B600,
B1200, B1800, B2400, B4800, B9600, B19200, B38400};
let s = self.termios()?;
// And the original rate
let baud = termios::cfgetospeed(&s);
let b = match baud {
B4800 => 4800,
B9600 => 9600,
B19200 => 19200,
B38400 => 38400,
B0 => 0,
B50 => 50,
B75 => 75,
B110 => 110,
B134 => 134, | identifier_body |
|
lib.rs | {
fd: RawFd,
orig_settings: termios::Termios,
is_raw: bool,
}
impl SerialPort {
/// Construct a new SerialPort
///
/// Opens a serial port at the location provided by `path` with the following
/// default settings:
///
/// - 9600,8N1 (9600 Baud, 8-bit data, no parity, 1 stop bit)
/// - Receiver enabled in "Canonical mode"
/// - Non-blocking
/// - No flow control (software OR hardware)
/// - Ignores hardware control lines
///
/// # Errors
///
/// SerialPort construction can fail for a few reasons:
///
/// - An invalid path is provided
/// - The path does not represent a serial port device
/// - We are unable to configure the serial port
/// ANY of the default settings. (Unlikely... but IS possible)
pub fn open<T: AsRef<Path>>(path: T) -> io::Result<Self> {
// Create a CString from the provided path.
let path_cstr = CString::new(path.as_ref().as_os_str().as_bytes())
.map_err(|_| io::Error::last_os_error())?;
// Attempt to open the desired path as a serial port. Set it read/write, nonblocking, and
// don't set it as the controlling terminal
let fd = unsafe { libc::open(path_cstr.as_ptr(), libc::O_RDWR | libc::O_NONBLOCK | libc::O_NOCTTY, 0) };
// Make sure the file descriptor is valid.
if fd < 0 {
return Err(io::Error::last_os_error());
}
// Get the existing termios settings. Close the file descriptor on errors.
let orig_settings = termios::Termios::from_fd(fd).map_err(|e| unsafe {libc::close(fd); e})?;
// Default port settings: Canonical 9600-8N1
let mut default_settings = orig_settings.clone();
default_settings.c_cflag = termios::CS8 | termios::CLOCAL | termios::CREAD;
default_settings.c_oflag = 0;
default_settings.c_iflag = termios::IGNPAR;
default_settings.c_lflag = termios::ICANON;
default_settings.c_cc[termios::VMIN] = 0;
default_settings.c_cc[termios::VTIME] = 0;
termios::cfsetspeed(&mut default_settings, termios::B9600).unwrap();
// tcsetattr only errors out if we cannot set ANY attribute. Something is seriously wrong
// if that happens, so just close the file descriptor and raise the error.
termios::tcsetattr(fd, termios::TCSANOW, &default_settings).map_err(|e| unsafe {libc::close(fd); e})?;
Ok(SerialPort{
fd: fd,
orig_settings: orig_settings,
is_raw: false,
})
}
/// Retrieve the termios structure for the serial port.
pub fn termios(&self) -> io::Result<termios::Termios> {
termios::Termios::from_fd(self.fd)
}
/// Set low-level serial port settings
///
/// The `action` parameter must be one of the following:
///
/// - `termios::TCSANOW` Update immediately
/// - `termios::TCSADRAIN` Finish reading buffered data before updating.
/// - `termios::TCSAFLUSH` Finish writing buffered data before updating.
///
/// # Errors
///
/// Will return `ErrorKind::InvalidInput` if `action` is not one of the three constants
/// defined above.
pub fn set_termios(&mut self, action: i32, t: &termios::Termios) -> io::Result<()> {
match action {
termios::TCSANOW | termios::TCSADRAIN | termios::TCSAFLUSH => {
termios::tcsetattr(self.fd, action, t)
},
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Illegal action: {}", action))),
}
}
/// Enable or disable blocking reads and writes.
///
/// # Panics
/// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1
pub fn set_nonblocking(&mut self, blocking: bool) -> io::Result<()> {
match unsafe {libc::fcntl(self.fd, libc::F_SETFL, libc::O_NONBLOCK, blocking as libc::c_int)} {
0 => Ok(()),
-1 => Err(io::Error::last_os_error()),
e @ _ => unreachable!(format!("Unexpected return code from F_SETFL O_NONBLOCK: {}", e)),
}
}
/// Get the current blocking mode for the serial port
///
/// # Panics
/// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1
pub fn is_blocking(&self) -> io::Result<bool> {
match unsafe {libc::fcntl(self.fd, libc::F_GETFL, libc::O_NONBLOCK)} {
0 => Ok(false),
1 => Ok(true),
-1 => Err(io::Error::last_os_error()),
e @ _ => unreachable!(format!("Unexpected return code from F_GETFL O_NONBLOCK: {}", e)),
}
}
/// Try writing some data.
///
/// Similar to the standard `io::Write` implementation, but errors
/// due to blocking IO are translated into Ok(None) results.
///
/// # Returns
///
/// - `Ok(Some(size))` on successful writes
/// - `Ok(None)` if calling write would block.
/// - `Err(e)` for all other IO errors
pub fn maybe_write(&mut self, buf: &[u8]) -> io::Result<Option<usize>> {
match self.write(buf) {
Ok(s) => Ok(Some(s)),
Err(e) => {
if let io::ErrorKind::WouldBlock = e.kind() {
Ok(None)
} else {
Err(e)
}
}
}
}
/// Try reading some data.
///
/// Similar to the standard `io::Read` implementation, but errors
/// due to blocking IO are translated into Ok(None) results.
///
/// # Returns
///
/// - `Ok(Some(size))` on successful reads
/// - `Ok(None)` if calling read would block.
/// - `Err(e)` for all other IO errors
pub fn maybe_read(&mut self, buf: &mut [u8]) -> io::Result<Option<usize>> {
match self.read(buf) {
Ok(s) => Ok(Some(s)),
Err(e) => {
if let io::ErrorKind::WouldBlock = e.kind() {
Ok(None)
} else {
Err(e)
}
}
}
}
/// Set the serial baudrate
///
/// Valid baudrates are:
///
/// - 0
/// - 50
/// - 75
/// - 110
/// - 134
/// - 150
/// - 200
/// - 300
/// - 600
/// - 1200
/// - 1800
/// - 2400
/// - 4800
/// - 9600
/// - 19200
/// - 38400
///
/// # Errors
///
/// Returns an io::ErrorKind::InvalidInput for baud rates not in the list
/// above.
pub fn set_baudrate(&mut self, baud: i32) -> io::Result<()> {
use termios::{B0, B50, B75, B110, B134, B150, B200, B300, B600,
B1200, B1800, B2400, B4800, B9600, B19200, B38400};
let b = match baud {
4800 => B4800,
9600 => B9600,
19200 => B19200,
38400 => B38400,
0 => B0,
50 => B50,
75 => B75,
110 => B110,
134 => B134,
150 => B150,
200 => B200,
| SerialPort | identifier_name |
|
lib.rs | .
let path_cstr = CString::new(path.as_ref().as_os_str().as_bytes())
.map_err(|_| io::Error::last_os_error())?;
// Attempt to open the desired path as a serial port. Set it read/write, nonblocking, and
// don't set it as the controlling terminal
let fd = unsafe { libc::open(path_cstr.as_ptr(), libc::O_RDWR | libc::O_NONBLOCK | libc::O_NOCTTY, 0) };
// Make sure the file descriptor is valid.
if fd < 0 {
return Err(io::Error::last_os_error());
}
// Get the existing termios settings. Close the file descriptor on errors.
let orig_settings = termios::Termios::from_fd(fd).map_err(|e| unsafe {libc::close(fd); e})?;
// Default port settings: Canonical 9600-8N1
let mut default_settings = orig_settings.clone();
default_settings.c_cflag = termios::CS8 | termios::CLOCAL | termios::CREAD;
default_settings.c_oflag = 0;
default_settings.c_iflag = termios::IGNPAR;
default_settings.c_lflag = termios::ICANON;
default_settings.c_cc[termios::VMIN] = 0;
default_settings.c_cc[termios::VTIME] = 0;
termios::cfsetspeed(&mut default_settings, termios::B9600).unwrap();
// tcsetattr only errors out if we cannot set ANY attribute. Something is seriously wrong
// if that happens, so just close the file descriptor and raise the error.
termios::tcsetattr(fd, termios::TCSANOW, &default_settings).map_err(|e| unsafe {libc::close(fd); e})?;
Ok(SerialPort{
fd: fd,
orig_settings: orig_settings,
is_raw: false,
})
}
/// Retrieve the termios structure for the serial port.
pub fn termios(&self) -> io::Result<termios::Termios> {
termios::Termios::from_fd(self.fd)
}
/// Set low-level serial port settings
///
/// The `action` parameter must be one of the following:
///
/// - `termios::TCSANOW` Update immediately
/// - `termios::TCSADRAIN` Finish reading buffered data before updating.
/// - `termios::TCSAFLUSH` Finish writing buffered data before updating.
///
/// # Errors
///
/// Will return `ErrorKind::InvalidInput` if `action` is not one of the three constants
/// defined above.
pub fn set_termios(&mut self, action: i32, t: &termios::Termios) -> io::Result<()> {
match action {
termios::TCSANOW | termios::TCSADRAIN | termios::TCSAFLUSH => {
termios::tcsetattr(self.fd, action, t)
},
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Illegal action: {}", action))),
}
}
/// Enable or disable blocking reads and writes.
///
/// # Panics
/// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1
pub fn set_nonblocking(&mut self, blocking: bool) -> io::Result<()> {
match unsafe {libc::fcntl(self.fd, libc::F_SETFL, libc::O_NONBLOCK, blocking as libc::c_int)} {
0 => Ok(()),
-1 => Err(io::Error::last_os_error()),
e @ _ => unreachable!(format!("Unexpected return code from F_SETFL O_NONBLOCK: {}", e)),
}
}
/// Get the current blocking mode for the serial port
///
/// # Panics
/// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1
pub fn is_blocking(&self) -> io::Result<bool> {
match unsafe {libc::fcntl(self.fd, libc::F_GETFL, libc::O_NONBLOCK)} {
0 => Ok(false),
1 => Ok(true),
-1 => Err(io::Error::last_os_error()),
e @ _ => unreachable!(format!("Unexpected return code from F_GETFL O_NONBLOCK: {}", e)),
}
}
/// Try writing some data.
///
/// Similar to the standard `io::Write` implementation, but errors
/// due to blocking IO are translated into Ok(None) results.
///
/// # Returns
///
/// - `Ok(Some(size))` on successful writes
/// - `Ok(None)` if calling write would block.
/// - `Err(e)` for all other IO errors
pub fn maybe_write(&mut self, buf: &[u8]) -> io::Result<Option<usize>> {
match self.write(buf) {
Ok(s) => Ok(Some(s)),
Err(e) => {
if let io::ErrorKind::WouldBlock = e.kind() {
Ok(None)
} else {
Err(e)
}
}
}
}
/// Try reading some data.
///
/// Similar to the standard `io::Read` implementation, but errors
/// due to blocking IO are translated into Ok(None) results.
/// | /// - `Err(e)` for all other IO errors
pub fn maybe_read(&mut self, buf: &mut [u8]) -> io::Result<Option<usize>> {
match self.read(buf) {
Ok(s) => Ok(Some(s)),
Err(e) => {
if let io::ErrorKind::WouldBlock = e.kind() {
Ok(None)
} else {
Err(e)
}
}
}
}
/// Set the serial baudrate
///
/// Valid baudrates are:
///
/// - 0
/// - 50
/// - 75
/// - 110
/// - 134
/// - 150
/// - 200
/// - 300
/// - 600
/// - 1200
/// - 1800
/// - 2400
/// - 4800
/// - 9600
/// - 19200
/// - 38400
///
/// # Errors
///
/// Returns an io::ErrorKind::InvalidInput for baud rates not in the list
/// above.
pub fn set_baudrate(&mut self, baud: i32) -> io::Result<()> {
use termios::{B0, B50, B75, B110, B134, B150, B200, B300, B600,
B1200, B1800, B2400, B4800, B9600, B19200, B38400};
let b = match baud {
4800 => B4800,
9600 => B9600,
19200 => B19200,
38400 => B38400,
0 => B0,
50 => B50,
75 => B75,
110 => B110,
134 => B134,
150 => B150,
200 => B200,
300 => B300,
600 => B600,
1200 => B1200,
1800 => B1800,
2400 => B2400,
_ => return Err(io::Error::new(io::ErrorKind::InvalidInput, format!("{} is not a legal baudrate", baud))),
};
// Get the termios structure
let mut s = self.termios()?;
// And the original rate
// let orig_rate = termios::cfgetospeed(&s);
// Set the new rate
termios::cfsetspeed(&mut s, b)?;
// Now set the structure
self.set_termios(termios::TCSAFLUSH, &s)
}
/// Get the serial baudrate
///
/// Valid baudrates are:
///
/// - 0
/// - 50
/// - 75
/// - 110
/// - 134
/// - 150
| /// # Returns
///
/// - `Ok(Some(size))` on successful reads
/// - `Ok(None)` if calling read would block. | random_line_split |
GradientBoostingClassifier.py | #amount committed to the loan
'funded_amnt_inv', #amount committed by investors for the loan
'installment', #monthly payment owed by the borrower
]
#Skip observations with missing values
loans = loans[[target] + features].dropna()
#Apply one-hot encoding to loans
categorical_variables = []
for feat_name, feat_type in zip(loans.columns, loans.dtypes):
if feat_type == object:
categorical_variables.append(feat_name)
for feature in categorical_variables:
loans_one_hot_encoded = pd.get_dummies(loans[feature],prefix=feature)
loans = pd.concat([loans, loans_one_hot_encoded],axis=1)
loans = loans.drop(feature, axis=1)
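#Toy illustration of the one-hot encoding step above (a sketch, not part of
#the assignment; the _demo frame below is ours): pd.get_dummies turns one
#categorical column into one 0/1 indicator column per category value, which
#is then concatenated and the original column dropped.
_demo = pd.get_dummies(pd.Series(['A', 'B', 'A'], name='grade'), prefix='grade')
#_demo now has the columns 'grade_A' and 'grade_B'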
#Import indices of train valid
train_idx = pd.read_json('module-8-assignment-1-train-idx.json')
valid_idx = pd.read_json('module-8-assignment-1-validation-idx.json')
#Split data into training and validation
train_data = loans.iloc[train_idx.iloc[:,0].values]
valid_data = loans.iloc[valid_idx.iloc[:,0].values]
#Gradient boosted tree classifier
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
train_target = train_data[target].as_matrix()
train_features = train_data.drop(target, axis=1).as_matrix()
model_5 = GradientBoostingClassifier(max_depth=6, n_estimators=5).fit(
train_features, train_target)
#Make predictions
valid_safe_loans = valid_data[valid_data[target] == 1]
valid_risky_loans = valid_data[valid_data[target] == -1]
sample_valid_data_risky = valid_risky_loans[0:2]
sample_valid_data_safe = valid_safe_loans[0:2]
sample_valid_data = sample_valid_data_safe.append(sample_valid_data_risky)
sample_valid_data
#Prediction Classes
sample_predictions = model_5.predict(sample_valid_data.drop(target, axis=1
).as_matrix())
#prediction accuracy
sample_accuracy = sum(sample_predictions == sample_valid_data[target]) / \
len(sample_predictions)
#Prediction Probabilities
sample_predProbas = model_5.predict_proba(sample_valid_data.drop(
target, axis=1).as_matrix())[:,1]
#return the probabilities of being a safe loan
idx_min = np.argmin(sample_predProbas) + 1
#return the loan in sample that is least likely to be a safe loan
#all the predictions with probability >= 0.5, the model predicts: label +1
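#Sketch of that decision rule (np is numpy, already used above; the variable
#name below is ours): mapping probabilities >= 0.5 to the safe label +1 and
#the rest to -1 should agree with the class labels from model_5.predict.
sample_labels_from_probas = np.where(sample_predProbas >= 0.5, 1, -1)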
#Evaluate the model on the validation data
#class predictions
valid_predictions = model_5.predict(valid_data.drop(target, axis=1
).as_matrix())
#calculate prediction accuracy
valid_accuracy = sum(valid_predictions == valid_data[target]) / \
len(valid_predictions) #.6612
#Calculate the number of false positives
valid_fp = sum((valid_predictions == 1)&(valid_data[target] == -1)) #1654
#Calculate the number of false negatives
valid_fn = sum((valid_predictions == -1)&(valid_data[target] == 1)) #1491
#Comparison with decision trees
#the prediction accuracy of the decision trees was around 0.6361
'''
As we explored in the decision tree assignment, we calculated the cost of
the mistakes made by the model. We again consider the same costs as follows:
False negatives: Assume a cost of $10,000 per false negative.
False positives: Assume a cost of $20,000 per false positive.
Assume that the number of false positives and false negatives for
the learned decision tree was:
False negatives: 1936
False positives: 1503
'''
cost_dt = 10000 * 1936 + 20000 * 1503 #49,420,000
cost_gb = 10000 * valid_fn + 20000 * valid_fp #47,990,000
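#The same cost-of-mistakes calculation as a small helper (a sketch; the
#function name is ours, not part of the assignment):
def cost_of_mistakes(false_negatives, false_positives,
                     fn_cost=10000, fp_cost=20000):
    #each false negative costs $10,000 and each false positive $20,000
    return fn_cost * false_negatives + fp_cost * false_positives
#cost_of_mistakes(1936, 1503) reproduces cost_dt above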
#Most positive & negative loans
#probability predictions for all the loans in validation
valid_predProbas = model_5.predict_proba(valid_data.drop(
target, axis=1).as_matrix())[:,1]
#add probability predictions as a column called predictions into validation
valid_data['predictions'] = valid_predProbas
#Sort the data (in decreasing order) by the probability predictions
valid_data = valid_data.sort_values(by = 'predictions', ascending = False)
#For each row, the probabilities should be a number in the range [0, 1]
#Find the top 5 loans with the highest probability of being a safe loan
print(valid_data.head(5))
#What grades are the top 5 loans?
print(valid_data.head(5)[['grade_A','grade_B', 'grade_C', 'grade_D',
'grade_E', 'grade_F', 'grade_G']])
#find the 5 loans with the lowest probability of being a safe loan
print(valid_data.tail(5)) #last is the least
#valid_data.sort_values(by='predictions', ascending=True).head(5)
print(valid_data.tail(5)[['grade_A','grade_B', 'grade_C', 'grade_D',
'grade_E', 'grade_F', 'grade_G']])
valid_target = valid_data[target].as_matrix()
valid_features = valid_data.drop([target, 'predictions'], axis=1).as_matrix()
#Effects of adding more trees
model_10 = GradientBoostingClassifier(max_depth=6, n_estimators=10).fit(
train_features, train_target)
accuray_10 = sum(model_10.predict(valid_features) == valid_target) / \
len(valid_target) #0.66619991383024557
model_50 = GradientBoostingClassifier(max_depth=6, n_estimators=50).fit(
train_features, train_target)
accuray_50 = sum(model_50.predict(valid_features) == valid_target) / \
len(valid_target) #0.68364928909952605
model_100 = GradientBoostingClassifier(max_depth=6, n_estimators=100).fit(
train_features, train_target)
accuray_100 = sum(model_100.predict(valid_features) == valid_target) / \
len(valid_target) #0.68968117190866007
model_200 = GradientBoostingClassifier(max_depth=6, n_estimators=200).fit(
train_features, train_target)
accuray_200 = sum(model_200.predict(valid_features) == valid_target) / \
len(valid_target) #0.68957345971563977
model_500 = GradientBoostingClassifier(max_depth=6, n_estimators=500).fit(
train_features, train_target)
accuray_500 = sum(model_500.predict(valid_features) == valid_target) / \
len(valid_target) #0.68634209392503231
#simpler coding style
train_errors = [] #[0.33450656922539568, 0.32832692979392242,
#0.28367231790214675, 0.25379510465085042, 0.21497084822268198,
#0.13458179961847438]
valid_errors = []
#[0.33864713485566567, 0.33380008616975443, 0.31635071090047395,
#0.31031882809133993, 0.31042654028436023, 0.31365790607496769]
x = [5, 10, 50, 100, 200, 500]
for i in x:
model = GradientBoostingClassifier(max_depth=6, n_estimators=i).fit(
train_features, train_target)
accuracy = model.score(valid_features, valid_target)
classification_error = 1 - accuracy
valid_errors.append(classification_error)
train_errors.append(1 - model.score(train_features, train_target))
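#Note: for classifiers, model.score(X, y) returns mean accuracy, so the values
#collected above are classification errors; e.g. the n_estimators=100 accuracy
#of ~0.6897 corresponds to the validation error of ~0.3103 in valid_errors.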
#model_100 has the best accuracy on the validation_data?
#it is not always true that the model with the most trees will perform best
#on test data?
#Plot the training and validation error vs. number of trees
#classification error = 1 - accuracy
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
def | make_figure | identifier_name |
|
GradientBoostingClassifier.py | for feat_name, feat_type in zip(loans.columns, loans.dtypes):
if feat_type == object:
categorical_variables.append(feat_name)
for feature in categorical_variables:
loans_one_hot_encoded = pd.get_dummies(loans[feature],prefix=feature)
loans = pd.concat([loans, loans_one_hot_encoded],axis=1)
loans = loans.drop(feature, axis=1)
#Import indices of train valid
train_idx = pd.read_json('module-8-assignment-1-train-idx.json')
valid_idx = pd.read_json('module-8-assignment-1-validation-idx.json')
#Split data into training and validation
train_data = loans.iloc[train_idx.iloc[:,0].values]
valid_data = loans.iloc[valid_idx.iloc[:,0].values]
#Gradient boosted tree classifier
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
train_target = train_data[target].as_matrix()
train_features = train_data.drop(target, axis=1).as_matrix()
model_5 = GradientBoostingClassifier(max_depth=6, n_estimators=5).fit(
train_features, train_target)
#Make predictions
valid_safe_loans = valid_data[valid_data[target] == 1]
valid_risky_loans = valid_data[valid_data[target] == -1]
sample_valid_data_risky = valid_risky_loans[0:2]
sample_valid_data_safe = valid_safe_loans[0:2]
sample_valid_data = sample_valid_data_safe.append(sample_valid_data_risky)
sample_valid_data
#Prediction Classes
sample_predictions = model_5.predict(sample_valid_data.drop(target, axis=1
).as_matrix())
#prediction accuracy
sample_accuracy = sum(sample_predictions == sample_valid_data[target]) / \
len(sample_predictions)
#Prediction Probabilities
sample_predProbas = model_5.predict_proba(sample_valid_data.drop(
target, axis=1).as_matrix())[:,1]
#return the probabilities of being a safe loan
idx_min = np.argmin(sample_predProbas) + 1
#return the loan in sample that is least likely to be a safe loan
#all the predictions with probability >= 0.5, the model predicts: label +1
#Evaluate the model on the validation data
#class predictions
valid_predictions = model_5.predict(valid_data.drop(target, axis=1
).as_matrix())
#calculate prediction accuracy
valid_accuracy = sum(valid_predictions == valid_data[target]) / \
len(valid_predictions) #.6612
#Calculate the number of false positives
valid_fp = sum((valid_predictions == 1)&(valid_data[target] == -1)) #1654
#Calculate the number of false negatives
valid_fn = sum((valid_predictions == -1)&(valid_data[target] == 1)) #1491
#Comparison with decision trees
#the prediction accuracy of the decision trees was around 0.6361
'''
As we explored in the decision tree assignment, we calculated the cost of
the mistakes made by the model. We again consider the same costs as follows:
False negatives: Assume a cost of $10,000 per false negative.
False positives: Assume a cost of $20,000 per false positive.
Assume that the number of false positives and false negatives for
the learned decision tree was:
False negatives: 1936
False positives: 1503
'''
cost_dt = 10000 * 1936 + 20000 * 1503 #49,420,000
cost_gb = 10000 * valid_fn + 20000 * valid_fp #47,990,000
#Most positive & negative loans
#probability predictions for all the loans in validation
valid_predProbas = model_5.predict_proba(valid_data.drop(
target, axis=1).as_matrix())[:,1]
#add probability predictions as a column called predictions into validation
valid_data['predictions'] = valid_predProbas
#Sort the data (in decreasing order) by the probability predictions
valid_data = valid_data.sort_values(by = 'predictions', ascending = False)
#For each row, the probabilities should be a number in the range [0, 1]
#Find the top 5 loans with the highest probability of being a safe loan
print(valid_data.head(5))
#What grades are the top 5 loans?
print(valid_data.head(5)[['grade_A','grade_B', 'grade_C', 'grade_D',
'grade_E', 'grade_F', 'grade_G']])
#find the 5 loans with the lowest probability of being a safe loan
print(valid_data.tail(5)) #last is the least
#valid_data.sort_values(by='predictions', ascending=True).head(5)
print(valid_data.tail(5)[['grade_A','grade_B', 'grade_C', 'grade_D',
'grade_E', 'grade_F', 'grade_G']])
valid_target = valid_data[target].as_matrix()
valid_features = valid_data.drop([target, 'predictions'], axis=1).as_matrix()
#Effects of adding more trees
model_10 = GradientBoostingClassifier(max_depth=6, n_estimators=10).fit(
train_features, train_target)
accuray_10 = sum(model_10.predict(valid_features) == valid_target) / \
len(valid_target) #0.66619991383024557
model_50 = GradientBoostingClassifier(max_depth=6, n_estimators=50).fit(
train_features, train_target)
accuray_50 = sum(model_50.predict(valid_features) == valid_target) / \
len(valid_target) #0.68364928909952605
model_100 = GradientBoostingClassifier(max_depth=6, n_estimators=100).fit(
train_features, train_target)
accuray_100 = sum(model_100.predict(valid_features) == valid_target) / \
len(valid_target) #0.68968117190866007
model_200 = GradientBoostingClassifier(max_depth=6, n_estimators=200).fit(
train_features, train_target)
accuray_200 = sum(model_200.predict(valid_features) == valid_target) / \
len(valid_target) #0.68957345971563977
model_500 = GradientBoostingClassifier(max_depth=6, n_estimators=500).fit(
train_features, train_target)
accuray_500 = sum(model_500.predict(valid_features) == valid_target) / \
len(valid_target) #0.68634209392503231
#simpler coding style
train_errors = [] #[0.33450656922539568, 0.32832692979392242,
#0.28367231790214675, 0.25379510465085042, 0.21497084822268198,
#0.13458179961847438]
valid_errors = []
#[0.33864713485566567, 0.33380008616975443, 0.31635071090047395,
#0.31031882809133993, 0.31042654028436023, 0.31365790607496769]
x = [5, 10, 50, 100, 200, 500]
for i in x:
model = GradientBoostingClassifier(max_depth=6, n_estimators=i).fit(
train_features, train_target)
accuracy = model.score(valid_features, valid_target)
classification_error = 1 - accuracy
valid_errors.append(classification_error)
train_errors.append(1 - model.score(train_features, train_target))
#model_100 has the best accuracy on the validation_data?
#it is not always true that the model with the most trees will perform best
#on test data?
#Plot the training and validation error vs. number of trees
#classification error = 1 - accuracy
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
def make_figure(dim, title, xlabel, ylabel, legend):
| plt.rcParams['figure.figsize'] = dim
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if legend is not None:
plt.legend(loc=legend, prop={'size':15})
plt.rcParams.update({'font.size': 16})
plt.tight_layout() | identifier_body |
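#How the pieces above could be combined to visualize the errors (a sketch;
#the figure title and axis labels below are ours):
plt.plot(x, train_errors, linewidth=4.0, label='Training error')
plt.plot(x, valid_errors, linewidth=4.0, label='Validation error')
make_figure(dim=(10, 5), title='Error vs number of trees',
            xlabel='Number of trees', ylabel='Classification error',
            legend='best')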
|
GradientBoostingClassifier.py | #one year or less of employment
'emp_length_num', #number of years of employment
'home_ownership', #home_ownership status: own, mortgage or rent
'dti', #debt to income ratio
'purpose', #the purpose of the loan
'payment_inc_ratio', #ratio of the monthly payment to income
'delinq_2yrs', #number of delinquencies
'delinq_2yrs_zero', #no delinquencies in last 2 years
'inq_last_6mths', #number of creditor inquiries in last 6 months
'last_delinq_none', #has borrower had a delinquency
'last_major_derog_none',#has borrower had 90 day or worse rating
'open_acc', #number of open credit accounts
'pub_rec', #number of derogatory public records
'pub_rec_zero', #no derogatory public records
'revol_util', #percent of available credit being used
'total_rec_late_fee', #total late fees received to date
'int_rate', #interest rate of the loan
'total_rec_int', #interest received to date
'annual_inc', #annual income of borrower
'funded_amnt', #amount committed to the loan
'funded_amnt_inv', #amount committed by investors for the loan
'installment', #monthly payment owed by the borrower
]
#Skip observations with missing values
loans = loans[[target] + features].dropna()
#Apply one-hot encoding to loans
categorical_variables = []
for feat_name, feat_type in zip(loans.columns, loans.dtypes):
|
for feature in categorical_variables:
loans_one_hot_encoded = pd.get_dummies(loans[feature],prefix=feature)
loans = pd.concat([loans, loans_one_hot_encoded],axis=1)
loans = loans.drop(feature, axis=1)
#Import indices of train valid
train_idx = pd.read_json('module-8-assignment-1-train-idx.json')
valid_idx = pd.read_json('module-8-assignment-1-validation-idx.json')
#Split data into training and validation
train_data = loans.iloc[train_idx.iloc[:,0].values]
valid_data = loans.iloc[valid_idx.iloc[:,0].values]
#Gradient boosted tree classifier
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
train_target = train_data[target].as_matrix()
train_features = train_data.drop(target, axis=1).as_matrix()
model_5 = GradientBoostingClassifier(max_depth=6, n_estimators=5).fit(
train_features, train_target)
#Make predictions
valid_safe_loans = valid_data[valid_data[target] == 1]
valid_risky_loans = valid_data[valid_data[target] == -1]
sample_valid_data_risky = valid_risky_loans[0:2]
sample_valid_data_safe = valid_safe_loans[0:2]
sample_valid_data = sample_valid_data_safe.append(sample_valid_data_risky)
sample_valid_data
#Prediction Classes
sample_predictions = model_5.predict(sample_valid_data.drop(target, axis=1
).as_matrix())
#prediction accuracy
sample_accuracy = sum(sample_predictions == sample_valid_data[target]) / \
len(sample_predictions)
#Prediction Probabilities
sample_predProbas = model_5.predict_proba(sample_valid_data.drop(
target, axis=1).as_matrix())[:,1]
#return the probabilities of being a safe loan
idx_min = np.argmin(sample_predProbas) + 1
#return the loan in sample that is least likely to be a safe loan
#all the predictions with probability >= 0.5, the model predicts: label +1
#Evaluate the model on the validation data
#class predictions
valid_predictions = model_5.predict(valid_data.drop(target, axis=1
).as_matrix())
#calculate prediction accuracy
valid_accuracy = sum(valid_predictions == valid_data[target]) / \
len(valid_predictions) #.6612
#Calculate the number of false positives
valid_fp = sum((valid_predictions == 1)&(valid_data[target] == -1)) #1654
#Calculate the number of false negatives
valid_fn = sum((valid_predictions == -1)&(valid_data[target] == 1)) #1491
#Comparison with decision trees
#the prediction accuracy of the decision trees was around 0.6361
'''
As we explored in the decision tree assignment, we calculated the cost of
the mistakes made by the model. We again consider the same costs as follows:
False negatives: Assume a cost of $10,000 per false negative.
False positives: Assume a cost of $20,000 per false positive.
Assume that the number of false positives and false negatives for
the learned decision tree was:
False negatives: 1936
False positives: 1503
'''
cost_dt = 10000 * 1936 + 20000 * 1503 #49,420,000
cost_gb = 10000 * valid_fn + 20000 * valid_fp #47,990,000
#Most positive & negative loans
#probability predictions for all the loans in validation
valid_predProbas = model_5.predict_proba(valid_data.drop(
target, axis=1).as_matrix())[:,1]
#add probability predictions as a column called predictions into validation
valid_data['predictions'] = valid_predProbas
#Sort the data (in decreasing order) by the probability predictions
valid_data = valid_data.sort_values(by = 'predictions', ascending = False)
#For each row, the probabilities should be a number in the range [0, 1]
#Find the top 5 loans with the highest probability of being a safe loan
print(valid_data.head(5))
#What grades are the top 5 loans?
print(valid_data.head(5)[['grade_A','grade_B', 'grade_C', 'grade_D',
'grade_E', 'grade_F', 'grade_G']])
#find the 5 loans with the lowest probability of being a safe loan
print(valid_data.tail(5)) #last is the least
#valid_data.sort_values(by='predictions', ascending=True).head(5)
print(valid_data.tail(5)[['grade_A','grade_B', 'grade_C', 'grade_D',
'grade_E', 'grade_F', 'grade_G']])
valid_target = valid_data[target].as_matrix()
valid_features = valid_data.drop([target, 'predictions'], axis=1).as_matrix()
#Effects of adding more trees
model_10 = GradientBoostingClassifier(max_depth=6, n_estimators=10).fit(
train_features, train_target)
accuray_10 = sum(model_10.predict(valid_features) == valid_target) / \
len(valid_target) #0.66619991383024557
model_50 = GradientBoostingClassifier(max_depth=6, n_estimators=50).fit(
train_features, train_target)
accuray_50 = sum(model_50.predict(valid_features) == valid_target) / \
len(valid_target) #0.68364928909952605
model_100 = GradientBoostingClassifier(max_depth=6, n_estimators=100).fit(
train_features, train_target)
accuray_100 = sum(model_100.predict(valid_features) == valid_target) / \
len(valid_target) #0.68968117190866007
model_200 = GradientBoostingClassifier(max_depth=6, n_estimators=200).fit(
train_features, train_target)
accuray_200 = sum(model_200.predict(valid_features) == valid_target) / \
len(valid_target) #0.68957345971563977
model_500 = GradientBoostingClassifier(max_depth=6, n_estimators=500).fit(
train_features, train_target)
accuray_500 = sum(model_500.predict(valid_features) == valid_target) / \
len(valid_target) #0.68634209392503231
#simpler coding style
train_errors = [] #[0.33450656922539568, 0.32832692979392242,
#0.28367231790214675, 0.25379510465085042, 0.21497084822268198,
#0.13458179961847438]
valid_errors = []
#[0.33864713485566567, | if feat_type == object:
categorical_variables.append(feat_name) | conditional_block |
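The false positive / false negative counts above are computed with boolean masks. The same numbers can be read off a scikit-learn confusion matrix; a small equivalent sketch, assuming the valid_predictions, valid_data and target names from the surrounding code:

from sklearn.metrics import confusion_matrix

# rows = true labels, columns = predicted labels, both ordered [-1, +1]
cm = confusion_matrix(valid_data[target], valid_predictions, labels=[-1, 1])
false_positives = cm[0, 1]   # true -1, predicted +1
false_negatives = cm[1, 0]   # true +1, predicted -1
cost = 10000 * false_negatives + 20000 * false_positives
print(false_positives, false_negatives, cost)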
GradientBoostingClassifier.py | #interest rate of the loan
'total_rec_int', #interest received to date
'annual_inc', #annual income of borrower
'funded_amnt', #amount committed to the loan
'funded_amnt_inv', #amount committed by investors for the loan
'installment', #monthly payment owed by the borrower
]
#Skip observations with missing values
loans = loans[[target] + features].dropna()
#Apply one-hot encoding to loans
categorical_variables = []
for feat_name, feat_type in zip(loans.columns, loans.dtypes):
if feat_type == object:
categorical_variables.append(feat_name)
for feature in categorical_variables:
loans_one_hot_encoded = pd.get_dummies(loans[feature],prefix=feature)
loans = pd.concat([loans, loans_one_hot_encoded],axis=1)
loans = loans.drop(feature, axis=1)
#Import indices of train valid
train_idx = pd.read_json('module-8-assignment-1-train-idx.json')
valid_idx = pd.read_json('module-8-assignment-1-validation-idx.json')
#Split data into training and validation
train_data = loans.iloc[train_idx.iloc[:,0].values]
valid_data = loans.iloc[valid_idx.iloc[:,0].values]
#Gradient boosted tree classifier
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
train_target = train_data[target].as_matrix()
train_features = train_data.drop(target, axis=1).as_matrix()
model_5 = GradientBoostingClassifier(max_depth=6, n_estimators=5).fit(
train_features, train_target)
#Make predictions
valid_safe_loans = valid_data[valid_data[target] == 1]
valid_risky_loans = valid_data[valid_data[target] == -1]
sample_valid_data_risky = valid_risky_loans[0:2]
sample_valid_data_safe = valid_safe_loans[0:2]
sample_valid_data = sample_valid_data_safe.append(sample_valid_data_risky)
sample_valid_data
#Prediction Classes
sample_predictions = model_5.predict(sample_valid_data.drop(target, axis=1
).as_matrix())
#prediction accuracy
sample_accuracy = sum(sample_predictions == sample_valid_data[target]) / \
len(sample_predictions)
#Prediction Probabilities
sample_predProbas = model_5.predict_proba(sample_valid_data.drop(
target, axis=1).as_matrix())[:,1]
#return the probabilities of being a safe loan
idx_min = np.argmin(sample_predProbas) + 1
#return the loan in sample that is least likely to be a safe loan
#all the predictions with probability >= 0.5, the model predicts: label +1
#Evaluate the model on the validation data
#class predictions
valid_predictions = model_5.predict(valid_data.drop(target, axis=1
).as_matrix())
#calculate prediction accuracy
valid_accuracy = sum(valid_predictions == valid_data[target]) / \
len(valid_predictions) #.6612
#Calculate the number of false positives
valid_fp = sum((valid_predictions == 1)&(valid_data[target] == -1)) #1654
#Calculate the number of false negatives
valid_fn = sum((valid_predictions == -1)&(valid_data[target] == 1)) #1491
#Comparison with decision trees
#the prediction accuracy of the decision trees was around 0.6361
'''
As we explored in the decision tree assignment, we calculated the cost of
the mistakes made by the model. We again consider the same costs as follows:
False negatives: Assume a cost of $10,000 per false negative.
False positives: Assume a cost of $20,000 per false positive.
Assume that the number of false positives and false negatives for
the learned decision tree was:
False negatives: 1936
False positives: 1503
'''
cost_dt = 10000 * 1936 + 20000 * 1503 #49,420,000
cost_gb = 10000 * valid_fn + 20000 * valid_fp #47,990,000
#Most positive & negative loans
#probability predictions for all the loans in validation
valid_predProbas = model_5.predict_proba(valid_data.drop(
target, axis=1).as_matrix())[:,1]
#add probability predictions as a column called predictions into validation
valid_data['predictions'] = valid_predProbas
#Sort the data (in decreasing order) by the probability predictions
valid_data = valid_data.sort_values(by = 'predictions', ascending = False)
#For each row, the probabilities should be a number in the range [0, 1]
#Find the top 5 loans with the highest probability of being a safe loan
print(valid_data.head(5))
#What grades are the top 5 loans?
print(valid_data.head(5)[['grade_A','grade_B', 'grade_C', 'grade_D',
'grade_E', 'grade_F', 'grade_G']])
#find the 5 loans with the lowest probability of being a safe loan
print(valid_data.tail(5)) #last is the least
#valid_data.sort_values(by='predictions', ascending=True).head(5)
print(valid_data.tail(5)[['grade_A','grade_B', 'grade_C', 'grade_D',
'grade_E', 'grade_F', 'grade_G']])
valid_target = valid_data[target].as_matrix()
valid_features = valid_data.drop([target, 'predictions'], axis=1).as_matrix()
#Effects of adding more trees
model_10 = GradientBoostingClassifier(max_depth=6, n_estimators=10).fit(
train_features, train_target)
accuray_10 = sum(model_10.predict(valid_features) == valid_target) / \
len(valid_target) #0.66619991383024557
model_50 = GradientBoostingClassifier(max_depth=6, n_estimators=50).fit(
train_features, train_target)
accuray_50 = sum(model_50.predict(valid_features) == valid_target) / \
len(valid_target) #0.68364928909952605
model_100 = GradientBoostingClassifier(max_depth=6, n_estimators=100).fit(
train_features, train_target)
accuray_100 = sum(model_100.predict(valid_features) == valid_target) / \
len(valid_target) #0.68968117190866007
model_200 = GradientBoostingClassifier(max_depth=6, n_estimators=200).fit(
train_features, train_target)
accuray_200 = sum(model_200.predict(valid_features) == valid_target) / \
len(valid_target) #0.68957345971563977
model_500 = GradientBoostingClassifier(max_depth=6, n_estimators=500).fit(
train_features, train_target)
accuray_500 = sum(model_500.predict(valid_features) == valid_target) / \
len(valid_target) #0.68634209392503231
#simpler coding style
train_errors = [] #[0.33450656922539568, 0.32832692979392242,
#0.28367231790214675, 0.25379510465085042, 0.21497084822268198,
#0.13458179961847438]
valid_errors = []
#[0.33864713485566567, 0.33380008616975443, 0.31635071090047395,
#0.31031882809133993, 0.31042654028436023, 0.31365790607496769]
x = [5, 10, 50, 100, 200, 500]
for i in x:
model = GradientBoostingClassifier(max_depth=6, n_estimators=i).fit(
train_features, train_target)
accuracy = model.score(valid_features, valid_target)
classification_error = 1 - accuracy
valid_errors.append(classification_error)
train_errors.append(1 - model.score(train_features, train_target))
| #model_100 has the best accuracy on the validation_data?
#it is not always true that the model with the most trees will perform best
#on test data?
#Plot the training and validation error vs. number of trees
| random_line_split |
|
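Answering the "which model performs best on the validation data" question above amounts to taking the argmin of valid_errors once the loop has filled it. A minimal sketch, assuming numpy and the x and valid_errors lists from the code above:

import numpy as np

best_idx = int(np.argmin(valid_errors))
best_n_estimators = x[best_idx]   # 100 for the validation errors quoted above
print('lowest validation error %.4f with %d trees'
      % (valid_errors[best_idx], best_n_estimators))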
filterset.py | filter_name):
"""
Return a related filter_name, using the filterset relationship if present.
"""
if not filterset.relationship:
return filter_name
return LOOKUP_SEP.join([filterset.relationship, filter_name])
class FilterSetMetaclass(filterset.FilterSetMetaclass):
def __new__(cls, name, bases, attrs):
new_class = super(FilterSetMetaclass, cls).__new__(cls, name, bases, attrs)
new_class.auto_filters = [
name for name, f in new_class.declared_filters.items()
if isinstance(f, filters.AutoFilter)]
new_class.related_filters = [
name for name, f in new_class.declared_filters.items()
if isinstance(f, filters.RelatedFilter)]
# see: :meth:`rest_framework_filters.filters.RelatedFilter.bind`
for name in new_class.related_filters:
new_class.declared_filters[name].bind(new_class)
# If model is defined, process auto filters
if new_class._meta.model is not None:
cls.expand_auto_filters(new_class)
return new_class
@classmethod
def expand_auto_filters(cls, new_class):
"""
Resolve `AutoFilter`s into their per-lookup filters. `AutoFilter`s are
a declarative alternative to the `Meta.fields` dictionary syntax, and
use the same machinery internally.
"""
# get reference to opts/declared filters
orig_meta, orig_declared = new_class._meta, new_class.declared_filters
# override opts/declared filters w/ copies
new_class._meta = copy.deepcopy(new_class._meta)
new_class.declared_filters = new_class.declared_filters.copy()
for name in new_class.auto_filters:
f = new_class.declared_filters[name]
# Remove auto filters from declared_filters so that they *are* overwritten
# RelatedFilter is an exception, and should *not* be overwritten
if not isinstance(f, filters.RelatedFilter):
del new_class.declared_filters[name]
# Use meta.fields to generate auto filters
new_class._meta.fields = {f.field_name: f.lookups or []}
for gen_name, gen_f in new_class.get_filters().items():
# get_filters() generates param names from the model field name
                # Replace the field name with the parameter name from the filterset
gen_name = gen_name.replace(f.field_name, name, 1)
new_class.base_filters[gen_name] = gen_f
# restore reference to opts/declared filters
new_class._meta, new_class.declared_filters = orig_meta, orig_declared
class SubsetDisabledMixin:
"""
Used to disable filter subsetting (see: :meth:`FilterSet.disable_subset`).
"""
@classmethod
def get_filter_subset(cls, params, rel=None):
return cls.base_filters
class FilterSet(rest_framework.FilterSet, metaclass=FilterSetMetaclass):
def __init__(self, data=None, queryset=None, *, relationship=None, **kwargs):
|
@classmethod
def get_fields(cls):
fields = super(FilterSet, cls).get_fields()
for name, lookups in fields.items():
if lookups == filters.ALL_LOOKUPS:
field = get_model_field(cls._meta.model, name)
fields[name] = utils.lookups_for_field(field)
return fields
@classmethod
def get_filter_subset(cls, params, rel=None):
"""
Returns the subset of filters that should be initialized by the
FilterSet, dependent on the requested `params`. This helps minimize
the cost of initialization by reducing the number of deepcopy ops.
The `rel` argument is used for related filtersets to strip the param
of its relationship prefix. See `.get_param_filter_name()` for info.
"""
# Determine names of filters from query params and remove empty values.
# param names that traverse relations are translated to just the local
# filter names. eg, `author__username` => `author`. Empty values are
# removed, as they indicate an unknown field eg, author__foobar__isnull
filter_names = {cls.get_param_filter_name(param, rel) for param in params}
filter_names = {f for f in filter_names if f is not None}
return OrderedDict(
(k, v) for k, v in cls.base_filters.items() if k in filter_names
)
@classmethod
def disable_subset(cls, *, depth=0):
"""
Disable filter subsetting, allowing the form to render the filterset.
Note that this decreases performance and should only be used when
rendering a form, such as with DRF's browsable API.
"""
if not issubclass(cls, SubsetDisabledMixin):
cls = type('SubsetDisabled%s' % cls.__name__,
(SubsetDisabledMixin, cls), {})
# recursively disable subset for related filtersets
if depth > 0:
# shallow copy to prevent modifying original `base_filters`
cls.base_filters = cls.base_filters.copy()
            # deepcopy RelatedFilter to prevent modifying original `.filterset`
for name in cls.related_filters:
f = copy.deepcopy(cls.base_filters[name])
f.filterset = f.filterset.disable_subset(depth=depth - 1)
cls.base_filters[name] = f
return cls
@classmethod
def get_param_filter_name(cls, param, rel=None):
"""
Get the filter name for the request data parameter.
ex::
# regular attribute filters
>>> FilterSet.get_param_filter_name('email')
'email'
# exclusion filters
>>> FilterSet.get_param_filter_name('email!')
'email'
# related filters
>>> FilterSet.get_param_filter_name('author__email')
'author'
# attribute filters based on relationship
>>> FilterSet.get_param_filter_name('author__email', rel='author')
'email'
"""
# check for empty param
if not param:
return param
# strip the rel prefix from the param name.
prefix = '%s%s' % (rel or '', LOOKUP_SEP)
if rel and param.startswith(prefix):
param = param[len(prefix):]
# Attempt to match against filters with lookups first. (username__endswith)
if param in cls.base_filters:
return param
# Attempt to match against exclusion filters
if param[-1] == '!' and param[:-1] in cls.base_filters:
return param[:-1]
# Match against relationships. (author__username__endswith).
# Preference more specific filters. eg, `note__author` over `note`.
for name in reversed(sorted(cls.related_filters)):
# we need to match against '__' to prevent eager matching against
# like names. eg, note vs note2. Exact matches are handled above.
if param.startswith("%s%s" % (name, LOOKUP_SEP)):
return name
def get_request_filters(self):
"""
Build a set of filters based on the request data. This currently
includes only filter exclusion/negation.
"""
# build the compiled set of all filters
requested_filters = OrderedDict()
for filter_name, f in self.filters.items():
requested_filters[filter_name] = f
# exclusion params
exclude_name = '%s!' % filter_name
if related(self, exclude_name) in self.data:
# deepcopy the *base* filter to prevent copying of model & parent
f_copy = copy.deepcopy(self.base_filters[filter_name])
f_copy.parent = f.parent
f_copy.model = f.model
f_copy.exclude = not f.exclude
requested_filters[exclude_name] = f_copy
return requested_filters
def get_related_filtersets(self):
"""
Get the related filterset instances for all related filters.
"""
related_filtersets = OrderedDict()
for related_name in self.related_filters:
if related_name not in self.filters:
continue
f = self.filters[related_name]
related_filtersets[related_name] = f.filterset(
data=self.data,
queryset=f.get_queryset(self.request),
relationship=related(self, related_name),
request=self.request,
prefix=self.form_prefix,
)
return related_filtersets
def filter_queryset(self, queryset):
queryset = super(FilterSet, self).filter_queryset(queryset)
queryset = self.filter_related_filtersets(queryset)
return queryset
def filter_related_filtersets(self, queryset):
"""
Filter the provided `queryset` by the `related_filtersets`. It is
recommended that you override this method to change the filtering
behavior across relationships.
"""
for related_name, related_filterset in self.related_filtersets.items():
# Related filtersets should only be applied if they had data.
prefix = '%s%s' % (related(self, related_name), LOOKUP_SEP)
if not any(value.startswith(prefix) for value in self.data):
continue
field_name = self.filters[related_name].field_name
lookup_expr = LOOKUP_SEP.join([field_name, 'in'])
subquery = Subquery(related_filterset.qs.values('pk'))
queryset = queryset.filter(**{lookup_expr: subquery})
return queryset
def get_form_class(self):
class Form(super(FilterSet | self.base_filters = self.get_filter_subset(data or {}, relationship)
super().__init__(data, queryset, **kwargs)
self.relationship = relationship
self.related_filtersets = self.get_related_filtersets()
self.filters = self.get_request_filters() | identifier_body |
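For context on how the FilterSet machinery above is meant to be used, here is a typical declaration written against the public rest_framework_filters API. The Author and Note models and their fields are made up for illustration; only the AutoFilter/RelatedFilter usage reflects the code above.

import rest_framework_filters as filters

class AuthorFilter(filters.FilterSet):
    username = filters.AutoFilter(lookups=['exact', 'contains'])

    class Meta:
        model = Author          # hypothetical model
        fields = []

class NoteFilter(filters.FilterSet):
    title = filters.AutoFilter(lookups='__all__')
    author = filters.RelatedFilter(
        AuthorFilter, field_name='author', queryset=Author.objects.all())

    class Meta:
        model = Note            # hypothetical model
        fields = []

# e.g. GET /notes/?author__username__contains=bob&title__contains=draft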
filterset.py | .AutoFilter)]
new_class.related_filters = [
name for name, f in new_class.declared_filters.items()
if isinstance(f, filters.RelatedFilter)]
# see: :meth:`rest_framework_filters.filters.RelatedFilter.bind`
for name in new_class.related_filters:
new_class.declared_filters[name].bind(new_class)
# If model is defined, process auto filters
if new_class._meta.model is not None:
cls.expand_auto_filters(new_class)
return new_class
@classmethod
def expand_auto_filters(cls, new_class):
"""
Resolve `AutoFilter`s into their per-lookup filters. `AutoFilter`s are
a declarative alternative to the `Meta.fields` dictionary syntax, and
use the same machinery internally.
"""
# get reference to opts/declared filters
orig_meta, orig_declared = new_class._meta, new_class.declared_filters
# override opts/declared filters w/ copies
new_class._meta = copy.deepcopy(new_class._meta)
new_class.declared_filters = new_class.declared_filters.copy()
for name in new_class.auto_filters:
f = new_class.declared_filters[name]
# Remove auto filters from declared_filters so that they *are* overwritten
# RelatedFilter is an exception, and should *not* be overwritten
if not isinstance(f, filters.RelatedFilter):
del new_class.declared_filters[name]
# Use meta.fields to generate auto filters
new_class._meta.fields = {f.field_name: f.lookups or []}
for gen_name, gen_f in new_class.get_filters().items():
# get_filters() generates param names from the model field name
                # Replace the field name with the parameter name from the filterset
gen_name = gen_name.replace(f.field_name, name, 1)
new_class.base_filters[gen_name] = gen_f
# restore reference to opts/declared filters
new_class._meta, new_class.declared_filters = orig_meta, orig_declared
class SubsetDisabledMixin:
"""
Used to disable filter subsetting (see: :meth:`FilterSet.disable_subset`).
"""
@classmethod
def get_filter_subset(cls, params, rel=None):
return cls.base_filters
class FilterSet(rest_framework.FilterSet, metaclass=FilterSetMetaclass):
def __init__(self, data=None, queryset=None, *, relationship=None, **kwargs):
self.base_filters = self.get_filter_subset(data or {}, relationship)
super().__init__(data, queryset, **kwargs)
self.relationship = relationship
self.related_filtersets = self.get_related_filtersets()
self.filters = self.get_request_filters()
@classmethod
def get_fields(cls):
fields = super(FilterSet, cls).get_fields()
for name, lookups in fields.items():
if lookups == filters.ALL_LOOKUPS:
field = get_model_field(cls._meta.model, name)
fields[name] = utils.lookups_for_field(field)
return fields
@classmethod
def get_filter_subset(cls, params, rel=None):
"""
Returns the subset of filters that should be initialized by the
FilterSet, dependent on the requested `params`. This helps minimize
the cost of initialization by reducing the number of deepcopy ops.
The `rel` argument is used for related filtersets to strip the param
of its relationship prefix. See `.get_param_filter_name()` for info.
"""
# Determine names of filters from query params and remove empty values.
# param names that traverse relations are translated to just the local
# filter names. eg, `author__username` => `author`. Empty values are
# removed, as they indicate an unknown field eg, author__foobar__isnull
filter_names = {cls.get_param_filter_name(param, rel) for param in params}
filter_names = {f for f in filter_names if f is not None}
return OrderedDict(
(k, v) for k, v in cls.base_filters.items() if k in filter_names
)
@classmethod
def disable_subset(cls, *, depth=0):
"""
Disable filter subsetting, allowing the form to render the filterset.
Note that this decreases performance and should only be used when
rendering a form, such as with DRF's browsable API.
"""
if not issubclass(cls, SubsetDisabledMixin):
cls = type('SubsetDisabled%s' % cls.__name__,
(SubsetDisabledMixin, cls), {})
# recursively disable subset for related filtersets
if depth > 0:
# shallow copy to prevent modifying original `base_filters`
cls.base_filters = cls.base_filters.copy()
            # deepcopy RelatedFilter to prevent modifying original `.filterset`
for name in cls.related_filters:
f = copy.deepcopy(cls.base_filters[name])
f.filterset = f.filterset.disable_subset(depth=depth - 1)
cls.base_filters[name] = f
return cls
@classmethod
def get_param_filter_name(cls, param, rel=None):
"""
Get the filter name for the request data parameter.
ex::
# regular attribute filters
>>> FilterSet.get_param_filter_name('email')
'email'
# exclusion filters
>>> FilterSet.get_param_filter_name('email!')
'email'
# related filters
>>> FilterSet.get_param_filter_name('author__email')
'author'
# attribute filters based on relationship
>>> FilterSet.get_param_filter_name('author__email', rel='author')
'email'
"""
# check for empty param
if not param:
return param
# strip the rel prefix from the param name.
prefix = '%s%s' % (rel or '', LOOKUP_SEP)
if rel and param.startswith(prefix):
param = param[len(prefix):]
# Attempt to match against filters with lookups first. (username__endswith)
if param in cls.base_filters:
return param
# Attempt to match against exclusion filters
if param[-1] == '!' and param[:-1] in cls.base_filters:
return param[:-1]
# Match against relationships. (author__username__endswith).
# Preference more specific filters. eg, `note__author` over `note`.
for name in reversed(sorted(cls.related_filters)):
# we need to match against '__' to prevent eager matching against
# like names. eg, note vs note2. Exact matches are handled above.
if param.startswith("%s%s" % (name, LOOKUP_SEP)):
return name
def get_request_filters(self):
"""
Build a set of filters based on the request data. This currently
includes only filter exclusion/negation.
"""
# build the compiled set of all filters
requested_filters = OrderedDict()
for filter_name, f in self.filters.items():
requested_filters[filter_name] = f
# exclusion params
exclude_name = '%s!' % filter_name
if related(self, exclude_name) in self.data:
# deepcopy the *base* filter to prevent copying of model & parent
f_copy = copy.deepcopy(self.base_filters[filter_name])
f_copy.parent = f.parent
f_copy.model = f.model
f_copy.exclude = not f.exclude
requested_filters[exclude_name] = f_copy
return requested_filters
def get_related_filtersets(self):
"""
Get the related filterset instances for all related filters.
"""
related_filtersets = OrderedDict()
for related_name in self.related_filters:
if related_name not in self.filters:
continue
f = self.filters[related_name]
related_filtersets[related_name] = f.filterset(
data=self.data,
queryset=f.get_queryset(self.request),
relationship=related(self, related_name),
request=self.request,
prefix=self.form_prefix,
)
return related_filtersets
def filter_queryset(self, queryset):
queryset = super(FilterSet, self).filter_queryset(queryset)
queryset = self.filter_related_filtersets(queryset)
return queryset
def filter_related_filtersets(self, queryset):
"""
Filter the provided `queryset` by the `related_filtersets`. It is
recommended that you override this method to change the filtering
behavior across relationships.
"""
for related_name, related_filterset in self.related_filtersets.items():
# Related filtersets should only be applied if they had data.
prefix = '%s%s' % (related(self, related_name), LOOKUP_SEP)
if not any(value.startswith(prefix) for value in self.data):
continue
field_name = self.filters[related_name].field_name
lookup_expr = LOOKUP_SEP.join([field_name, 'in'])
subquery = Subquery(related_filterset.qs.values('pk'))
queryset = queryset.filter(**{lookup_expr: subquery})
return queryset
def get_form_class(self):
class Form(super(FilterSet, self).get_form_class()):
def add_prefix(form, field_name):
field_name = related(self, field_name)
return super(Form, form).add_prefix(field_name)
def clean(form):
cleaned_data = super(Form, form).clean()
# when prefixing the errors, use the related filter name,
# which is relative to the parent filterset, not the root.
for related_filterset in self.related_filtersets.values():
| for key, error in related_filterset.form.errors.items():
self.form.errors[related(related_filterset, key)] = error | conditional_block |
|
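get_request_filters and get_param_filter_name together are what make the trailing-! exclusion syntax work: a parameter name ending in ! negates the matching filter. A rough usage sketch with the hypothetical NoteFilter from the earlier example:

# ?title=Draft        -> queryset.filter(title=...)
# ?title!=Draft       -> queryset.exclude(title=...)
# ?author__email!=... -> exclusion applied inside the related filterset

f = NoteFilter({'title!': 'Draft'}, queryset=Note.objects.all())
print(f.qs.query)   # the SQL should contain NOT (title = 'Draft')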
filterset.py | filter_name):
"""
Return a related filter_name, using the filterset relationship if present.
"""
if not filterset.relationship:
return filter_name
return LOOKUP_SEP.join([filterset.relationship, filter_name])
class FilterSetMetaclass(filterset.FilterSetMetaclass):
def __new__(cls, name, bases, attrs):
new_class = super(FilterSetMetaclass, cls).__new__(cls, name, bases, attrs)
new_class.auto_filters = [
name for name, f in new_class.declared_filters.items()
if isinstance(f, filters.AutoFilter)]
new_class.related_filters = [
name for name, f in new_class.declared_filters.items()
if isinstance(f, filters.RelatedFilter)]
# see: :meth:`rest_framework_filters.filters.RelatedFilter.bind`
for name in new_class.related_filters:
new_class.declared_filters[name].bind(new_class)
# If model is defined, process auto filters
if new_class._meta.model is not None:
cls.expand_auto_filters(new_class)
return new_class
@classmethod
def expand_auto_filters(cls, new_class):
"""
Resolve `AutoFilter`s into their per-lookup filters. `AutoFilter`s are
a declarative alternative to the `Meta.fields` dictionary syntax, and
use the same machinery internally.
"""
# get reference to opts/declared filters
orig_meta, orig_declared = new_class._meta, new_class.declared_filters
# override opts/declared filters w/ copies
new_class._meta = copy.deepcopy(new_class._meta)
new_class.declared_filters = new_class.declared_filters.copy()
for name in new_class.auto_filters:
f = new_class.declared_filters[name]
# Remove auto filters from declared_filters so that they *are* overwritten
# RelatedFilter is an exception, and should *not* be overwritten
if not isinstance(f, filters.RelatedFilter):
del new_class.declared_filters[name]
# Use meta.fields to generate auto filters
new_class._meta.fields = {f.field_name: f.lookups or []}
for gen_name, gen_f in new_class.get_filters().items():
# get_filters() generates param names from the model field name
                # Replace the field name with the parameter name from the filterset
gen_name = gen_name.replace(f.field_name, name, 1)
new_class.base_filters[gen_name] = gen_f
# restore reference to opts/declared filters
new_class._meta, new_class.declared_filters = orig_meta, orig_declared
class SubsetDisabledMixin:
"""
Used to disable filter subsetting (see: :meth:`FilterSet.disable_subset`).
"""
@classmethod
def get_filter_subset(cls, params, rel=None):
return cls.base_filters
class FilterSet(rest_framework.FilterSet, metaclass=FilterSetMetaclass):
def __init__(self, data=None, queryset=None, *, relationship=None, **kwargs):
self.base_filters = self.get_filter_subset(data or {}, relationship)
super().__init__(data, queryset, **kwargs)
self.relationship = relationship
self.related_filtersets = self.get_related_filtersets()
self.filters = self.get_request_filters()
@classmethod
def get_fields(cls):
fields = super(FilterSet, cls).get_fields()
for name, lookups in fields.items():
if lookups == filters.ALL_LOOKUPS:
field = get_model_field(cls._meta.model, name)
fields[name] = utils.lookups_for_field(field)
return fields
@classmethod
def | (cls, params, rel=None):
"""
Returns the subset of filters that should be initialized by the
FilterSet, dependent on the requested `params`. This helps minimize
the cost of initialization by reducing the number of deepcopy ops.
The `rel` argument is used for related filtersets to strip the param
of its relationship prefix. See `.get_param_filter_name()` for info.
"""
# Determine names of filters from query params and remove empty values.
# param names that traverse relations are translated to just the local
# filter names. eg, `author__username` => `author`. Empty values are
# removed, as they indicate an unknown field eg, author__foobar__isnull
filter_names = {cls.get_param_filter_name(param, rel) for param in params}
filter_names = {f for f in filter_names if f is not None}
return OrderedDict(
(k, v) for k, v in cls.base_filters.items() if k in filter_names
)
@classmethod
def disable_subset(cls, *, depth=0):
"""
Disable filter subsetting, allowing the form to render the filterset.
Note that this decreases performance and should only be used when
rendering a form, such as with DRF's browsable API.
"""
if not issubclass(cls, SubsetDisabledMixin):
cls = type('SubsetDisabled%s' % cls.__name__,
(SubsetDisabledMixin, cls), {})
# recursively disable subset for related filtersets
if depth > 0:
# shallow copy to prevent modifying original `base_filters`
cls.base_filters = cls.base_filters.copy()
            # deepcopy RelatedFilter to prevent modifying original `.filterset`
for name in cls.related_filters:
f = copy.deepcopy(cls.base_filters[name])
f.filterset = f.filterset.disable_subset(depth=depth - 1)
cls.base_filters[name] = f
return cls
@classmethod
def get_param_filter_name(cls, param, rel=None):
"""
Get the filter name for the request data parameter.
ex::
# regular attribute filters
>>> FilterSet.get_param_filter_name('email')
'email'
# exclusion filters
>>> FilterSet.get_param_filter_name('email!')
'email'
# related filters
>>> FilterSet.get_param_filter_name('author__email')
'author'
# attribute filters based on relationship
>>> FilterSet.get_param_filter_name('author__email', rel='author')
'email'
"""
# check for empty param
if not param:
return param
# strip the rel prefix from the param name.
prefix = '%s%s' % (rel or '', LOOKUP_SEP)
if rel and param.startswith(prefix):
param = param[len(prefix):]
# Attempt to match against filters with lookups first. (username__endswith)
if param in cls.base_filters:
return param
# Attempt to match against exclusion filters
if param[-1] == '!' and param[:-1] in cls.base_filters:
return param[:-1]
# Match against relationships. (author__username__endswith).
# Preference more specific filters. eg, `note__author` over `note`.
for name in reversed(sorted(cls.related_filters)):
# we need to match against '__' to prevent eager matching against
# like names. eg, note vs note2. Exact matches are handled above.
if param.startswith("%s%s" % (name, LOOKUP_SEP)):
return name
def get_request_filters(self):
"""
Build a set of filters based on the request data. This currently
includes only filter exclusion/negation.
"""
# build the compiled set of all filters
requested_filters = OrderedDict()
for filter_name, f in self.filters.items():
requested_filters[filter_name] = f
# exclusion params
exclude_name = '%s!' % filter_name
if related(self, exclude_name) in self.data:
# deepcopy the *base* filter to prevent copying of model & parent
f_copy = copy.deepcopy(self.base_filters[filter_name])
f_copy.parent = f.parent
f_copy.model = f.model
f_copy.exclude = not f.exclude
requested_filters[exclude_name] = f_copy
return requested_filters
def get_related_filtersets(self):
"""
Get the related filterset instances for all related filters.
"""
related_filtersets = OrderedDict()
for related_name in self.related_filters:
if related_name not in self.filters:
continue
f = self.filters[related_name]
related_filtersets[related_name] = f.filterset(
data=self.data,
queryset=f.get_queryset(self.request),
relationship=related(self, related_name),
request=self.request,
prefix=self.form_prefix,
)
return related_filtersets
def filter_queryset(self, queryset):
queryset = super(FilterSet, self).filter_queryset(queryset)
queryset = self.filter_related_filtersets(queryset)
return queryset
def filter_related_filtersets(self, queryset):
"""
Filter the provided `queryset` by the `related_filtersets`. It is
recommended that you override this method to change the filtering
behavior across relationships.
"""
for related_name, related_filterset in self.related_filtersets.items():
# Related filtersets should only be applied if they had data.
prefix = '%s%s' % (related(self, related_name), LOOKUP_SEP)
if not any(value.startswith(prefix) for value in self.data):
continue
field_name = self.filters[related_name].field_name
lookup_expr = LOOKUP_SEP.join([field_name, 'in'])
subquery = Subquery(related_filterset.qs.values('pk'))
queryset = queryset.filter(**{lookup_expr: subquery})
return queryset
def get_form_class(self):
class Form(super(FilterSet | get_filter_subset | identifier_name |
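filter_related_filtersets above narrows the outer queryset with an __in lookup on a Subquery of the related filterset's primary keys. Spelled out directly in ORM terms (again using the hypothetical Note/Author models), the generated filter is roughly:

from django.db.models import Subquery

# what happens for ?author__username__contains=bo
author_qs = Author.objects.filter(username__contains='bo')
notes = Note.objects.filter(author__in=Subquery(author_qs.values('pk')))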
filterset.py | et, filter_name):
"""
Return a related filter_name, using the filterset relationship if present.
"""
if not filterset.relationship:
return filter_name
return LOOKUP_SEP.join([filterset.relationship, filter_name])
class FilterSetMetaclass(filterset.FilterSetMetaclass):
def __new__(cls, name, bases, attrs):
new_class = super(FilterSetMetaclass, cls).__new__(cls, name, bases, attrs)
new_class.auto_filters = [
name for name, f in new_class.declared_filters.items()
if isinstance(f, filters.AutoFilter)]
new_class.related_filters = [
name for name, f in new_class.declared_filters.items()
if isinstance(f, filters.RelatedFilter)]
# see: :meth:`rest_framework_filters.filters.RelatedFilter.bind`
for name in new_class.related_filters:
new_class.declared_filters[name].bind(new_class)
# If model is defined, process auto filters
if new_class._meta.model is not None:
cls.expand_auto_filters(new_class)
return new_class
@classmethod
def expand_auto_filters(cls, new_class):
"""
Resolve `AutoFilter`s into their per-lookup filters. `AutoFilter`s are
a declarative alternative to the `Meta.fields` dictionary syntax, and
use the same machinery internally.
"""
# get reference to opts/declared filters | new_class._meta = copy.deepcopy(new_class._meta)
new_class.declared_filters = new_class.declared_filters.copy()
for name in new_class.auto_filters:
f = new_class.declared_filters[name]
# Remove auto filters from declared_filters so that they *are* overwritten
# RelatedFilter is an exception, and should *not* be overwritten
if not isinstance(f, filters.RelatedFilter):
del new_class.declared_filters[name]
# Use meta.fields to generate auto filters
new_class._meta.fields = {f.field_name: f.lookups or []}
for gen_name, gen_f in new_class.get_filters().items():
# get_filters() generates param names from the model field name
                # Replace the field name with the parameter name from the filterset
gen_name = gen_name.replace(f.field_name, name, 1)
new_class.base_filters[gen_name] = gen_f
# restore reference to opts/declared filters
new_class._meta, new_class.declared_filters = orig_meta, orig_declared
class SubsetDisabledMixin:
"""
Used to disable filter subsetting (see: :meth:`FilterSet.disable_subset`).
"""
@classmethod
def get_filter_subset(cls, params, rel=None):
return cls.base_filters
class FilterSet(rest_framework.FilterSet, metaclass=FilterSetMetaclass):
def __init__(self, data=None, queryset=None, *, relationship=None, **kwargs):
self.base_filters = self.get_filter_subset(data or {}, relationship)
super().__init__(data, queryset, **kwargs)
self.relationship = relationship
self.related_filtersets = self.get_related_filtersets()
self.filters = self.get_request_filters()
@classmethod
def get_fields(cls):
fields = super(FilterSet, cls).get_fields()
for name, lookups in fields.items():
if lookups == filters.ALL_LOOKUPS:
field = get_model_field(cls._meta.model, name)
fields[name] = utils.lookups_for_field(field)
return fields
@classmethod
def get_filter_subset(cls, params, rel=None):
"""
Returns the subset of filters that should be initialized by the
FilterSet, dependent on the requested `params`. This helps minimize
the cost of initialization by reducing the number of deepcopy ops.
The `rel` argument is used for related filtersets to strip the param
of its relationship prefix. See `.get_param_filter_name()` for info.
"""
# Determine names of filters from query params and remove empty values.
# param names that traverse relations are translated to just the local
# filter names. eg, `author__username` => `author`. Empty values are
# removed, as they indicate an unknown field eg, author__foobar__isnull
filter_names = {cls.get_param_filter_name(param, rel) for param in params}
filter_names = {f for f in filter_names if f is not None}
return OrderedDict(
(k, v) for k, v in cls.base_filters.items() if k in filter_names
)
@classmethod
def disable_subset(cls, *, depth=0):
"""
Disable filter subsetting, allowing the form to render the filterset.
Note that this decreases performance and should only be used when
rendering a form, such as with DRF's browsable API.
"""
if not issubclass(cls, SubsetDisabledMixin):
cls = type('SubsetDisabled%s' % cls.__name__,
(SubsetDisabledMixin, cls), {})
# recursively disable subset for related filtersets
if depth > 0:
# shallow copy to prevent modifying original `base_filters`
cls.base_filters = cls.base_filters.copy()
            # deepcopy RelatedFilter to prevent modifying original `.filterset`
for name in cls.related_filters:
f = copy.deepcopy(cls.base_filters[name])
f.filterset = f.filterset.disable_subset(depth=depth - 1)
cls.base_filters[name] = f
return cls
@classmethod
def get_param_filter_name(cls, param, rel=None):
"""
Get the filter name for the request data parameter.
ex::
# regular attribute filters
>>> FilterSet.get_param_filter_name('email')
'email'
# exclusion filters
>>> FilterSet.get_param_filter_name('email!')
'email'
# related filters
>>> FilterSet.get_param_filter_name('author__email')
'author'
# attribute filters based on relationship
>>> FilterSet.get_param_filter_name('author__email', rel='author')
'email'
"""
# check for empty param
if not param:
return param
# strip the rel prefix from the param name.
prefix = '%s%s' % (rel or '', LOOKUP_SEP)
if rel and param.startswith(prefix):
param = param[len(prefix):]
# Attempt to match against filters with lookups first. (username__endswith)
if param in cls.base_filters:
return param
# Attempt to match against exclusion filters
if param[-1] == '!' and param[:-1] in cls.base_filters:
return param[:-1]
# Match against relationships. (author__username__endswith).
# Preference more specific filters. eg, `note__author` over `note`.
for name in reversed(sorted(cls.related_filters)):
# we need to match against '__' to prevent eager matching against
# like names. eg, note vs note2. Exact matches are handled above.
if param.startswith("%s%s" % (name, LOOKUP_SEP)):
return name
def get_request_filters(self):
"""
Build a set of filters based on the request data. This currently
includes only filter exclusion/negation.
"""
# build the compiled set of all filters
requested_filters = OrderedDict()
for filter_name, f in self.filters.items():
requested_filters[filter_name] = f
# exclusion params
exclude_name = '%s!' % filter_name
if related(self, exclude_name) in self.data:
# deepcopy the *base* filter to prevent copying of model & parent
f_copy = copy.deepcopy(self.base_filters[filter_name])
f_copy.parent = f.parent
f_copy.model = f.model
f_copy.exclude = not f.exclude
requested_filters[exclude_name] = f_copy
return requested_filters
def get_related_filtersets(self):
"""
Get the related filterset instances for all related filters.
"""
related_filtersets = OrderedDict()
for related_name in self.related_filters:
if related_name not in self.filters:
continue
f = self.filters[related_name]
related_filtersets[related_name] = f.filterset(
data=self.data,
queryset=f.get_queryset(self.request),
relationship=related(self, related_name),
request=self.request,
prefix=self.form_prefix,
)
return related_filtersets
def filter_queryset(self, queryset):
queryset = super(FilterSet, self).filter_queryset(queryset)
queryset = self.filter_related_filtersets(queryset)
return queryset
def filter_related_filtersets(self, queryset):
"""
Filter the provided `queryset` by the `related_filtersets`. It is
recommended that you override this method to change the filtering
behavior across relationships.
"""
for related_name, related_filterset in self.related_filtersets.items():
# Related filtersets should only be applied if they had data.
prefix = '%s%s' % (related(self, related_name), LOOKUP_SEP)
if not any(value.startswith(prefix) for value in self.data):
continue
field_name = self.filters[related_name].field_name
lookup_expr = LOOKUP_SEP.join([field_name, 'in'])
subquery = Subquery(related_filterset.qs.values('pk'))
queryset = queryset.filter(**{lookup_expr: subquery})
return queryset
def get_form_class(self):
class Form(super(FilterSet, | orig_meta, orig_declared = new_class._meta, new_class.declared_filters
# override opts/declared filters w/ copies | random_line_split |
file.pb.go | {
return x.Permission
}
return 0
}
func (x *UploadReq) GetSubsection() bool {
if x != nil {
return x.Subsection
}
return false
}
func (x *UploadReq) GetStart() bool {
if x != nil {
return x.Start
}
return false
}
func (x *UploadReq) GetEnd() bool {
if x | oadReq) GetBody() []byte {
if x != nil {
return x.Body
}
return nil
}
func (x *UploadReq) GetReplace() bool {
if x != nil {
return x.Replace
}
return false
}
type Response struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Success bool `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"`
Msg string `protobuf:"bytes,2,opt,name=Msg,proto3" json:"Msg,omitempty"`
}
func (x *Response) Reset() {
*x = Response{}
if protoimpl.UnsafeEnabled {
mi := &file_protobuf_file_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Response) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Response) ProtoMessage() {}
func (x *Response) ProtoReflect() protoreflect.Message {
mi := &file_protobuf_file_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Response.ProtoReflect.Descriptor instead.
func (*Response) Descriptor() ([]byte, []int) {
return file_protobuf_file_proto_rawDescGZIP(), []int{1}
}
func (x *Response) GetSuccess() bool {
if x != nil {
return x.Success
}
return false
}
func (x *Response) GetMsg() string {
if x != nil {
return x.Msg
}
return ""
}
var File_protobuf_file_proto protoreflect.FileDescriptor
var file_protobuf_file_proto_rawDesc = []byte{
0x0a, 0x13, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x22, 0xd5, 0x01, 0x0a, 0x09,
0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x12, 0x1e, 0x0a, 0x0a, 0x46, 0x69, 0x6c,
0x65, 0x4d, 0x64, 0x35, 0x73, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x46,
0x69, 0x6c, 0x65, 0x4d, 0x64, 0x35, 0x73, 0x75, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d,
0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a,
0x0a, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x0a, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a,
0x0a, 0x53, 0x75, 0x62, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28,
0x08, 0x52, 0x0a, 0x53, 0x75, 0x62, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a,
0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x53, 0x74,
0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x45, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08,
0x52, 0x03, 0x45, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x09, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x70,
0x6c, 0x61, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6c,
0x61, 0x63, 0x65, 0x22, 0x36, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, | != nil {
return x.End
}
return false
}
func (x *Upl | identifier_body |
file.pb.go | nil {
return x.Permission
}
return 0
}
func (x *UploadReq) GetSubsection() bool {
if x != nil {
return x.Subsection
}
return false
}
func (x *UploadReq) GetStart() bool {
if x != nil {
return x.Start
}
return false
}
func (x *UploadReq) GetEnd() bool {
if x != nil {
return x.End
}
return false
}
func (x *UploadReq) GetBody() []byte {
if x != nil {
return x.Body
}
return nil
}
func (x *UploadReq) GetReplace() bool {
if x != nil {
return x.Replace
}
return false
}
type Response struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Success bool `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"`
Msg string `protobuf:"bytes,2,opt,name=Msg,proto3" json:"Msg,omitempty"`
}
func (x *Response) Reset() {
*x = Response{}
if protoimpl.UnsafeEnabled {
mi := &file_protobuf_file_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Response) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Response) ProtoMessage() {}
func (x *Response) ProtoReflect() protoreflect.Message {
mi := &file_protobuf_file_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Response.ProtoReflect.Descriptor instead.
func (*Response) Descriptor() ([]byte, []int) {
return file_protobuf_file_proto_rawDescGZIP(), []int{1}
}
func (x *Response) GetSuccess() bool {
if x != nil {
return x.Success
}
return false
}
func (x *Response) GetMsg() string {
if x != nil {
return x.Msg
}
return ""
}
var File_protobuf_file_proto protoreflect.FileDescriptor
var file_protobuf_file_proto_rawDesc = []byte{
0x0a, 0x13, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x | 6, 0x69, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x22, 0xd5, 0x01, 0x0a, 0x09,
0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x12, 0x1e, 0x0a, 0x0a, 0x46, 0x69, 0x6c,
0x65, 0x4d, 0x64, 0x35, 0x73, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x46,
0x69, 0x6c, 0x65, 0x4d, 0x64, 0x35, 0x73, 0x75, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d,
0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a,
0x0a, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x0a, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a,
0x0a, 0x53, 0x75, 0x62, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28,
0x08, 0x52, 0x0a, 0x53, 0x75, 0x62, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a,
0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x53, 0x74,
0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x45, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08,
0x52, 0x03, 0x45, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x09, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x70,
0x6c, 0x61, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6c,
0x61, 0x63, 0x65, 0x22, 0x36, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0 | 75, 0x66, 0x2f, 0x6 | conditional_block |
file.pb.go | [0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UploadReq.ProtoReflect.Descriptor instead.
func (*UploadReq) Descriptor() ([]byte, []int) {
return file_protobuf_file_proto_rawDescGZIP(), []int{0}
}
func (x *UploadReq) GetFileMd5Sum() string {
if x != nil {
return x.FileMd5Sum
}
return ""
}
func (x *UploadReq) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *UploadReq) GetPermission() uint32 {
if x != nil {
return x.Permission
}
return 0
}
func (x *UploadReq) GetSubsection() bool {
if x != nil {
return x.Subsection
}
return false
}
func (x *UploadReq) GetStart() bool {
if x != nil {
return x.Start
}
return false
}
func (x *UploadReq) GetEnd() bool {
if x != nil {
return x.End
}
return false
}
func (x *UploadReq) GetBody() []byte {
if x != nil {
return x.Body
}
return nil
}
func (x *UploadReq) GetReplace() bool {
if x != nil {
return x.Replace
}
return false
}
type Response struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Success bool `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"`
Msg string `protobuf:"bytes,2,opt,name=Msg,proto3" json:"Msg,omitempty"`
}
func (x *Response) Reset() {
*x = Response{}
if protoimpl.UnsafeEnabled {
mi := &file_protobuf_file_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Response) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Response) ProtoMessage() {}
func (x *Response) ProtoReflect() protoreflect.Message {
mi := &file_protobuf_file_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Response.ProtoReflect.Descriptor instead.
func (*Response) Descriptor() ([]byte, []int) {
return file_protobuf_file_proto_rawDescGZIP(), []int{1}
}
func (x *Response) GetSuccess() bool {
if x != nil {
return x.Success
}
return false
}
func (x *Response) GetMsg() string {
if x != nil {
return x.Msg
}
return ""
}
var File_protobuf_file_proto protoreflect.FileDescriptor
var file_protobuf_file_proto_rawDesc = []byte{
0x0a, 0x13, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x22, 0xd5, 0x01, 0x0a, 0x09,
0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x12, 0x1e, 0x0a, 0x0a, 0x46, 0x69, 0x6c,
0x65, 0x4d, 0x64, 0x35, 0x73, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x46,
0x69, 0x6c, 0x65, 0x4d, 0x64, 0x35, 0x73, 0x75, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d,
0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a,
0x0a, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x0a, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a,
0x0a, 0x53, 0x75, 0x62, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28,
0x08, 0x52, 0x0a, 0x53, 0x75, 0x62, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a,
0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x53, 0x74,
0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x45, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08,
0x52, 0x03, 0x45, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x09, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x70 | gTypes | identifier_name |
|
file.pb.go | 0x10, 0x0a, 0x03, 0x45, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08,
0x52, 0x03, 0x45, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x09, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x70,
0x6c, 0x61, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6c,
0x61, 0x63, 0x65, 0x22, 0x36, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x18, 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
0x52, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x4d, 0x73, 0x67,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4d, 0x73, 0x67, 0x32, 0x37, 0x0a, 0x0a, 0x44,
0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x29, 0x0a, 0x06, 0x55, 0x70, 0x6c,
0x6f, 0x61, 0x64, 0x12, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61,
0x64, 0x52, 0x65, 0x71, 0x1a, 0x0e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x42, 0x0d, 0x5a, 0x0b, 0x2e, 0x2e, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_protobuf_file_proto_rawDescOnce sync.Once
file_protobuf_file_proto_rawDescData = file_protobuf_file_proto_rawDesc
)
func file_protobuf_file_proto_rawDescGZIP() []byte {
file_protobuf_file_proto_rawDescOnce.Do(func() {
file_protobuf_file_proto_rawDescData = protoimpl.X.CompressGZIP(file_protobuf_file_proto_rawDescData)
})
return file_protobuf_file_proto_rawDescData
}
var file_protobuf_file_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_protobuf_file_proto_goTypes = []interface{}{
(*UploadReq)(nil), // 0: file.UploadReq
(*Response)(nil), // 1: file.Response
}
var file_protobuf_file_proto_depIdxs = []int32{
0, // 0: file.Distribute.Upload:input_type -> file.UploadReq
1, // 1: file.Distribute.Upload:output_type -> file.Response
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_protobuf_file_proto_init() }
func file_protobuf_file_proto_init() {
if File_protobuf_file_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_protobuf_file_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UploadReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_protobuf_file_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Response); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_protobuf_file_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_protobuf_file_proto_goTypes,
DependencyIndexes: file_protobuf_file_proto_depIdxs,
MessageInfos: file_protobuf_file_proto_msgTypes,
}.Build()
File_protobuf_file_proto = out.File
file_protobuf_file_proto_rawDesc = nil
file_protobuf_file_proto_goTypes = nil
file_protobuf_file_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// DistributeClient is the client API for Distribute service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type DistributeClient interface {
Upload(ctx context.Context, in *UploadReq, opts ...grpc.CallOption) (*Response, error)
}
type distributeClient struct {
cc grpc.ClientConnInterface
} |
func NewDistributeClient(cc grpc.ClientConnInterface) DistributeClient {
return &distributeClient{cc} | random_line_split |
|
psd_channel.rs | _height() * 4) as usize;
let red = self.red();
let green = self.green();
let blue = self.blue();
let alpha = self.alpha();
// TODO: We're assuming that if we only see two channels it is a 16 bit grayscale
// PSD. Instead we should just check the Psd's color mode and depth to see if
// they are grayscale and sixteen. As we run into more cases we'll clean things like
// this up over time.
// if green.is_some() && blue.is_none() && alpha.is_none() {
// return self.generate_16_bit_grayscale_rgba();
// }
let mut rgba = vec![0; rgba_len];
use crate::psd_channel::PsdChannelKind::*;
self.insert_channel_bytes(&mut rgba, Red, red);
// If there is a green channel we use it, otherwise we use the red channel since this is
// a single channel grey image (such as a heightmap).
if let Some(green) = green {
self.insert_channel_bytes(&mut rgba, Green, green);
} else {
self.insert_channel_bytes(&mut rgba, Green, red);
}
// If there is a blue channel we use it, otherwise we use the red channel since this is
// a single channel grey image (such as a heightmap).
if let Some(blue) = blue {
self.insert_channel_bytes(&mut rgba, Blue, blue);
} else {
self.insert_channel_bytes(&mut rgba, Blue, red);
}
if let Some(alpha_channel) = alpha {
self.insert_channel_bytes(&mut rgba, TransparencyMask, alpha_channel);
} else {
// If there is no transparency data then the image is opaque
for idx in 0..rgba_len / 4 {
rgba[idx * 4 + 3] = 255;
}
}
rgba
}
/// Generate an RGBA Vec<u8> from a composite image or layer that uses 16 bits per
/// pixel. We do this by mapping the 16 bits back down to 8 bits.
///
/// The 16 bits are stored across the red and green channels (first and second).
fn generate_16_bit_grayscale_rgba(&self) -> Vec<u8> {
match self.red() {
ChannelBytes::RawData(red) => match self.green().unwrap() {
ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green),
ChannelBytes::RleCompressed(green) => {
let green = &rle_decompress(green);
sixteen_to_eight_rgba(red, green)
}
},
ChannelBytes::RleCompressed(red) => {
let red = &rle_decompress(red);
match self.green().unwrap() {
ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green),
ChannelBytes::RleCompressed(green) => {
let green = &rle_decompress(green);
sixteen_to_eight_rgba(red, green)
}
}
}
}
}
/// Given some vector of bytes, insert the bytes from the given channel into the vector.
///
/// Doing it this way allows us to allocate for one vector and insert all 4 (RGBA) channels into
/// it.
fn insert_channel_bytes(
&self,
rgba: &mut Vec<u8>,
channel_kind: PsdChannelKind,
channel_bytes: &ChannelBytes,
) {
match channel_bytes {
ChannelBytes::RawData(channel_bytes) => {
let offset = channel_kind.rgba_offset().unwrap();
for (idx, byte) in channel_bytes.iter().enumerate() {
let rgba_idx = self.rgba_idx(idx);
rgba[rgba_idx * 4 + offset] = *byte;
}
}
// https://en.wikipedia.org/wiki/PackBits
ChannelBytes::RleCompressed(channel_bytes) => {
self.insert_rle_channel(rgba, channel_kind, &channel_bytes);
}
}
}
/// rle decompress a channel (R,G,B or A) and insert it into a vector of RGBA pixels.
///
    /// We use the channel's offset to know where to put each byte, so red would go in 0, 4, 8, ...
    /// and blue would go in 1, 5, 9, ... etc.
///
/// https://en.wikipedia.org/wiki/PackBits - algorithm used for decompression
fn | (
&self,
rgba: &mut Vec<u8>,
channel_kind: PsdChannelKind,
channel_bytes: &[u8],
) {
let mut cursor = PsdCursor::new(&channel_bytes[..]);
let mut idx = 0;
let offset = channel_kind.rgba_offset().unwrap();
let len = cursor.get_ref().len() as u64;
while cursor.position() < len {
let header = cursor.read_i8() as i16;
if header == -128 {
continue;
} else if header >= 0 {
let bytes_to_read = 1 + header;
if cursor.position() + bytes_to_read as u64 > len {
break;
}
for byte in cursor.read(bytes_to_read as u32) {
let rgba_idx = self.rgba_idx(idx);
rgba[rgba_idx * 4 + offset] = *byte;
idx += 1;
}
} else {
let repeat = 1 - header;
if cursor.position() + 1 > len {
break;
}
let byte = cursor.read_1()[0];
for _ in 0..repeat as usize {
let rgba_idx = self.rgba_idx(idx);
rgba[rgba_idx * 4 + offset] = byte;
idx += 1;
}
};
}
}
}
/// Rle decompress a channel
fn rle_decompress(bytes: &[u8]) -> Vec<u8> {
let mut cursor = PsdCursor::new(&bytes[..]);
let mut decompressed = vec![];
while cursor.position() != cursor.get_ref().len() as u64 {
let header = cursor.read_i8() as i16;
if header == -128 {
continue;
} else if header >= 0 {
let bytes_to_read = 1 + header;
for byte in cursor.read(bytes_to_read as u32) {
decompressed.push(*byte);
}
} else {
let repeat = 1 - header;
let byte = cursor.read_1()[0];
for _ in 0..repeat as usize {
decompressed.push(byte);
}
};
}
decompressed
}
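// Illustrative sketch (not part of the original file): a minimal check of the PackBits
// decoding implemented above. A non-negative header n copies n + 1 literal bytes, while a
// negative header h (other than -128) repeats the next byte 1 - h times.
#[cfg(test)]
mod rle_decompress_sketch {
    use super::*;

    #[test]
    fn decodes_literal_and_repeat_runs() {
        // [2, AA, BB, CC] -> literal run of three bytes,
        // [0xFD (-3), DD] -> 0xDD repeated 1 - (-3) = 4 times.
        let packed = [2u8, 0xAA, 0xBB, 0xCC, 0xFD, 0xDD];
        assert_eq!(
            rle_decompress(&packed),
            vec![0xAA, 0xBB, 0xCC, 0xDD, 0xDD, 0xDD, 0xDD]
        );
    }
}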
/// Take two 8 bit channels that together represent a 16 bit channel and convert them down
/// into an 8 bit channel.
///
/// The converted bytes are collected into a new buffer and returned.
fn sixteen_to_eight_rgba(channel1: &[u8], channel2: &[u8]) -> Vec<u8> {
let mut eight = Vec::with_capacity(channel1.len());
for idx in 0..channel1.len() {
if idx % 2 == 1 {
continue;
}
let sixteen_bit = [channel1[idx], channel1[idx + 1]];
let sixteen_bit = u16::from_be_bytes(sixteen_bit);
let eight_bit = (sixteen_bit / 256) as u8;
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(255);
}
for idx in 0..channel2.len() {
if idx % 2 == 1 {
continue;
}
let sixteen_bit = [channel2[idx], channel2[idx + 1]];
let sixteen_bit = u16::from_be_bytes(sixteen_bit);
let eight_bit = (sixteen_bit / 256) as u8;
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(255);
}
eight
}
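// Illustrative sketch (not part of the original file): each big-endian 16-bit sample is
// scaled down to 8 bits by dividing by 256, and the resulting grey value is written to the
// R, G and B slots with an opaque alpha, so the sample 0xABCD becomes [0xAB, 0xAB, 0xAB, 255].
#[cfg(test)]
mod sixteen_to_eight_sketch {
    use super::*;

    #[test]
    fn maps_16_bit_samples_to_grey_rgba() {
        let channel1 = [0xAB, 0xCD]; // one big-endian 16-bit sample
        let channel2 = [0x01, 0x00]; // 0x0100 / 256 = 1
        assert_eq!(
            sixteen_to_eight_rgba(&channel1, &channel2),
            vec![0xAB, 0xAB, 0xAB, 255, 0x01, 0x01, 0x01, 255]
        );
    }
}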
/// Indicates how a channel's data is compressed
#[derive(Debug, Eq, PartialEq)]
#[allow(missing_docs)]
pub enum PsdChannelCompression {
/// Not compressed
RawData = 0,
/// Compressed using [PackBits RLE compression](https://en.wikipedia.org/wiki/PackBits)
RleCompressed = 1,
/// Currently unsupported
ZipWithoutPrediction = 2,
/// Currently unsupported
ZipWithPrediction = 3,
}
impl PsdChannelCompression {
/// Create a new PsdLayerChannelCompression
pub fn new(compression: u16) -> Option<PsdChannelCompression> {
match compression {
0 => Some(PsdChannelCompression::RawData),
1 => Some(PsdChannelCompression::RleCompressed),
2 => Some(PsdChannelCompression::ZipWithoutPrediction),
3 => Some(PsdChannelCompression::ZipWithPrediction),
_ => None,
}
}
}
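// Illustrative sketch (not part of the original file): the compression field read from a
// PSD channel header maps onto the enum above, with unknown codes yielding None.
#[cfg(test)]
mod compression_code_sketch {
    use super::*;

    #[test]
    fn maps_compression_codes() {
        assert_eq!(
            PsdChannelCompression::new(1),
            Some(PsdChannelCompression::RleCompressed)
        );
        assert_eq!(PsdChannelCompression::new(7), None);
    }
}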
/// The different kinds of channels in a layer (red, green, blue, ...).
#[derive(Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
#[allow(missing_docs)]
pub enum PsdChannelKind {
Red = | insert_rle_channel | identifier_name |
psd_channel.rs | _height() * 4) as usize;
let red = self.red();
let green = self.green();
let blue = self.blue();
let alpha = self.alpha();
// TODO: We're assuming that if we only see two channels it is a 16 bit grayscale
// PSD. Instead we should just check the Psd's color mode and depth to see if
// they are grayscale and sixteen. As we run into more cases we'll clean things like
// this up over time.
// if green.is_some() && blue.is_none() && alpha.is_none() {
// return self.generate_16_bit_grayscale_rgba();
// }
let mut rgba = vec![0; rgba_len];
use crate::psd_channel::PsdChannelKind::*;
self.insert_channel_bytes(&mut rgba, Red, red);
// If there is a green channel we use it, otherwise we use the red channel since this is
// a single channel grey image (such as a heightmap).
if let Some(green) = green {
self.insert_channel_bytes(&mut rgba, Green, green);
} else {
self.insert_channel_bytes(&mut rgba, Green, red);
}
// If there is a blue channel we use it, otherwise we use the red channel since this is
// a single channel grey image (such as a heightmap).
if let Some(blue) = blue {
self.insert_channel_bytes(&mut rgba, Blue, blue);
} else {
self.insert_channel_bytes(&mut rgba, Blue, red);
}
if let Some(alpha_channel) = alpha {
self.insert_channel_bytes(&mut rgba, TransparencyMask, alpha_channel);
} else {
// If there is no transparency data then the image is opaque
for idx in 0..rgba_len / 4 {
rgba[idx * 4 + 3] = 255;
}
}
rgba
}
/// Generate an RGBA Vec<u8> from a composite image or layer that uses 16 bits per
/// pixel. We do this by mapping the 16 bits back down to 8 bits.
///
/// The 16 bits are stored across the red and green channels (first and second).
fn generate_16_bit_grayscale_rgba(&self) -> Vec<u8> {
match self.red() { | sixteen_to_eight_rgba(red, green)
}
},
ChannelBytes::RleCompressed(red) => {
let red = &rle_decompress(red);
match self.green().unwrap() {
ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green),
ChannelBytes::RleCompressed(green) => {
let green = &rle_decompress(green);
sixteen_to_eight_rgba(red, green)
}
}
}
}
}
/// Given some vector of bytes, insert the bytes from the given channel into the vector.
///
/// Doing it this way allows us to allocate for one vector and insert all 4 (RGBA) channels into
/// it.
fn insert_channel_bytes(
&self,
rgba: &mut Vec<u8>,
channel_kind: PsdChannelKind,
channel_bytes: &ChannelBytes,
) {
match channel_bytes {
ChannelBytes::RawData(channel_bytes) => {
let offset = channel_kind.rgba_offset().unwrap();
for (idx, byte) in channel_bytes.iter().enumerate() {
let rgba_idx = self.rgba_idx(idx);
rgba[rgba_idx * 4 + offset] = *byte;
}
}
// https://en.wikipedia.org/wiki/PackBits
ChannelBytes::RleCompressed(channel_bytes) => {
self.insert_rle_channel(rgba, channel_kind, &channel_bytes);
}
}
}
/// rle decompress a channel (R,G,B or A) and insert it into a vector of RGBA pixels.
///
    /// We use the channel's offset to know where to put each byte, so red would go in 0, 4, 8, ...
    /// and blue would go in 1, 5, 9, ... etc.
///
/// https://en.wikipedia.org/wiki/PackBits - algorithm used for decompression
fn insert_rle_channel(
&self,
rgba: &mut Vec<u8>,
channel_kind: PsdChannelKind,
channel_bytes: &[u8],
) {
let mut cursor = PsdCursor::new(&channel_bytes[..]);
let mut idx = 0;
let offset = channel_kind.rgba_offset().unwrap();
let len = cursor.get_ref().len() as u64;
while cursor.position() < len {
let header = cursor.read_i8() as i16;
if header == -128 {
continue;
} else if header >= 0 {
let bytes_to_read = 1 + header;
if cursor.position() + bytes_to_read as u64 > len {
break;
}
for byte in cursor.read(bytes_to_read as u32) {
let rgba_idx = self.rgba_idx(idx);
rgba[rgba_idx * 4 + offset] = *byte;
idx += 1;
}
} else {
let repeat = 1 - header;
if cursor.position() + 1 > len {
break;
}
let byte = cursor.read_1()[0];
for _ in 0..repeat as usize {
let rgba_idx = self.rgba_idx(idx);
rgba[rgba_idx * 4 + offset] = byte;
idx += 1;
}
};
}
}
}
/// Rle decompress a channel
fn rle_decompress(bytes: &[u8]) -> Vec<u8> {
let mut cursor = PsdCursor::new(&bytes[..]);
let mut decompressed = vec![];
while cursor.position() != cursor.get_ref().len() as u64 {
let header = cursor.read_i8() as i16;
if header == -128 {
continue;
} else if header >= 0 {
let bytes_to_read = 1 + header;
for byte in cursor.read(bytes_to_read as u32) {
decompressed.push(*byte);
}
} else {
let repeat = 1 - header;
let byte = cursor.read_1()[0];
for _ in 0..repeat as usize {
decompressed.push(byte);
}
};
}
decompressed
}
/// Take two 8 bit channels that together represent a 16 bit channel and convert them down
/// into an 8 bit channel.
///
/// The converted bytes are collected into a new buffer and returned.
fn sixteen_to_eight_rgba(channel1: &[u8], channel2: &[u8]) -> Vec<u8> {
let mut eight = Vec::with_capacity(channel1.len());
for idx in 0..channel1.len() {
if idx % 2 == 1 {
continue;
}
let sixteen_bit = [channel1[idx], channel1[idx + 1]];
let sixteen_bit = u16::from_be_bytes(sixteen_bit);
let eight_bit = (sixteen_bit / 256) as u8;
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(255);
}
for idx in 0..channel2.len() {
if idx % 2 == 1 {
continue;
}
let sixteen_bit = [channel2[idx], channel2[idx + 1]];
let sixteen_bit = u16::from_be_bytes(sixteen_bit);
let eight_bit = (sixteen_bit / 256) as u8;
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(255);
}
eight
}
/// Indicates how a channel's data is compressed
#[derive(Debug, Eq, PartialEq)]
#[allow(missing_docs)]
pub enum PsdChannelCompression {
/// Not compressed
RawData = 0,
/// Compressed using [PackBits RLE compression](https://en.wikipedia.org/wiki/PackBits)
RleCompressed = 1,
/// Currently unsupported
ZipWithoutPrediction = 2,
/// Currently unsupported
ZipWithPrediction = 3,
}
impl PsdChannelCompression {
/// Create a new PsdLayerChannelCompression
pub fn new(compression: u16) -> Option<PsdChannelCompression> {
match compression {
0 => Some(PsdChannelCompression::RawData),
1 => Some(PsdChannelCompression::RleCompressed),
2 => Some(PsdChannelCompression::ZipWithoutPrediction),
3 => Some(PsdChannelCompression::ZipWithPrediction),
_ => None,
}
}
}
/// The different kinds of channels in a layer (red, green, blue, ...).
#[derive(Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
#[allow(missing_docs)]
pub enum PsdChannelKind {
Red = 0 | ChannelBytes::RawData(red) => match self.green().unwrap() {
ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green),
ChannelBytes::RleCompressed(green) => {
let green = &rle_decompress(green);
| random_line_split |
psd_channel.rs | () * 4) as usize;
let red = self.red();
let green = self.green();
let blue = self.blue();
let alpha = self.alpha();
// TODO: We're assuming that if we only see two channels it is a 16 bit grayscale
// PSD. Instead we should just check the Psd's color mode and depth to see if
// they are grayscale and sixteen. As we run into more cases we'll clean things like
// this up over time.
// if green.is_some() && blue.is_none() && alpha.is_none() {
// return self.generate_16_bit_grayscale_rgba();
// }
let mut rgba = vec![0; rgba_len];
use crate::psd_channel::PsdChannelKind::*;
self.insert_channel_bytes(&mut rgba, Red, red);
// If there is a green channel we use it, otherwise we use the red channel since this is
// a single channel grey image (such as a heightmap).
if let Some(green) = green {
self.insert_channel_bytes(&mut rgba, Green, green);
} else {
self.insert_channel_bytes(&mut rgba, Green, red);
}
// If there is a blue channel we use it, otherwise we use the red channel since this is
// a single channel grey image (such as a heightmap).
if let Some(blue) = blue {
self.insert_channel_bytes(&mut rgba, Blue, blue);
} else {
self.insert_channel_bytes(&mut rgba, Blue, red);
}
if let Some(alpha_channel) = alpha {
self.insert_channel_bytes(&mut rgba, TransparencyMask, alpha_channel);
} else {
// If there is no transparency data then the image is opaque
for idx in 0..rgba_len / 4 {
rgba[idx * 4 + 3] = 255;
}
}
rgba
}
/// Generate an RGBA Vec<u8> from a composite image or layer that uses 16 bits per
/// pixel. We do this by mapping the 16 bits back down to 8 bits.
///
/// The 16 bits are stored across the red and green channels (first and second).
fn generate_16_bit_grayscale_rgba(&self) -> Vec<u8> {
match self.red() {
ChannelBytes::RawData(red) => match self.green().unwrap() {
ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green),
ChannelBytes::RleCompressed(green) => {
let green = &rle_decompress(green);
sixteen_to_eight_rgba(red, green)
}
},
ChannelBytes::RleCompressed(red) => {
let red = &rle_decompress(red);
match self.green().unwrap() {
ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green),
ChannelBytes::RleCompressed(green) => {
let green = &rle_decompress(green);
sixteen_to_eight_rgba(red, green)
}
}
}
}
}
/// Given some vector of bytes, insert the bytes from the given channel into the vector.
///
/// Doing it this way allows us to allocate for one vector and insert all 4 (RGBA) channels into
/// it.
fn insert_channel_bytes(
&self,
rgba: &mut Vec<u8>,
channel_kind: PsdChannelKind,
channel_bytes: &ChannelBytes,
) {
match channel_bytes {
ChannelBytes::RawData(channel_bytes) => {
let offset = channel_kind.rgba_offset().unwrap();
for (idx, byte) in channel_bytes.iter().enumerate() {
let rgba_idx = self.rgba_idx(idx);
rgba[rgba_idx * 4 + offset] = *byte;
}
}
// https://en.wikipedia.org/wiki/PackBits
ChannelBytes::RleCompressed(channel_bytes) => {
self.insert_rle_channel(rgba, channel_kind, &channel_bytes);
}
}
}
/// rle decompress a channel (R,G,B or A) and insert it into a vector of RGBA pixels.
///
    /// We use the channel's offset to know where to put each byte, so red would go in 0, 4, 8, ...
    /// and blue would go in 1, 5, 9, ... etc.
///
/// https://en.wikipedia.org/wiki/PackBits - algorithm used for decompression
fn insert_rle_channel(
&self,
rgba: &mut Vec<u8>,
channel_kind: PsdChannelKind,
channel_bytes: &[u8],
) {
let mut cursor = PsdCursor::new(&channel_bytes[..]);
let mut idx = 0;
let offset = channel_kind.rgba_offset().unwrap();
let len = cursor.get_ref().len() as u64;
while cursor.position() < len {
let header = cursor.read_i8() as i16;
if header == -128 {
continue;
} else if header >= 0 {
let bytes_to_read = 1 + header;
if cursor.position() + bytes_to_read as u64 > len {
break;
}
for byte in cursor.read(bytes_to_read as u32) {
let rgba_idx = self.rgba_idx(idx);
rgba[rgba_idx * 4 + offset] = *byte;
idx += 1;
}
} else {
let repeat = 1 - header;
if cursor.position() + 1 > len |
let byte = cursor.read_1()[0];
for _ in 0..repeat as usize {
let rgba_idx = self.rgba_idx(idx);
rgba[rgba_idx * 4 + offset] = byte;
idx += 1;
}
};
}
}
}
/// Rle decompress a channel
fn rle_decompress(bytes: &[u8]) -> Vec<u8> {
let mut cursor = PsdCursor::new(&bytes[..]);
let mut decompressed = vec![];
while cursor.position() != cursor.get_ref().len() as u64 {
let header = cursor.read_i8() as i16;
if header == -128 {
continue;
} else if header >= 0 {
let bytes_to_read = 1 + header;
for byte in cursor.read(bytes_to_read as u32) {
decompressed.push(*byte);
}
} else {
let repeat = 1 - header;
let byte = cursor.read_1()[0];
for _ in 0..repeat as usize {
decompressed.push(byte);
}
};
}
decompressed
}
/// Take two 8 bit channels that together represent a 16 bit channel and convert them down
/// into an 8 bit channel.
///
/// The converted bytes are collected into a new buffer and returned.
fn sixteen_to_eight_rgba(channel1: &[u8], channel2: &[u8]) -> Vec<u8> {
let mut eight = Vec::with_capacity(channel1.len());
for idx in 0..channel1.len() {
if idx % 2 == 1 {
continue;
}
let sixteen_bit = [channel1[idx], channel1[idx + 1]];
let sixteen_bit = u16::from_be_bytes(sixteen_bit);
let eight_bit = (sixteen_bit / 256) as u8;
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(255);
}
for idx in 0..channel2.len() {
if idx % 2 == 1 {
continue;
}
let sixteen_bit = [channel2[idx], channel2[idx + 1]];
let sixteen_bit = u16::from_be_bytes(sixteen_bit);
let eight_bit = (sixteen_bit / 256) as u8;
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(eight_bit);
eight.push(255);
}
eight
}
/// Indicates how a channel's data is compressed
#[derive(Debug, Eq, PartialEq)]
#[allow(missing_docs)]
pub enum PsdChannelCompression {
/// Not compressed
RawData = 0,
/// Compressed using [PackBits RLE compression](https://en.wikipedia.org/wiki/PackBits)
RleCompressed = 1,
/// Currently unsupported
ZipWithoutPrediction = 2,
/// Currently unsupported
ZipWithPrediction = 3,
}
impl PsdChannelCompression {
/// Create a new PsdLayerChannelCompression
pub fn new(compression: u16) -> Option<PsdChannelCompression> {
match compression {
0 => Some(PsdChannelCompression::RawData),
1 => Some(PsdChannelCompression::RleCompressed),
2 => Some(PsdChannelCompression::ZipWithoutPrediction),
3 => Some(PsdChannelCompression::ZipWithPrediction),
_ => None,
}
}
}
/// The different kinds of channels in a layer (red, green, blue, ...).
#[derive(Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
#[allow(missing_docs)]
pub enum PsdChannelKind {
Red = | {
break;
} | conditional_block |
train_nn.py | , data_dir, model_dir, task_id, isInteractive=True, OOV=False,
memory_size=50, random_state=None, batch_size=32, learning_rate=0.001, epsilon=1e-8,
max_grad_norm=40.0, evaluation_interval=10, hops=3, epochs=200, embedding_size=20, save_model=10,
checkpoint_path='./models', optim='adam', momentum=0.9, decay=0, gamma=0.1, step=30):
self.data_dir = data_dir
self.task_id = task_id
self.model_dir = model_dir
self.isInteractive = isInteractive
self.OOV = OOV
self.memory_size = memory_size
self.random_state = random_state
self.batch_size = batch_size
self.learning_rate = learning_rate
self.epsilon = epsilon
self.max_grad_norm = max_grad_norm
self.evaluation_interval = evaluation_interval
self.hops = hops
self.epochs = epochs
self.embedding_size = embedding_size
self.save_model = save_model
self.checkpoint_path = checkpoint_path
self.optim = optim
self.momentum = momentum
self.decay = decay
self.gamma = gamma
self.step = step
self.train_dataset = CDATA(data_dir=self.data_dir, task_id=self.task_id, memory_size=self.memory_size,
train=0, batch_size=self.batch_size, nn=False) # 0->train, 1->validate, 2->test
self.model = MemN2NDialog(batch_size=self.batch_size, vocab_size=self.train_dataset.getParam('vocab_size'),
candidate_size=self.train_dataset.getParam('candidate_sentence_size'), sentence_size=self.train_dataset.getParam('sentence_size'),
candidates_vec=self.train_dataset.getParam('candidates_vec'), embedding_size=self.embedding_size, hops=self.hops,
learning_rate=self.learning_rate, max_grad_norm=self.max_grad_norm, task_id=self.task_id)
if torch.cuda.is_available():
self.model = self.model.cuda()
def train(self):
trainS, trainQ, trainA = self.train_dataset.getData()
assert len(trainS) == len(trainQ) and len(trainQ) == len(trainA)
n_train = len(trainS)
batches = zip(range(0, n_train - self.batch_size, self.batch_size),
range(self.batch_size, n_train, self.batch_size))
batches = [(start, end) for start, end in batches]
if self.optim == 'sgd':
|
elif self.optim == 'rms':
optimizer = torch.optim.RMSprop(self.model.parameters(), lr=self.learning_rate, momentum=self.momentum)
print("RMSprop optimizer")
else:
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
print("Adam optimizer")
scheduler = None
if self.decay:
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=self.step, gamma=self.gamma)
print("Decay learning rate initialized")
for epoch in range(self.epochs):
print('epoch :', epoch)
if self.decay:
scheduler.step()
np.random.shuffle(batches)
running_loss = 0.0
for start, end in batches:
s = trainS[start:end]
q = trainQ[start:end]
a = trainA[start:end]
optimizer.zero_grad()
running_loss += self.model.batch_train(s, q, a)
optimizer.step()
print('loss = ',running_loss / n_train)
#-----------------------Save model after every nth epoch-----------------------------------
if epoch % self.save_model == 0:
print("Saving models")
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
model_name = os.path.join(self.checkpoint_path, str(self.task_id) + '.pkl')
torch.save(self.model.state_dict(), model_name)
self.test(0)
self.test(1)
self.test(2)
#------------------------------------------------------------------------------------------
def test(self,data_type):
# 0->train, 1->validate, 2->test
print("----------------------------------------------------------------------")
print("STARTED TESTING: ", data_type)
dataset = CDATA(data_dir=self.data_dir, task_id=self.task_id, memory_size=self.memory_size,
train=data_type, batch_size=self.batch_size) # 0->train, 1->validate, 2->test
testS, testQ, testA = dataset.getData()
assert len(testS) == len(testQ) and len(testQ) == len(testA)
n_test = len(testS)
fname = os.path.join(self.checkpoint_path, str(self.task_id) + '.pkl')
self.model.load_state_dict(torch.load(fname))
acc, loss = self.model.test(testS, testQ, testA)
print('Accuracy = ', acc)
print("----------------------------------------------------------------------")
def build_vocab(self, data, candidates):
vocab = reduce(lambda x, y: x | y, (set(
list(chain.from_iterable(s)) + q) for s, q, a in data))
vocab |= reduce(lambda x, y: x | y, (set(candidate)
for candidate in candidates))
vocab = sorted(vocab)
self.word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
max_story_size = max(map(len, (s for s, _, _ in data)))
mean_story_size = int(np.mean([len(s) for s, _, _ in data]))
self.sentence_size = max(
map(len, chain.from_iterable(s for s, _, _ in data)))
self.candidate_sentence_size = max(map(len, candidates))
query_size = max(map(len, (q for _, q, _ in data)))
self.memory_size = min(self.memory_size, max_story_size)
self.vocab_size = len(self.word_idx) + 1 # +1 for nil word
self.sentence_size = max(
query_size, self.sentence_size) # for the position
# params
print("vocab size:", self.vocab_size)
print("Longest sentence length", self.sentence_size)
print("Longest candidate sentence length",
self.candidate_sentence_size)
print("Longest story length", max_story_size)
print("Average story length", mean_story_size)
def main(params):
model_dir = "task" + str(params['task_id']) + "_" + params['model_dir']
if not os.path.exists(model_dir):
os.makedirs(model_dir)
chatbot = chatBot(data_dir=params['data_dir'], model_dir=model_dir, task_id=params['task_id'], isInteractive=params['interactive'], OOV=params['OOV'], memory_size=params['memory_size'], random_state=params['random_state'], batch_size=params['batch_size'],
learning_rate=params['learning_rate'], epsilon=params['epsilon'], max_grad_norm=params['max_grad_norm'], evaluation_interval=params['evaluation_interval'], hops=params['hops'], epochs=params['epochs'], embedding_size=params['embedding_size'],
save_model=params['save_model'], checkpoint_path=params['checkpoint_path'], optim=params['optim'], momentum=params['momentum'],
decay=params['decay'], gamma=params['gamma'], step=params['step'])
if params['train']:
chatbot.train()
else:
chatbot.test(0)
chatbot.test(1)
chatbot.test(2)
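# Illustrative note (not part of the original script): main() can also be driven
# programmatically with a params dict that mirrors the argparse flags defined below,
# e.g.
#
#   params = {'data_dir': 'data/dialog-bAbI-tasks/', 'model_dir': 'model/', 'task_id': 6,
#             'train': 1, 'interactive': 0, 'OOV': 0, 'memory_size': 50, 'random_state': None,
#             'batch_size': 32, 'learning_rate': 0.001, 'epsilon': 1e-8, 'max_grad_norm': 40.0,
#             'evaluation_interval': 10, 'hops': 3, 'epochs': 200, 'embedding_size': 20,
#             'save_model': 10, 'checkpoint_path': './models', 'optim': 'adam', 'momentum': 0.9,
#             'decay': 0, 'gamma': 0.1, 'step': 30}
#   main(params)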
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', default=0.001, type=float,
help='Learning rate for Optimizer')
parser.add_argument('--epsilon', default=1e-8, type=float,
help='Epsilon value for Adam Optimizer')
parser.add_argument('--max_grad_norm', default=40.0, type=float,
help='Clip gradients to this norm')
parser.add_argument('--evaluation_interval', default=10, type=int,
help='Evaluate and print results every x epochs')
parser.add_argument('--batch_size', default=32, type=int, help='Batch size for training')
parser.add_argument('--hops', default=3, type=int, help='Number of hops in the Memory Network')
parser.add_argument('--epochs', default=200, type=int, help='Number of epochs to train for')
parser.add_argument('--embedding_size', default=20, type=int,
help='Embedding size for embedding matrices')
parser.add_argument('--memory_size', default=50, type=int, help='Maximum size of memory')
parser.add_argument('--task_id', default=6, type=int, help='bAbI task id, 1 <= id <= 6')
parser.add_argument('--random_state', default=None, help='Random state')
parser.add_argument('--data_dir', default='data/dialog-bAbI-tasks/',
help='Directory containing bAbI tasks')
parser.add_argument('--model_dir', default='model/',
help='Directory containing memn2n model checkpoints')
parser.add_argument('--train', default=1, type=int, help='Train if True, test if False')
parser.add_argument('--interactive', default=0, type=int, help='if True, interactive')
parser.add_argument('--OOV', default=0, type=int, help='if True, use O | optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=self.momentum)
print("SGD optimizer") | conditional_block |